Data Structure for a Distributed Sparse Matrix
2015-04-30 01:36
I have been studying sparse matrices and recently read through a project: HPCG. A little over a week in, I find it quite interesting. The notes below rebuild the wheel from it.
#include <map>
#include <set>
#include <vector>

struct SparseMatrix
{
    /* geometry data structure associated with this sparse matrix */
    Geometry* geom;

    /* global sizes */
    global_int_t totalNumberOfRows;
    global_int_t totalNumberOfNonzeros;

    /* local (per-process) sizes */
    local_int_t localNumberOfRows;
    local_int_t localNumberOfColumns;
    local_int_t localNumberOfNonzeros;

    global_int_t** mtxIndG; // global column indices, one array per row
    local_int_t** mtxIndL;  // local column indices; some refer to entries owned by external procs
    double** matrixValues;  // nonzero values, one array per row

    char* nonzerosInRow;    // number of nonzeros in each row

    /* global/local index maps */
    std::map<global_int_t, local_int_t> globalToLocalMap;
    std::vector<global_int_t> localToGlobalMap; // indexed by local row, stores the global row

    /* MPI communication coefficients */
    local_int_t numberOfExternalValues; // number of values to receive from external procs
    int numberOfSendNeighbors;          // number of neighbor procs we exchange with
    local_int_t totalToBeSent;          // total number of entries to send to all neighbors
    local_int_t* elementsToSend;        // local indices of all entries to send, grouped by neighbor
    int* neighbors;                     // ranks of the neighbor procs
    local_int_t* receiveLength;         // per neighbor: number of entries to receive from that neighbor
    local_int_t* sendLength;            // per neighbor: number of entries to send to that neighbor
    double* sendBuffer;                 // staging buffer for all outgoing entries
};
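The communication fields at the bottom of the struct exist to drive a halo exchange before each SpMV. Below is a minimal sketch of how they fit together; this is my own simplified reconstruction (the name ExchangeHaloSketch is hypothetical, not HPCG's actual ExchangeHalo.cpp), and it assumes an HPCG-style Vector whose values array has room for localNumberOfRows + numberOfExternalValues entries, with halo values appended after the locally owned ones. It also assumes the send and receive neighbor sets are identical, which holds for a symmetric stencil.

#include <mpi.h>
#include <vector>

void ExchangeHaloSketch(const SparseMatrix& A, Vector& x) {
    double* xv = x.values;
    double* recvBuf = xv + A.localNumberOfRows; // halo region starts right after local entries

    std::vector<MPI_Request> requests(A.numberOfSendNeighbors);

    // Post one receive per neighbor; incoming blocks land back to back in the halo region.
    for (int n = 0; n < A.numberOfSendNeighbors; ++n) {
        MPI_Irecv(recvBuf, A.receiveLength[n], MPI_DOUBLE, A.neighbors[n],
                  0, MPI_COMM_WORLD, &requests[n]);
        recvBuf += A.receiveLength[n];
    }

    // Pack outgoing entries into sendBuffer using elementsToSend, then send per neighbor.
    for (local_int_t i = 0; i < A.totalToBeSent; ++i)
        A.sendBuffer[i] = xv[A.elementsToSend[i]];

    double* sendBuf = A.sendBuffer;
    for (int n = 0; n < A.numberOfSendNeighbors; ++n) {
        MPI_Send(sendBuf, A.sendLength[n], MPI_DOUBLE, A.neighbors[n], 0, MPI_COMM_WORLD);
        sendBuf += A.sendLength[n];
    }

    MPI_Waitall(A.numberOfSendNeighbors, requests.data(), MPI_STATUSES_IGNORE);
}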
void allocate_spmatrix()
{
    // nx, ny, nz, A, and b are assumed to come from the surrounding
    // problem-generation routine (as in HPCG's GenerateProblem).
    local_int_t localNumberOfRows = nx * ny * nz;
    local_int_t numberOfNonzerosPerRow = 27; // 27-point stencil
    global_int_t totalNumberOfRows = (global_int_t)localNumberOfRows * A.geom->size;

    char* nonzerosInRow = new char[localNumberOfRows];
    global_int_t** mtxIndG = new global_int_t*[localNumberOfRows];
    local_int_t** mtxIndL = new local_int_t*[localNumberOfRows];
    double** matrixValues = new double*[localNumberOfRows];

    InitializeVector(*b, localNumberOfRows);
    double* bv = b->values; // right-hand side values, filled later

    A.localToGlobalMap.resize(localNumberOfRows); // size the local-to-global map

    // initialize the per-row pointers
    for (local_int_t i = 0; i < localNumberOfRows; i++) {
        matrixValues[i] = 0;
        mtxIndG[i] = 0;
        mtxIndL[i] = 0;
    }

    // allocate per-row storage: values plus local and global column indices
    for (local_int_t i = 0; i < localNumberOfRows; i++) {
        matrixValues[i] = new double[numberOfNonzerosPerRow]; // values are double, not local_int_t
        mtxIndL[i] = new local_int_t[numberOfNonzerosPerRow];
        mtxIndG[i] = new global_int_t[numberOfNonzerosPerRow];
    }
}
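Note that nonzerosInRow is a char array: with a 27-point stencil a row has at most 27 nonzeros, which fits in a single byte, so this keeps the per-row bookkeeping compact.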
// fill in element entries of the sparse matrix (declaration only; a sketch follows)
void fill_in_spmatrix();
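The fill step itself is not reproduced in these notes. Below is a minimal sketch of how the 27-point stencil entries could be filled, loosely following HPCG's GenerateProblem. The parameters gnx/gny/gnz (global grid dimensions) and gix0/giy0/giz0 (this process's offset into the global grid) are hypothetical stand-ins for what HPCG derives from Geometry, and allocate_spmatrix is assumed to have run already.

void fill_in_spmatrix_sketch(SparseMatrix& A,
                             local_int_t nx, local_int_t ny, local_int_t nz,
                             global_int_t gnx, global_int_t gny, global_int_t gnz,
                             global_int_t gix0, global_int_t giy0, global_int_t giz0)
{
  for (local_int_t iz = 0; iz < nz; iz++) {
    for (local_int_t iy = 0; iy < ny; iy++) {
      for (local_int_t ix = 0; ix < nx; ix++) {
        global_int_t gix = gix0 + ix, giy = giy0 + iy, giz = giz0 + iz;
        local_int_t currentLocalRow = iz*nx*ny + iy*nx + ix;
        global_int_t currentGlobalRow = giz*gnx*gny + giy*gnx + gix;
        A.globalToLocalMap[currentGlobalRow] = currentLocalRow;
        A.localToGlobalMap[currentLocalRow] = currentGlobalRow;

        char numberOfNonzerosInRow = 0;
        double* currentValuePointer = A.matrixValues[currentLocalRow];
        global_int_t* currentIndexPointerG = A.mtxIndG[currentLocalRow];
        // visit the 27 stencil neighbors, skipping those outside the global grid
        for (int sz = -1; sz <= 1; sz++) {
          for (int sy = -1; sy <= 1; sy++) {
            for (int sx = -1; sx <= 1; sx++) {
              if (giz+sz < 0 || giz+sz >= gnz) continue;
              if (giy+sy < 0 || giy+sy >= gny) continue;
              if (gix+sx < 0 || gix+sx >= gnx) continue;
              global_int_t col = currentGlobalRow + sz*gnx*gny + sy*gnx + sx;
              *currentValuePointer++ = (col == currentGlobalRow) ? 26.0 : -1.0;
              *currentIndexPointerG++ = col;
              numberOfNonzerosInRow++;
            }
          }
        }
        A.nonzerosInRow[currentLocalRow] = numberOfNonzerosInRow;
      }
    }
  }
}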
// set up the MPI communication coefficients (halo setup)
void mpi_spmatrix()
{
    std::map<int, std::set<global_int_t> > sendList, receiveList; // cpu rank ---> global rows

    typedef std::map<int, std::set<global_int_t> >::iterator map_iter;
    typedef std::set<global_int_t>::iterator set_iter;

    std::map<local_int_t, local_int_t> externalToLocalMap;

    // Scan every nonzero: any column owned by another rank must be received
    // from that rank, and symmetrically our row must be sent to it.
    for (local_int_t i = 0; i < localNumberOfRows; i++) {
        global_int_t currentGlobalRow = localToGlobalMap[i];
        for (int j = 0; j < nonzerosInRow[i]; j++) {
            global_int_t curIndex = mtxIndG[i][j];
            int rankIdOfColumnEntry = ComputeRankOfMatrixRow(*(A.geom), curIndex);
            if (A.geom->rank != rankIdOfColumnEntry) { // the entry lives on another proc
                receiveList[rankIdOfColumnEntry].insert(curIndex);
                sendList[rankIdOfColumnEntry].insert(currentGlobalRow);
            }
        }
    }

    // count number of matrix entries to send and receive
    local_int_t totalToBeSent = 0;
    for (map_iter curNeighbor = sendList.begin(); curNeighbor != sendList.end(); ++curNeighbor)
        totalToBeSent += (curNeighbor->second).size();

    local_int_t totalToBeReceived = 0; // useful for sanity checks
    for (map_iter curNeighbor = receiveList.begin(); curNeighbor != receiveList.end(); ++curNeighbor)
        totalToBeReceived += (curNeighbor->second).size();

    // arrays and lists needed by the ExchangeHalo function
    double* sendBuffer = new double[totalToBeSent];
    local_int_t* elementsToSend = new local_int_t[totalToBeSent];
    int* neighbors = new int[sendList.size()];
    local_int_t* receiveLength = new local_int_t[receiveList.size()];
    local_int_t* sendLength = new local_int_t[sendList.size()];

    int neighborCount = 0;
    local_int_t receiveEntryCount = 0;
    local_int_t sendEntryCount = 0;

    for (map_iter curNeighbor = receiveList.begin(); curNeighbor != receiveList.end(); curNeighbor++, neighborCount++) {
        int neighborId = curNeighbor->first;
        neighbors[neighborCount] = neighborId;
        receiveLength[neighborCount] = receiveList[neighborId].size();
        sendLength[neighborCount] = sendList[neighborId].size();

        for (set_iter i = receiveList[neighborId].begin(); i != receiveList[neighborId].end(); i++, receiveEntryCount++) {
            // *i is a global_int_t, yet externalToLocalMap is declared as a
            // local-to-local map ?? (presumably this only works while global
            // indices still fit in local_int_t)
            externalToLocalMap[*i] = localNumberOfRows + receiveEntryCount;
        }

        for (set_iter i = sendList[neighborId].begin(); i != sendList[neighborId].end(); i++, sendEntryCount++) {
            elementsToSend[sendEntryCount] = A.globalToLocalMap[*i]; // convert to a local index before sending
        }
    }

    // update the local column indices (mtxIndL); see the sketch below
}
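The final step, left as a comment above, converts each row's global column indices into local ones: owned columns go through globalToLocalMap, halo columns through externalToLocalMap, which numbers them after the locally owned rows. A minimal sketch, assuming the maps built above are still in scope (in HPCG this all happens inside SetupHalo):

for (local_int_t i = 0; i < localNumberOfRows; i++) {
    for (int j = 0; j < A.nonzerosInRow[i]; j++) {
        global_int_t curIndex = A.mtxIndG[i][j];
        int rankIdOfColumnEntry = ComputeRankOfMatrixRow(*(A.geom), curIndex);
        if (A.geom->rank == rankIdOfColumnEntry)
            A.mtxIndL[i][j] = A.globalToLocalMap[curIndex]; // locally owned column
        else
            A.mtxIndL[i][j] = externalToLocalMap[curIndex]; // halo column, appended after local rows
    }
}

// Afterward the remaining struct fields can be finalized, e.g.:
// A.numberOfExternalValues = externalToLocalMap.size();
// A.localNumberOfColumns   = A.localNumberOfRows + A.numberOfExternalValues;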