您的位置:首页 > 其它

MPI用MPI_Send, MPI_Recv实现MPI_Alltoall的块方法

2014-10-09 16:35 1626 查看
用了一个多小时,终于搞定用MPI_Send, MPI_Recv实现MPI_Alltoall,网上类似的源码都是转置单个元素,想按块转置真不容易。首先科普一下什么是MPI_Alltoall:MPI共有n个进程,每个进程的发送缓冲区分为n个块,如果每块只包含一个元素,则全部数据构成一个n×n的矩阵,MPI_Alltoall要做的就是转置这个矩阵,即把第i
个进程的第j块发送到第j个进程的第i块。废话不多,上源码。
#include <stdio.h>
#include <stdlib.h>

#include "mpi.h"

/*
 * My_Alltoall - block-wise all-to-all exchange built on MPI_Sendrecv.
 *
 * Semantics match MPI_Alltoall: block i of sendBuffer on this rank is
 * delivered to block `rank` of receiveBuffer on rank i ("transposing"
 * the n x n block matrix across the communicator).
 *
 * sendBuffer/receiveBuffer: contiguous arrays of `size` blocks each.
 * sendcnt/recvcnt:          elements per block (not bytes).
 * rank/size:                caller's rank and the communicator size.
 *
 * Returns 1 on success (MPI errors abort by default, so no error path
 * is reported here).
 */
int My_Alltoall(void *sendBuffer,int sendcnt,MPI_Datatype sendtype,
void *receiveBuffer,int recvcnt,MPI_Datatype recvtype,MPI_Comm comm,int rank,int size)
{
	int i;
	int send_size, recv_size;
	MPI_Status status;

	/*
	 * Byte size of ONE element of each datatype. The original code used
	 * sizeof(sendtype), but MPI_Datatype is an opaque handle: its sizeof
	 * is the handle size, not the element size, so the computed byte
	 * offsets were wrong for any datatype whose element size differs.
	 */
	MPI_Type_size(sendtype, &send_size);
	MPI_Type_size(recvtype, &recv_size);

	(void)rank; /* kept for interface compatibility; Sendrecv handles self-exchange too */

	for (i = 0; i < size; i++)
	{
		/* Cast to char*: arithmetic on void* is a GNU extension, not ISO C. */
		char *src = (char *)sendBuffer    + (size_t)i * sendcnt * send_size;
		char *dst = (char *)receiveBuffer + (size_t)i * recvcnt * recv_size;

		/*
		 * Pairwise MPI_Sendrecv is deadlock-free for any message size.
		 * The original send-everything-then-receive-everything pattern
		 * deadlocks once messages exceed the eager threshold, because
		 * every rank blocks in MPI_Send with no one posting receives.
		 * For i == rank this is a safe local self-copy.
		 */
		MPI_Sendrecv(src, sendcnt, sendtype, i, 99,
		             dst, recvcnt, recvtype, i, 99,
		             comm, &status);
	}

	return 1;
}

/*
 * Demo driver: every rank fills a buffer with globally unique values,
 * runs My_Alltoall with a block size of 2 ints, and prints both buffers
 * so the block transpose can be verified by eye.
 */
int main (int argc, char *argv[])
{
	int i, myrank, size;
	int *send_buffer;
	int *recv_buffer;

	MPI_Init (&argc, &argv);
	MPI_Comm_rank (MPI_COMM_WORLD, &myrank);
	MPI_Comm_size (MPI_COMM_WORLD, &size);

	/*
	 * NOTE: from here on `size` is the TOTAL element count (2 ints per
	 * peer), not the communicator size. The communicator size is
	 * recovered below as size/2 when calling My_Alltoall.
	 */
	size = size * 2;

	/* No cast on calloc in C; check both allocations before use. */
	send_buffer = calloc (size, sizeof (int));
	recv_buffer = calloc (size, sizeof (int));
	if (send_buffer == NULL || recv_buffer == NULL)
	{
		fprintf (stderr, "rank %d: out of memory\n", myrank);
		MPI_Abort (MPI_COMM_WORLD, 1);
	}

	/* Unique values per rank so the transpose result is unambiguous. */
	for (i = 0; i < size; i++)
		send_buffer[i] = i + myrank * size;

	/* Block size 2: rank i's block j (2 ints) lands in rank j's block i. */
	My_Alltoall (send_buffer, 2, MPI_INT,
	             recv_buffer, 2, MPI_INT, MPI_COMM_WORLD, myrank, size / 2);

	for (i = 0; i < size; i++)
	{
		printf("myrank=%d,sendbuf[%d]=%d\n", myrank, i, send_buffer[i]);
	}

	for (i = 0; i < size; i++)
		printf ("myrank = %d, recv_buffer[%d] = %d\n", myrank, i, recv_buffer[i]);

	free (recv_buffer);
	free (send_buffer);
	MPI_Finalize ();
	return 0;
}
内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息
标签: