
Implementing Odd-Even Sort with MPI (Message-Passing Interface)

2017-12-15 18:59
Implementation of parallel odd-even sort with MPI:



Each process receives the slice of data it has to sort, namely the total count divided by the number of processes (when the count is not evenly divisible, the tail of the array is padded with the maximum integer value so that it becomes divisible), and begins with a local sort of its block. In the first, even phase, process 0 exchanges data with process 1 and process 2 with process 3; the lower-ranked process of each pair keeps the smaller half. In the following odd phase, processes 1 and 2 exchange, while processes 0 and 3 stay idle. This alternation continues for p phases, after which, by the odd-even transposition sort theorem, the data is globally sorted. Reference: An Introduction to Parallel Programming by Peter Pacheco.
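To make the pairing pattern concrete, here is a minimal serial odd-even transposition sort. The prototype odd_even_sort declared in the listing below is never defined there, so this is only a sketch of what a single-process version could look like; in the MPI program each compare-and-swap of two neighboring elements becomes a compare-and-merge of two locally sorted blocks held by partner ranks.

// A minimal serial odd-even transposition sort (sketch, for reference only).
void odd_even_sort(int a[], int n)
{
    int phase, i, tmp;
    for (phase = 0; phase < n; phase++)
    {
        // Even phase compares pairs (0,1), (2,3), ...; odd phase (1,2), (3,4), ...
        for (i = (phase % 2 == 0) ? 1 : 2; i < n; i += 2)
        {
            if (a[i - 1] > a[i])
            {
                tmp = a[i - 1];
                a[i - 1] = a[i];
                a[i] = tmp;
            }
        }
    }
}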

#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <memory.h>
#include <time.h>

//int array[] = {15, 11, 9, 16, 3, 14, 8, 7, 4, 6, 12, 10, 5, 2, 13, 1};
void merge(int mykeys[], int receive[], int n, int flag);
void odd_even_sort(int a[], int n);
void doSort(int myid, int local_n, int np);
void printMatrix(int array[], int n);
void init(int n, int myid, int np);
int getPartner(int phase, int myid, int comm_sz);
int cmp(const void *a, const void *b);

int *array, *mykeys, *receive;
int n, partner;

int main(int argc, char **argv)
{
    int myid, np, namelen;
    char proc_name[MPI_MAX_PROCESSOR_NAME];
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);
    MPI_Comm_size(MPI_COMM_WORLD, &np);
    MPI_Get_processor_name(proc_name, &namelen);

    if (argc < 2)
    {
        if (myid == 0)
            fprintf(stderr, "usage: %s <n>\n", argv[0]);
        MPI_Finalize();
        return 1;
    }

    double beginTime, endTime;
    beginTime = MPI_Wtime();

    n = atoi(argv[1]);
    // Block size after padding n up to a multiple of np
    // (pads a full extra block when n is already divisible by np).
    int local_n = (n + (np - n % np)) / np;

    // 1. Initialize: rank 0 fills the padded array with random numbers.
    init(n, myid, np);

    // 2. Scatter one block of local_n keys to every process.
    MPI_Scatter(array, local_n, MPI_INT, mykeys, local_n, MPI_INT, 0, MPI_COMM_WORLD);

    // 3. Local sort of each block.
    qsort(mykeys, local_n, sizeof(int), cmp);

    // 4. Run np phases of parallel odd-even sort.
    doSort(myid, local_n, np);

    // 5. Rank 0 gathers the sorted blocks.
    MPI_Gather(mykeys, local_n, MPI_INT, array, local_n, MPI_INT, 0, MPI_COMM_WORLD);
    endTime = MPI_Wtime();
    if (myid == 0)
    {
        printMatrix(array, n);
        printf("spent time = %lf second\n", endTime - beginTime);
    }

    free(array);
    free(mykeys);
    free(receive);
    MPI_Finalize();
    return 0;
}

/* Merge the local block with the partner's block and keep one half:
 * flag > 0 (myid > partner) keeps the larger half, flag <= 0 keeps the
 * smaller half. Both input blocks are already sorted. */
void merge(int mykeys[], int receive[], int n, int flag)
{
    int mi, ti, ri;
    int *temp = malloc(sizeof(int) * n * 2);
    if (temp == NULL)
    {
        exit(-1);
    }

    mi = ri = ti = 0;
    while (mi < n && ri < n)
    {
        if (mykeys[mi] >= receive[ri])
        {
            temp[ti] = receive[ri];
            ri++;
            ti++;
        }
        else
        {
            temp[ti] = mykeys[mi];
            ti++;
            mi++;
        }
    }

    while (mi < n)
    {
        temp[ti] = mykeys[mi];
        ti++;
        mi++;
    }
    while (ri < n)
    {
        temp[ti] = receive[ri];
        ti++;
        ri++;
    }

    // Keep the upper half of the merged result if flag > 0, else the lower half.
    ti = flag > 0 ? n : 0;

    for (mi = 0; mi < n; mi++)
        mykeys[mi] = temp[ti++];
    free(temp);
}

void printMatrix(int array[], int n)
{
    int i;
    for (i = 0; i < n; i++)
        printf("%d\n", array[i]);
}

void doSort(int myid, int local_n, int np)
{
    int i;
    // np phases suffice to sort np blocks (odd-even transposition sort theorem).
    for (i = 0; i < np; i++)
    {
        partner = getPartner(i, myid, np);
        if (partner != MPI_PROC_NULL)
        {
            // Exchange whole blocks with the partner, then keep one half.
            MPI_Sendrecv(mykeys, local_n, MPI_INT, partner, 0, receive, local_n, MPI_INT, partner, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            merge(mykeys, receive, local_n, myid - partner);
        }
    }
}

void init(int n, int myid, int np)
{
    int i;
    int total = n + np - n % np; // padded length, a multiple of np
    if (!myid)
    {
        srand(time(NULL));
        array = (int *)malloc(sizeof(int) * total);
        for (i = 0; i < n; i++)
        {
            *(array + i) = rand(); // rand() matches the srand() seed above
        }

        // Pad the tail with the largest int so the padding sorts to the end.
        for (i = n; i < total; i++)
        {
            *(array + i) = 0x7fffffff;
        }
    }
    receive = (int *)malloc(sizeof(int) * total / np);
    mykeys = (int *)malloc(sizeof(int) * total / np);
}

int getPartner(int phase, int myid, int comm_sz)
{
    int partner;
    if (phase % 2 == 0)
    {
        // Even phase: pairs (0,1), (2,3), ...
        if (myid % 2 != 0)
        {
            partner = myid - 1;
        }
        else
        {
            partner = myid + 1;
        }
    }
    else
    {
        // Odd phase: pairs (1,2), (3,4), ...; the end ranks may idle.
        if (myid % 2 != 0)
        {
            partner = myid + 1;
        }
        else
        {
            partner = myid - 1;
        }
    }
    if (partner == -1 || partner == comm_sz)
    {
        partner = MPI_PROC_NULL;
    }
    return partner;
}

int cmp(const void *a, const void *b)
{
    // qsort expects a negative/zero/positive result, not just 0 or 1.
    int x = *(const int *)a, y = *(const int *)b;
    return (x > y) - (x < y);
}


Compile: mpicc psort.c -o psort

Run: mpirun -np 10 psort 10000

This launches 10 processes to sort 10,000 numbers.
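If you want to confirm the result, rank 0 can scan the gathered array once after MPI_Gather. A small helper like the following would do; is_sorted is a hypothetical addition, not part of the listing above.

// Hypothetical check: returns 1 if a[0..n-1] is nondecreasing, else 0.
int is_sorted(const int a[], int n)
{
    int i;
    for (i = 1; i < n; i++)
        if (a[i - 1] > a[i])
            return 0;
    return 1;
}

Called as is_sorted(array, n) on rank 0 right after printMatrix, it reports whether the first n gathered keys are in order; the 0x7fffffff padding entries sit at the very end and are not printed.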
Tags: parallel computing