您的位置:首页 > 其它

MPI学习-聚合通信

2015-11-23 20:23 453 查看
聚合通信:

同一通信器内的所有进程都参加;

所有进程的函数调用形式相同;

阻塞通信方式;

不需要tag

全局通信函数

MPI_Bcast()

MPI_Scatter()

MPI_Gather()

MPI_Allgather()

MPI_Alltoall()

全局规约函数

MPI_Reduce()

MPI_Allreduce()

MPI_Reduce_scatter()

MPI_Scan()

同步函数

MPI_Barrier()

例如:计算pi:

/**
 * pi = 4/(1+x^2) 在 [0,1] 上的积分;
 * 数值近似: pi ≈ (1/N) * sum(4/(1+x_i^2)), x_i 取各子区间中点.
 */
#include "stdafx.h"
#include "mpi.h"
#include <stdio.h>
#include <math.h>
#include <iostream>
using namespace std;
/* Integrand for the pi quadrature: f(x) = 4 / (1 + x^2). */
double f(double a)
{
    double denom = 1.0 + a * a;
    return 4.0 / denom;
}

/*
 * Approximate pi by the midpoint rule applied to 4/(1+x^2) on [0,1].
 * Rank 0 reads the interval count N and broadcasts it; each rank sums
 * a contiguous block of intervals; MPI_Reduce accumulates the partial
 * sums on rank 0, which reports the result and the wall-clock time.
 * Returns 0 on normal termination.
 */
int main(int argc, char *argv[])
{
    int myrank, nprocs, i, N = 0, n, end;
    const double PI25DT = 3.141592653589793238462643; /* reference value of pi */
    double mypi, pi = 0.0, h, sum, x; /* pi: MPI_Reduce receive buffer (valid on root) */
    double startwtime = 0.0, endwtime;
    //char processor_name[MPI_MAX_PROCESSOR_NAME];
    //int namelen;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    //MPI_Get_processor_name(processor_name, &namelen);
    //fprintf(stderr, "Process %d on %s\n", myrank, processor_name);

    MPI_Barrier(MPI_COMM_WORLD);
    if (myrank == 0)
    {
        /* Read the number of subintervals. On a failed read fall back to
         * N = 0 (no work) so an uninitialized value is never broadcast. */
        cout << "Enter the number of intervals: " << endl;
        if (!(cin >> N))
        {
            N = 0;
        }
        startwtime = MPI_Wtime();
    }

    /* Rank 0 broadcasts the interval count to all ranks. */
    MPI_Bcast(&N, 1, MPI_INT, 0, MPI_COMM_WORLD);

    if (N > 0)
    {
        h = 1.0 / (double)N;
        sum = 0.0;

        /* Block distribution: each rank takes up to n = N/nprocs + 1
         * consecutive intervals; the end index is clamped to N, so
         * trailing ranks whose block starts past N simply do no work. */
        n = N / nprocs + 1;
        end = ((myrank + 1) * n) > N ? N : ((myrank + 1) * n);
        for (i = myrank * n + 1; i <= end; i++)
        {
            x = h * ((double)i - 0.5); /* midpoint of interval i */
            sum += f(x);
        }
        mypi = h * sum;

        /* Accumulate all partial sums on rank 0. */
        MPI_Reduce(&mypi, &pi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

        if (myrank == 0)
        {
            /* Stop the clock before doing output, so console I/O is not
             * charged to the measured computation time. */
            endwtime = MPI_Wtime();
            printf("pi is approximately %.16f, Error is %.16f\n",
                   pi, fabs(pi - PI25DT));
            printf("wall clock time = %f\n", endwtime - startwtime);
        }
    }
    MPI_Finalize();

    return 0;
}
内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息
标签: