author: Menglong TAN; email: tanmenglong_at_gmail; twitter/weibo: @crackcell; source: http://blog.crackcell.com/posts/2013/07/15/mpi_quick_start.html
For some reason there seem to be very few introductory MPI tutorials, and the ones that exist are not very clear. After going through several of them today, I'm writing down the points a beginner needs to know.
Environment: Debian sid. Install the development environment:
$ sudo apt-get install openmpi-bin openmpi-doc libopenmpi-dev gcc g++
#include <iostream>
#include <mpi.h>

using namespace std;

int main(int argc, char* argv[]) {
    MPI_Init(&argc, &argv);      // initialize the MPI environment
    cout << "hello world" << endl;
    MPI_Finalize();              // shut down the MPI environment
    return 0;
}
Compile:
$ mpicxx -o hello.exe hello.cpp
Run:
$ mpirun -np 10 ./hello.exe
Let's look at the code. The structure of an MPI program is generally the same: call MPI_Init first, do the work, and call MPI_Finalize at the end. The basic environment-management functions are:
#include <mpi.h>
int MPI_Init(int *argc, char ***argv)
#include <mpi.h>
int MPI_Initialized(int *flag)
#include <mpi.h>
int MPI_Finalize()
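Here is a minimal sketch of how these three calls fit together; it uses MPI_Initialized (which may be called before MPI_Init) to guard against initializing twice:
#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[]) {
    int flag = 0;
    MPI_Initialized(&flag);          /* has MPI_Init already been called? */
    if (!flag) {
        MPI_Init(&argc, &argv);      /* safe to initialize now */
    }
    printf("MPI is up\n");
    MPI_Finalize();                  /* every process must call this once */
    return 0;
}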
Get the number of processes in a communicator:
#include <mpi.h>
int MPI_Comm_size(MPI_Comm comm, int *size)
If the communicator is MPI_COMM_WORLD, this is the total number of processes available to the current program.
#include <mpi.h>
int MPI_Comm_rank(MPI_Comm comm, int *rank)
#include <mpi.h>
int MPI_Get_processor_name(char *name, int *resultlen)
#include <mpi.h>
int MPI_Abort(MPI_Comm comm, int errorcode)
#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[]) {
    char hostname[MPI_MAX_PROCESSOR_NAME];
    int task_count;
    int rank;
    int len;
    int ret;

    ret = MPI_Init(&argc, &argv);
    if (MPI_SUCCESS != ret) {
        printf("start mpi fail\n");
        MPI_Abort(MPI_COMM_WORLD, ret);           // tear down every process in the communicator
    }
    MPI_Comm_size(MPI_COMM_WORLD, &task_count);   // total number of processes
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);         // this process's rank
    MPI_Get_processor_name(hostname, &len);       // host this process runs on
    printf("task_count = %d, my rank = %d on %s\n", task_count, rank, hostname);
    MPI_Finalize();
    return 0;
}
Run it:
$ mpirun -np 3 ./hello3.exe
task_count = 3, my rank = 0 on crackcell-vm0
task_count = 3, my rank = 1 on crackcell-vm0
task_count = 3, my rank = 2 on crackcell-vm0
For portability, MPI defines its own message data types (for example MPI_CHAR corresponds to char, MPI_INT to int, MPI_DOUBLE to double); see [1] for details. The blocking point-to-point calls are MPI_Send and MPI_Recv; MPI_Isend and MPI_Irecv are their non-blocking counterparts:
int MPI_Send(void *buf, int count, MPI_Datatype datatype, int dest,
int tag, MPI_Comm comm)
int MPI_Isend(void *buf, int count, MPI_Datatype datatype, int dest,
              int tag, MPI_Comm comm, MPI_Request *request)
int MPI_Recv(void *buf, int count, MPI_Datatype datatype,
int source, int tag, MPI_Comm comm, MPI_Status *status)
int MPI_Irecv(void *buf, int count, MPI_Datatype datatype,
int source, int tag, MPI_Comm comm, MPI_Request *request)
#include <stdio.h>
#include <mpi.h>

// A simple ping-pong between rank 0 and rank 1.
// Run with exactly two processes: mpirun -np 2 <binary>
int main(int argc, char *argv[]) {
    int task_count;
    int rank;
    int dest;
    int src;
    int count;
    int tag = 1;
    char in_msg;
    char out_msg = 'x';
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &task_count);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (0 == rank) {
        dest = 1;
        src = 1;
        // send one char to rank 1, then wait for the reply
        MPI_Send(&out_msg, 1, MPI_CHAR, dest, tag, MPI_COMM_WORLD);
        MPI_Recv(&in_msg, 1, MPI_CHAR, src, tag, MPI_COMM_WORLD, &status);
    } else if (1 == rank) {
        dest = 0;
        src = 0;
        // receive one char from rank 0, then send the reply
        MPI_Recv(&in_msg, 1, MPI_CHAR, src, tag, MPI_COMM_WORLD, &status);
        MPI_Send(&out_msg, 1, MPI_CHAR, dest, tag, MPI_COMM_WORLD);
    }
    // how many elements of type MPI_CHAR the last receive delivered
    MPI_Get_count(&status, MPI_CHAR, &count);
    printf("task %d: recv %d char(s) from task %d with tag %d\n",
           rank, count, status.MPI_SOURCE, status.MPI_TAG);
    MPI_Finalize();
    return 0;
}
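The example above only uses the blocking calls. Here is a small sketch of the non-blocking pair MPI_Isend/MPI_Irecv: each request is completed later with MPI_Wait, and the "peer = 1 - rank" pairing is just an assumption for this two-process illustration.
#include <stdio.h>
#include <mpi.h>

// Non-blocking version of the exchange between rank 0 and rank 1.
// Run with exactly two processes: mpirun -np 2 <binary>
int main(int argc, char *argv[]) {
    int rank;
    char in_msg, out_msg = 'x';
    MPI_Request send_req, recv_req;
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (rank < 2) {
        int peer = 1 - rank;                       // 0 talks to 1 and vice versa
        MPI_Irecv(&in_msg, 1, MPI_CHAR, peer, 1, MPI_COMM_WORLD, &recv_req);
        MPI_Isend(&out_msg, 1, MPI_CHAR, peer, 1, MPI_COMM_WORLD, &send_req);
        // other useful work could happen here while the messages are in flight
        MPI_Wait(&send_req, MPI_STATUS_IGNORE);    // send buffer may be reused now
        MPI_Wait(&recv_req, &status);              // in_msg is valid after this
        printf("task %d: got '%c' from task %d\n", rank, in_msg, status.MPI_SOURCE);
    }
    MPI_Finalize();
    return 0;
}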
#include <mpi.h>
int MPI_Barrier(MPI_Comm comm)
#include <mpi.h>
int MPI_Bcast(void *buffer, int count, MPI_Datatype datatype,
int root, MPI_Comm comm)
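A minimal sketch of the two calls above: MPI_Bcast copies a buffer from the root to every process in the communicator, and MPI_Barrier blocks until all processes have reached it.
#include <stdio.h>
#include <mpi.h>

// Sketch: rank 0 broadcasts an int to everyone, then all ranks
// synchronize at a barrier before printing.
int main(int argc, char *argv[]) {
    int rank;
    int value = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (0 == rank) {
        value = 42;                                   // only the root has the data
    }
    MPI_Bcast(&value, 1, MPI_INT, 0, MPI_COMM_WORLD); // now every rank sees 42
    MPI_Barrier(MPI_COMM_WORLD);                      // wait for everyone
    printf("task %d: value = %d\n", rank, value);
    MPI_Finalize();
    return 0;
}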
#include <mpi.h>
int MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype, int root,
MPI_Comm comm)
#include <mpi.h>
int MPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype, int root,
MPI_Comm comm)
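Likewise, a small sketch of MPI_Scatter and MPI_Gather: the root hands one chunk of an array to each process and later collects one result back from each. The fixed buffer size of 64 is only an assumption for the illustration.
#include <stdio.h>
#include <mpi.h>

// Sketch: rank 0 scatters one int to each process; every process
// doubles its value; rank 0 gathers the results back.
int main(int argc, char *argv[]) {
    int rank, size, i;
    int send_buf[64];           // assumes at most 64 processes
    int recv_buf[64];
    int mine;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (0 == rank) {
        for (i = 0; i < size; i++) {
            send_buf[i] = i;    // one element per process
        }
    }
    MPI_Scatter(send_buf, 1, MPI_INT, &mine, 1, MPI_INT, 0, MPI_COMM_WORLD);
    mine *= 2;                  // each process works on its own element
    MPI_Gather(&mine, 1, MPI_INT, recv_buf, 1, MPI_INT, 0, MPI_COMM_WORLD);
    if (0 == rank) {
        for (i = 0; i < size; i++) {
            printf("result[%d] = %d\n", i, recv_buf[i]);
        }
    }
    MPI_Finalize();
    return 0;
}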
See [2] for more APIs.
A group is used to organize a set of processes; a communicator describes the communication relationship among them.
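One common way to create a new communicator over a subset of processes is MPI_Comm_split; the sketch below splits MPI_COMM_WORLD into an "even" and an "odd" communicator by rank (the even/odd partition is just an example).
#include <stdio.h>
#include <mpi.h>

// Sketch: partition MPI_COMM_WORLD into two communicators and report
// each process's rank inside its new group.
int main(int argc, char *argv[]) {
    int world_rank, new_rank;
    MPI_Comm new_comm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    // processes with the same color end up in the same new communicator
    MPI_Comm_split(MPI_COMM_WORLD, world_rank % 2, world_rank, &new_comm);
    MPI_Comm_rank(new_comm, &new_rank);
    printf("world rank %d -> rank %d in the %s communicator\n",
           world_rank, new_rank, (world_rank % 2 == 0) ? "even" : "odd");
    MPI_Comm_free(&new_comm);
    MPI_Finalize();
    return 0;
}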