These are two very short test programs for hybrid MPI + OpenMP programming (the MPI implementation used is MPICH 3.1).
Process 0 first receives one message from every other process, then sends one message back to each of them.
Every other process spawns two OpenMP threads.
In sendrecvtest4, thread 1 does both the send and the receive while thread 0 does nothing; in sendrecvtest5, thread 0 does the receive and thread 1 does the send.
After compiling, both programs run correctly on a single machine. Across multiple machines, 4 still runs correctly, but 5 aborts in various ways. When I tested with two machines (8 cores in total), the runtime errors from 5 were as follows; only in rare cases did it run normally.
Note: on a single machine both programs run correctly (screenshot attached).
Compiled with: mpicc -o sendrecv4 sendrecvtest4.c -fopenmp
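For the multi-machine runs, a typical MPICH 3.1 launch would look like the sketch below; the host file name and its contents are placeholders for illustration only, not the actual test setup:

hosts (placeholder host file, 4 processes per machine):
node1:4
node2:4

mpiexec -f hosts -n 8 ./sendrecv4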
//sendrecvtest4.c
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <error.h>
#include <mpi.h>
#include <omp.h>
#include <time.h>

#define TAG  1
#define TAG2 2

int main(int argc, char *argv[])
{
    clock_t starttime, endtime;
    double totaltime;
    starttime = clock();

    int namelen;
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int rank, size;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Get_processor_name(processor_name, &namelen);

    MPI_Request recv_request;
    MPI_Request send_request;
    MPI_Status status;
    int found = 0;
    int found2 = 0;
    int flag = 0;
    int foundmyself = 0;

    if (rank == 0)
    {
        int rankindex;
        // first receive one message from every other rank, in any order
        for (rankindex = 1; rankindex < size; rankindex++)
            MPI_Recv(&found, 1, MPI_INT, MPI_ANY_SOURCE, TAG, MPI_COMM_WORLD, &status);
        // then send one message back to every other rank
        for (rankindex = 1; rankindex < size; rankindex++)
            MPI_Send(&found2, 1, MPI_INT, rankindex, TAG2, MPI_COMM_WORLD);
    }
    else // rank != 0
    {
        #pragma omp parallel num_threads(2)
        {
            int i = omp_get_thread_num();
            if (i == 0)
            {
                ; // thread 0 does nothing in this version
                //MPI_Recv(&found2,1,MPI_INT,0,TAG2,MPI_COMM_WORLD,&status);
                //printf("Slave receive success\n");
            }
            else
            {
                // thread 1 does both the send and the receive
                MPI_Send(&found, 1, MPI_INT, 0, TAG, MPI_COMM_WORLD);
                printf("Slave Send success!\n");
                MPI_Recv(&found2, 1, MPI_INT, 0, TAG2, MPI_COMM_WORLD, &status);
                printf("Slave receive success\n");
            } // thread 1
        }
    } // worker processes

    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();
    return 0;
}
//sendrecvtest5.c
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <error.h>
#include <mpi.h>
#include <omp.h>
#include <time.h>

#define TAG  1
#define TAG2 2

int main(int argc, char *argv[])
{
    clock_t starttime, endtime;
    double totaltime;
    starttime = clock();

    int namelen;
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int rank, size;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Get_processor_name(processor_name, &namelen);

    MPI_Request recv_request;
    MPI_Request send_request;
    MPI_Status status;
    int found = 0;
    int found2 = 0;
    int flag = 0;
    int foundmyself = 0;

    if (rank == 0)
    {
        int rankindex;
        // first receive one message from every other rank, in any order
        for (rankindex = 1; rankindex < size; rankindex++)
            MPI_Recv(&found, 1, MPI_INT, MPI_ANY_SOURCE, TAG, MPI_COMM_WORLD, &status);
        // then send one message back to every other rank
        for (rankindex = 1; rankindex < size; rankindex++)
            MPI_Send(&found2, 1, MPI_INT, rankindex, TAG2, MPI_COMM_WORLD);
    }
    else // rank != 0
    {
        #pragma omp parallel num_threads(2)
        {
            int i = omp_get_thread_num();
            if (i == 0)
            {
                // thread 0 does the receive in this version
                MPI_Recv(&found2, 1, MPI_INT, 0, TAG2, MPI_COMM_WORLD, &status);
                printf("Slave receive success\n");
            }
            else
            {
                // thread 1 does the send
                MPI_Send(&found, 1, MPI_INT, 0, TAG, MPI_COMM_WORLD);
                printf("Slave Send success!\n");
                //MPI_Recv(&found2,1,MPI_INT,0,TAG2,MPI_COMM_WORLD,&status);
                //printf("Slave receive success\n");
            } // thread 1
        }
    } // worker processes

    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();
    return 0;
}
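For reference: in sendrecvtest5 two different OpenMP threads of each worker process issue MPI calls, while both programs initialize MPI with plain MPI_Init. The MPI standard ties multi-threaded use of MPI to the thread-support level requested at initialization (MPI_THREAD_SINGLE up to MPI_THREAD_MULTIPLE). Below is a minimal, self-contained sketch of requesting and checking MPI_THREAD_MULTIPLE with MPI_Init_thread; it is shown only as context, not as a confirmed explanation of the crashes described above.

//threadlevel_sketch.c (illustrative only, not part of the test programs)
#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int provided;
    // ask for full thread support: any thread may call MPI at any time
    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    // the thread-level constants are ordered, so a simple comparison works
    if (provided < MPI_THREAD_MULTIPLE)
        printf("granted thread level %d; MPI_THREAD_MULTIPLE not available\n", provided);
    MPI_Finalize();
    return 0;
}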