|
|
|
|
Function Name | Usage |
---|---|
MPI_Isend(void *buff,
int count, MPI_Datatype type, int dest, int tag, MPI_Comm comm, MPI_Request *request ) |
|
MPI_Irecv(void *buff,
int count, MPI_Datatype type, int source, int tag, MPI_Comm comm, MPI_Request *request ) |
|
|
But sooner or later, you will need to work with the data that you have been sending/receiving asynchronously.
Function Name | Usage |
---|---|
MPI_Test(MPI_Request *request,
int *flag, MPI_Status * status) |
This function returns immediately.
The function will return the status of an ASYNCHRONOUS send/receive request The input parameter is request associated with an asynchronous send/receive operation. The return values are:
|
MPI_Wait(MPI_Request *request,
MPI_Status * status) |
This function BLOCKS (waits) until the request is completed.
The input parameter is the request associated with an asynchronous send/receive operation. The function waits until that ASYNCHRONOUS send/receive request is COMPLETE.
The return value is:
|
MPI_Isend(void *buff, int count, MPI_Datatype type, int dest, int tag, MPI_Comm comm, MPI_Request *request ) MPI_Wait( MPI_Request *request, MPI_Status * status) |
Similarly for the receive operation
int num_procs; double x[MAX]; // Input array int main(int argc, char *argv[]) { int start, stop; int myid; double my_min, others_min; // Minimum double my_max, others_max; // Maximum MPI_Init(&argc,&argv); // Initialize MPI_Comm_size(MPI_COMM_WORLD, &num_procs); // Get # processors MPI_Comm_rank(MPI_COMM_WORLD, &myid); // Get my rank (id) /* -------------------------------------- Find the min. among my numbers -------------------------------------- */ n = MAX/num_procs; start = myid * n; if ( myid != (num_procs-1) ) { stop = start + n; } else { stop = MAX; } my_min = x[start]; for (i = start+1; i < stop; i = i + n ) { if ( x[i] < my_min ) my_min = x[i]; } if ( myid == 0 ) { /* ------------------------------------- Get the min from others and compare ------------------------------------- */ for (i = 1; i < num_procs; i++) { MPI_Recv(&others_min, 1, MPI_DOUBLE, i, 0, MPI_COMM_WORLD, NULL); if ( others_min < my_min ) my_min = others_min; } } else { MPI_Send(&my_min, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD); } ************************************************************ Synchronization point: all programs are stopped here due to the synchronous send/receive operations used ************************************************************ /* -------------------------------------- Now find the max. among my numbers -------------------------------------- */ my_max = x[start]; for (i = start+1; i < stop; i = i + n ) { if ( x[i] > my_max ) my_max = x[i]; } if ( myid == 0 ) { /* ------------------------------------- Get the max from others and compare ------------------------------------- */ for (i = 1; i < num_procs; i++) { MPI_Recv(&others_max, 1, MPI_DOUBLE, i, 0, MPI_COMM_WORLD, NULL); if ( others_max > my_max ) my_max = others_max; } } else { MPI_Send(&my_max, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD); } MPI_Finalize(); } |
Demo instruction:
The synchronization will cause some processors to idle (waste time)
Make sure you use different buffers in every ASYNCHRONOUS communication that has not yet completed !!! |
int num_procs; double x[MAX]; // Input array int main(int argc, char *argv[]) { int start, stop; int myid; double my_min; double others_min[100]; // Save minimum separately double my_max; double others_max[100]; // Save maximum separately MPI_Request rq_min[100], rq_max[100]; // Status variables MPI_Init(&argc,&argv); // Initialize MPI_Comm_size(MPI_COMM_WORLD, &num_procs); // Get # processors MPI_Comm_rank(MPI_COMM_WORLD, &myid); // Get my rank (id) /* -------------------------------------- Find the min. among my numbers -------------------------------------- */ n = MAX/num_procs; start = myid * n; if ( myid != (num_procs-1) ) { stop = start + n; } else { stop = MAX; } my_min = x[start]; for (i = start+1; i < stop; i = i + n ) { if ( x[i] < my_min ) my_min = x[i]; } if ( myid == 0 ) { /* ------------------------------------- Get the min from others and compare ------------------------------------- */ for (i = 1; i < num_procs; i++) { MPI_Irecv(&others_min[i], 1, MPI_DOUBLE, i, 0, MPI_COMM_WORLD, &rq_min[i]); } } else { MPI_Isend(&my_min, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &rq_min[0]); } ***************************************************************** NO Synchronization: all programs proceeds to the next stage of computation independently and will not wait on each other due to the asynchronous send/receive operations used ***************************************************************** /* -------------------------------------- Now find the max. 
among my numbers -------------------------------------- */ my_max = x[start]; for (i = start+1; i < stop; i = i + n ) { if ( x[i] > my_max ) my_max = x[i]; } if ( myid == 0 ) { /* ------------------------------------- Get the max from others and compare ------------------------------------- */ for (i = 1; i < num_procs; i++) { MPI_Irecv(&others_max[i], 1, MPI_DOUBLE, i, 0, MPI_COMM_WORLD, &rq_max[i]); } } else { MPI_Isend(&my_max, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &rq_min[0]); } ***************************************************************** Now we must gather the min[] and max[] from different processes HERE we must synchronize (we have no choice) That's OK, we are at the END of the processing ! Waiting for others to finish at the END to complete the whole job is a necessity, it's not wasting time ***************************************************************** /* -------------------------------------- Now synchronize to compute results -------------------------------------- */ if ( myid == 0 ) { // The Master processor waits for all min[] and max[] // messages to arrive for ( i = 1; i < num_procs; i++) { MPI_Wait( &rq_min[i], NULL ); if ( others_min[i] < my_min ) my_min = others_min[i]; } for ( i = 1; i < num_procs; i++) { MPI_Wait( &rq_max[i], NULL ); if ( others_max[i] > my_max ) my_max = others_max[i]; } // Print result.... cout << "min = " << my_min << endl << endl; cout << "max = " << my_max << endl << endl; } else { // The other processes must wait until their messages // has been received before exiting !!! MPI_Wait( &rq_min[0], NULL ); MPI_Wait( &rq_max[0], NULL ); } MPI_Finalize(); } |
Demo instruction: