From eb7a5cd6227d45574215145fa876e997191cf294 Mon Sep 17 00:00:00 2001
From: Tiago Pereira
Date: Wed, 18 Jun 2014 13:54:27 +0100
Subject: [PATCH] Using MPI_Ssend instead of MPI_Send

MPI_Send was giving buffer issues on pleiades. Ssend seems much faster.
---
 rh15d_mpi/rh15d_ray_pool.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/rh15d_mpi/rh15d_ray_pool.c b/rh15d_mpi/rh15d_ray_pool.c
index 90eaf9a..29efc18 100644
--- a/rh15d_mpi/rh15d_ray_pool.c
+++ b/rh15d_mpi/rh15d_ray_pool.c
@@ -219,7 +219,7 @@ void overlord(void) {
                MPI_COMM_WORLD, &status);
   /* Tell all the drones to exit by sending an empty message with the DIETAG. */
     for (rank = 1; rank <= mpi.size; ++rank) {
-      MPI_Send(0, 0, MPI_INT, rank, DIETAG, MPI_COMM_WORLD);
+      MPI_Ssend(0, 0, MPI_INT, rank, DIETAG, MPI_COMM_WORLD);
     }
   } else {
     /* Seed the drones; send one unit of work to each drone. */
@@ -236,7 +236,7 @@ void overlord(void) {
      */

     /* Send it to each rank */
-    MPI_Send(&current_task,       /* message buffer */
+    MPI_Ssend(&current_task,      /* message buffer */
              1,                   /* one data item */
              MPI_LONG,            /* data item is an integer */
              rank,                /* destination process rank */
@@ -256,7 +256,7 @@ void overlord(void) {
             MPI_COMM_WORLD,       /* default communicator */
             &status);             /* info about the received message */
     /* Send the drone a new work unit */
-    MPI_Send(&current_task,       /* message buffer */
+    MPI_Ssend(&current_task,      /* message buffer */
              1,                   /* one data item */
              MPI_LONG,            /* data item is an integer */
              status.MPI_SOURCE,   /* to who we just received from */
@@ -277,7 +277,7 @@ void overlord(void) {

   /* Tell all the drones to exit by sending an empty message with the DIETAG. */
     for (rank = 1; rank <= mpi.size; ++rank) {
-      MPI_Send(0, 0, MPI_INT, rank, DIETAG, MPI_COMM_WORLD);
+      MPI_Ssend(0, 0, MPI_INT, rank, DIETAG, MPI_COMM_WORLD);
     }
   }
 }