manager-spawn.c

/* Code example adapted from http://mpi-forum.org/docs/mpi-2.0/mpi-20-html/node98.htm */
#include "mpi.h"
#include <stdio.h>

int main(int argc, char *argv[]) {
    int world_size, universe_size, *universe_sizep, flag;
    MPI_Comm everyone; /* intercommunicator to the spawned workers */
    char worker_program[100];

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
    if (world_size != 1)
        printf("Top heavy with management\n\n");

    /* MPI_Comm_get_attr replaces the deprecated MPI_Attr_get. */
    MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, &universe_sizep, &flag);
    if (!flag) {
        printf("This MPI does not support UNIVERSE_SIZE.\n"
               "How many processes total? ");
        scanf("%d", &universe_size);
    } else {
        universe_size = *universe_sizep;
        printf("Universe size = %d\n", universe_size);
    }
    if (universe_size == 1) {
        printf("No room to start workers, so setting the universe size to 5\n");
        universe_size = 5;
    }

    /*
     * Now spawn the workers. Note that there is a run-time determination
     * of what type of worker to spawn, and presumably this calculation must
     * be done at run time and cannot be calculated before starting
     * the program. If everything is known when the application is
     * first started, it is generally better to start them all at once
     * in a single MPI_COMM_WORLD.
     */
    /* choose_worker_program(worker_program); -- run-time choice in the
     * original example; here the worker binary name is fixed. */
    snprintf(worker_program, sizeof(worker_program), "./worker_program");
    MPI_Comm_spawn(worker_program, MPI_ARGV_NULL, universe_size - 1,
                   MPI_INFO_NULL, 0, MPI_COMM_SELF, &everyone,
                   MPI_ERRCODES_IGNORE);

    /*
     * Parallel code here. The communicator "everyone" can be used
     * to communicate with the spawned processes, which have ranks
     * 0, ..., universe_size-2 in the remote group of the
     * intercommunicator "everyone".
     */
    MPI_Finalize();
    return 0;
}
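
The manager spawns universe_size - 1 copies of ./worker_program, but the listing above does not show the worker side. Below is a minimal sketch of what such a worker could look like; the file name and its contents are an assumption, not part of the original page. The key point is that a spawned process recovers the intercommunicator back to the manager with MPI_Comm_get_parent.

/* worker-spawn.c -- hypothetical companion sketch, not part of the original
 * listing. Assumes it is launched by the manager via MPI_Comm_spawn. */
#include "mpi.h"
#include <stdio.h>

int main(int argc, char *argv[]) {
    int rank, size;
    MPI_Comm parent; /* intercommunicator back to the manager */

    MPI_Init(&argc, &argv);
    MPI_Comm_get_parent(&parent);
    if (parent == MPI_COMM_NULL) {
        printf("No parent: this program must be started via MPI_Comm_spawn\n");
        MPI_Finalize();
        return 1;
    }

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    printf("Worker %d of %d started by manager\n", rank, size);

    /* Parallel worker code here; "parent" addresses the manager,
     * which is rank 0 in the remote group. */
    MPI_Finalize();
    return 0;
}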