jmbatto 2 months ago
commit a4df80d4ee
4 changed files with 219 additions and 0 deletions
  1. Makefile          +28 -0
  2. Matrix.xmptype.h  +71 -0
  3. init.c            +70 -0
  4. manager-spawn.c   +50 -0

+ 28 - 0
Makefile

@@ -0,0 +1,28 @@
+CC=gcc
+XMPCC=xmpcc
+LIBS=-L/usr/lib -lmpi
+INCL=-I /usr/lib/openmpi/include
+OBJM=manager-spawn.o 
+OBJW=init.o
+
+all: manager worker_program
+
+manager-spawn.o: manager-spawn.c
+	$(CC) -c $(INCL) manager-spawn.c
+
+init.o: init.c
+	$(XMPCC) -c init.c
+
+manager: $(OBJM)
+	$(CC) -o manager $(OBJM) $(LIBS) 
+
+worker_program: $(OBJW)
+	$(XMPCC) -o worker_program $(OBJW) $(LIBS)
+
+run: worker_program 
+	cp worker_program /usr/local/var/mpishare
+	mpirun -host mpihead,mpinode1,mpinode2,mpinode3 -n 4 /usr/local/var/mpishare/worker_program
+
+clean:
+	rm -f *.o manager worker_program

+ 71 - 0
Matrix.xmptype.h

@@ -0,0 +1,71 @@
+/* -*- mode: c++; c-file-style: "engine"; c-basic-offset: 4; indent-tabs-mode: nil -*- */
+/**
+ * @file
+ * @brief Matrix parameter wrapper for XMP
+ *
+ * 2012-01-12
+ */
+#ifndef MATRIX_XMP_TYPE_HH
+#define MATRIX_XMP_TYPE_HH 1
+
+#include <stdlib.h>
+#include <stdbool.h>
+#include <mpi.h>
+
+typedef double XMP_Matrix; /* parameter type in XMP (XMP_type) */
+typedef double* Matrix;    /* parameter type for the import/export functions (type) */
+
+/* Element datatype used as the etype of the MPI file views below */
+static MPI_Datatype Matrix_MPI_Type()
+{
+	return MPI_DOUBLE;
+}
+
+/* param import/export definitions for types that need data distribution in XMP */
+
+static bool Matrix_import(Matrix param, char* filename, const MPI_Datatype motif, const int size)
+{
+	int ack;
+	MPI_File   fh;
+	MPI_Status status;
+
+
+	ack = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
+
+	/* Test for success rather than one specific error class; on failure
+	   fh is not a valid handle and must not be closed. */
+	if (ack != MPI_SUCCESS)
+		return false;
+
+	MPI_File_set_view(fh, 0, MPI_DOUBLE, motif, "native", MPI_INFO_NULL);
+	MPI_File_read_all(fh, param, size, MPI_DOUBLE, &status);
+	MPI_File_close(&fh);
+	return true;
+}
+
+static bool Matrix_export(const Matrix param, char* filename, const MPI_Datatype motif, const int size, MPI_Comm Communicator)
+{
+	int ack;
+	MPI_File   fh;
+	MPI_Status status;
+
+	ack = MPI_File_open(Communicator, filename, MPI_MODE_WRONLY | MPI_MODE_CREATE, MPI_INFO_NULL, &fh);
+
+	if (ack != MPI_SUCCESS)
+		return false;
+
+	MPI_File_set_view(fh, 0, MPI_DOUBLE, motif, "native", MPI_INFO_NULL);
+	MPI_File_write_all(fh, param, size, MPI_DOUBLE, &status);
+	MPI_File_close(&fh);
+	return true;
+}
+
+#endif
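
The motif argument to both functions is an MPI filetype describing which part of the file each rank owns; the commit never shows a caller. A minimal, hypothetical sketch (the function export_block and the filename B.dat are invented for illustration) that builds such a filetype with MPI_Type_create_subarray for the 2x2-block layout of a 4x4 matrix, matching init.c's distribute t(block, block) onto p:

/* Hypothetical usage sketch -- not part of this commit. Each of the four
 * ranks writes its own 2x2 block of a 4x4 matrix of doubles. */
#include <mpi.h>
#include "Matrix.xmptype.h"

void export_block(Matrix local_block, int rank)
{
    int sizes[2]    = {4, 4};            /* global matrix extent */
    int subsizes[2] = {2, 2};            /* this rank's block */
    int starts[2]   = {(rank / 2) * 2,   /* block origin: row */
                       (rank % 2) * 2};  /* block origin: column */
    MPI_Datatype motif;

    MPI_Type_create_subarray(2, sizes, subsizes, starts,
                             MPI_ORDER_C, Matrix_MPI_Type(), &motif);
    MPI_Type_commit(&motif);

    /* size = 4 local doubles (the 2x2 block) */
    Matrix_export(local_block, "B.dat", motif, 4, MPI_COMM_WORLD);

    MPI_Type_free(&motif);
}

MPI_ORDER_C matches the row-major C arrays; whether this rank-to-block mapping matches the layout the XMP runtime assigns to p(2, 2) would need to be checked against the XMP documentation.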

+ 70 - 0
init.c

@@ -0,0 +1,70 @@
+#include "Matrix.xmptype.h"
+#include <mpi.h>
+#include <stdio.h>
+#include <stdlib.h> // malloc, free
+#include <unistd.h> // usleep
+#include <xmp.h>
+
+#pragma xmp nodes p(2, 2)
+#pragma xmp template t(0 : 3, 0 : 3)
+#pragma xmp distribute t(block, block) onto p
+
+XMP_Matrix A[4][4];
+#pragma xmp align A[i][j] with t(j, i)
+#pragma xmp shadow A[4][4]
+
+XMP_Matrix B[4][4];
+#pragma xmp align B[i][j] with t(j, i)
+#pragma xmp shadow B[4][4]
+
+XMP_Matrix C[4][4];
+#pragma xmp align C[i][j] with t(j, i)
+
+int main(int argc, char **argv) {
+  xmp_init_mpi(&argc, &argv);
+  int rank;
+  MPI_Barrier(MPI_COMM_WORLD);
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  int i, j, n;
+  n = 4;
+  fprintf(stderr, "rank = %d ; ", rank);
+  char *processor_name = malloc(MPI_MAX_PROCESSOR_NAME * sizeof(char));
+  int name_len;
+
+#pragma xmp loop(i, j) on t(j, i)
+  for (i = 0; i < n; i++) {
+    for (j = 0; j < n; j++) {
+      fprintf(stderr, "\n(%d, %d, %d) ", rank, i, j);
+      MPI_Get_processor_name(processor_name, &name_len);
+      // Print the result
+      fprintf(stderr, "\nProcessor %d - Name: %s\n", rank, processor_name);
+      C[i][j] = 0;
+      A[i][j] = 1;
+      B[i][j] = i * n + j + 1;
+      fprintf(stderr, "B[%d,%d]=%f rank=%d, n=%d\n", i, j, B[i][j], rank, n);
+    }
+  }
+  // Free the allocated buffer
+  free(processor_name);
+#pragma xmp reflect(A)
+#pragma xmp reflect(B)
+  MPI_Barrier(MPI_COMM_WORLD);
+  usleep(100); // brief pause, presumably to let worker stderr output drain
+
+  if (rank == 0) {
+    fprintf(stdout, "\nValues of B[i,j]\n");
+    for (i = 0; i < n; i++) {
+      for (j = 0; j < n; j++) {
+        fprintf(stdout, "B[%d,%d]=%f rank=%d, n=%d\n", i, j, B[i][j], rank, n);
+      }
+    }
+    fprintf(stdout, "\nValues of A[i,j]\n");
+    for (i = 0; i < n; i++) {
+      for (j = 0; j < n; j++) {
+        fprintf(stdout, "A[%d,%d]=%f rank=%d, n=%d\n", i, j, A[i][j], rank, n);
+      }
+    }
+  }
+  xmp_finalize_mpi();
+
+  return 0;
+}
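
init.c only initializes the distributed arrays (C zeroed, A all ones, B numbered row-major) and prints them; no multiplication appears in this commit. A sketch of the product this setup appears to stage, assuming the wide shadow declarations plus the reflect directives leave each node with the remote elements of A and B it reads, could sit inside main just before the final prints:

  /* Hypothetical continuation -- not part of this commit. */
  int k;
#pragma xmp loop (i, j) on t(j, i)
  for (i = 0; i < n; i++)
    for (j = 0; j < n; j++)
      for (k = 0; k < n; k++)
        C[i][j] += A[i][k] * B[k][j];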

+ 50 - 0
manager-spawn.c

@@ -0,0 +1,50 @@
+/* Code Example from http://mpi-forum.org/docs/mpi-2.0/mpi-20-html/node98.htm */
+#include "mpi.h"
+#include <stdio.h>
+
+int main(int argc, char *argv[]) {
+  int world_size, universe_size, *universe_sizep, flag;
+  MPI_Comm everyone; /* intercommunicator */
+  char worker_program[100];
+
+  MPI_Init(&argc, &argv);
+  MPI_Comm_size(MPI_COMM_WORLD, &world_size);
+
+  if (world_size != 1)
+    printf("Top heavy with management\n\n");
+
+  /* MPI_Attr_get is deprecated; MPI_Comm_get_attr is the MPI-2 name */
+  MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, &universe_sizep, &flag);
+  if (!flag) {
+    printf("This MPI does not support UNIVERSE_SIZE.\nHow many processes total? ");
+    fflush(stdout);
+    scanf("%d", &universe_size);
+  } else {
+    universe_size = *universe_sizep;
+    printf("Universe size = %d \n", universe_size);
+  }
+  if (universe_size == 1)
+    printf("No room to start workers\n");
+
+  /*
+   * Now spawn the workers. Note that there is a run-time determination
+   * of what type of worker to spawn, and presumably this calculation must
+   * be done at run time and cannot be calculated before starting
+   * the program. If everything is known when the application is
+   * first started, it is generally better to start them all at once
+   * in a single MPI_COMM_WORLD.
+   */
+  /* The XMP worker (init.c) declares nodes p(2, 2), so exactly 4 workers are
+     needed; force a universe of 5 (1 manager + 4 workers) regardless of the
+     value queried above. */
+  universe_size = 5;
+  /* choose_worker_program(worker_program); */
+  MPI_Comm_spawn("./worker_program", MPI_ARGV_NULL, universe_size - 1,
+                 MPI_INFO_NULL, 0, MPI_COMM_SELF, &everyone,
+                 MPI_ERRCODES_IGNORE);
+  /*
+   * Parallel code here. The communicator "everyone" can be used
+   * to communicate with the spawned processes, which have ranks
+   * 0 .. universe_size-2 in the remote group of the intercommunicator
+   * "everyone".
+   */
+
+  MPI_Finalize();
+  return 0;
+}
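
The worker built by this commit's Makefile is init.c, which never calls MPI_Comm_get_parent, so the spawned processes have no way to reach the manager through the "everyone" intercommunicator. For comparison, a minimal worker skeleton in the style of the same MPI-2 example (error handling and real work omitted; not part of this commit):

/* Hypothetical worker skeleton -- not part of this commit. */
#include <mpi.h>

int main(int argc, char *argv[]) {
  MPI_Comm parent;

  MPI_Init(&argc, &argv);
  MPI_Comm_get_parent(&parent); /* intercommunicator to the manager, or MPI_COMM_NULL */
  if (parent == MPI_COMM_NULL) {
    /* started directly (e.g. by the Makefile's run target), not spawned */
  } else {
    /* spawned: the manager is rank 0 of the remote group of parent */
  }
  MPI_Finalize();
  return 0;
}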