XGCa
my_mpi.hpp
#ifndef MY_MPI_HPP
#define MY_MPI_HPP

#include <mpi.h>
#include <vector>

extern bool my_debug_flag;
extern MPI_Comm SML_COMM_WORLD;
extern int SML_COMM_RANK;

void create_comm_world(int color);
void destroy_comm_world();

/* To illustrate the different communicators: Imagine SML_COMM_WORLD has 12 ranks:
 * 0 1 2 3 4 5 6 7 8 9 10 11
 */

struct MyMPI{
  // General communicator
  MPI_Comm comm;
  MPI_Group group;
  int nranks;
  int my_rank;
  /* Reorders SML_COMM_WORLD e.g. (with 12 ranks, 4 planes):
   *  0  3  6  9
   *  1  4  7 10
   *  2  5  8 11
   * So e.g. the processes with SML_COMM_WORLD ranks 0 and 3 end up in the same
   * row of this grid, i.e. in the same inter-plane communicator.
   * Not sure why this transpose is done; maybe the order affects some MPI
   * algorithms, but it is not clear when/why this would be better.
   * Also, in the Fortran code comm overwrites SML_COMM_WORLD in the setup (both
   * are called sml_comm), so something similar could be done here. - ALS
   */

  // Intraplane communicator
  MPI_Comm plane_comm;
  int my_plane_rank;
  int n_plane_ranks;
  /* Takes a column from comm, e.g. (with 12 ranks, 4 planes, this is rank 4):
   *  3
   *  4
   *  5
   */

  // Interplane communicator
  MPI_Comm intpl_comm;
  int my_intpl_rank;
  int n_intpl_ranks;
  /* Takes a row from comm, e.g. (with 12 ranks, 4 planes, this is rank 4):
   *  1  4  7 10
   */

  MyMPI(){}

  MyMPI(const MPI_Comm& comm_world, int nplanes){
    // Get total number of processors
    MPI_Comm_size(comm_world, &nranks);

    // Assumes check that nranks is divisible by nplanes has already happened - move check here?
    int ranks_per_plane = nranks/nplanes;

    if(false/*sml_plane_major*/){
      // get the group underlying sml_comm
      //mpi_comm_group(sml_comm, sml_comm_group, ierr)
    }else{
      // redefine sml_comm pe ordering from consecutive within planes
      // (Plane-Major) to consecutive across planes (Interplane-Major)
      std::vector<int> new_sml_comm_ranks(nranks);
      int k = 0;
      for(int j=0; j<ranks_per_plane; j++){
        for(int i=0; i<nplanes; i++){
          new_sml_comm_ranks[ranks_per_plane*i + j] = k;
          k++;
        }
      }

      // get the group underlying sml_comm (== mpi_comm_world)
      // Use sml_comm_world instead of MPI_COMM_WORLD (for MPMD/staging execution)
      MPI_Group mpi_comm_world_group;
      MPI_Comm_group(comm_world, &mpi_comm_world_group);

      // create new group permuting sml_comm pe ordering to Interplane-Major
      MPI_Group_incl(mpi_comm_world_group, nranks, new_sml_comm_ranks.data(), &group);

      // Create the new communicator
      MPI_Comm_create(comm_world, group, &comm);
      MPI_Comm_rank(comm, &my_rank);
    }

    // INTRA-PLANE MPI COMMUNICATOR
    //checkpoint("\nPlane mpi communication");
    int plane_0_pe = int(my_rank/ranks_per_plane)*ranks_per_plane;
    std::vector<int> plane_ranks(ranks_per_plane);
    for(int j=0; j<ranks_per_plane; j++){
      plane_ranks[j] = plane_0_pe + j;
    }

    // Create the new plane group
    MPI_Group plane_group;
    MPI_Group_incl(group, ranks_per_plane, plane_ranks.data(), &plane_group);

    // Create the new plane communicator
    MPI_Comm_create(comm, plane_group, &plane_comm);

    //call mpi_comm_size(plane_comm, plane_nranks, ierr)
    MPI_Comm_rank(plane_comm, &my_plane_rank);
    MPI_Comm_size(plane_comm, &n_plane_ranks);

    // INTER-PLANE MPI COMMUNICATOR
    //checkpoint("\nInter-plane mpi communication");
    std::vector<int> intpl_ranks(nplanes);
    for(int i=0; i<nplanes; i++){
      intpl_ranks[i] = my_plane_rank + i*ranks_per_plane;
    }

    // Create the new inter-plane group
    MPI_Group intpl_group;
    MPI_Group_incl(group, nplanes, intpl_ranks.data(), &intpl_group);

    // Create the new inter-plane communicator
    MPI_Comm_create(comm, intpl_group, &intpl_comm);

    MPI_Comm_rank(intpl_comm, &my_intpl_rank);
    MPI_Comm_size(intpl_comm, &n_intpl_ranks);

    /* Remaining setup from the original Fortran version (ADIOS communicator,
       task-map output, and MPI placement info), kept here for reference:

    call check_point('adios mpi communication?')
    do i=0, sml_intpl_nranks-1
      adios_ranks(i)= i*ranks_per_plane
    enddo
    call mpi_group_incl(sml_comm_group, sml_intpl_nranks, adios_ranks, sml_adios_group, ierr)
    call mpi_comm_create(sml_comm, sml_adios_group, sml_adios_comm, ierr)

    //#ifndef NO_TASKMAP
    // output task-to-node mapping
    write(c_color,'(i8)') sml_comm_color
    if (sml_mype .eq. 0) then
      open(unit=14, file='TASKMAP_Color'//trim(adjustl(c_color))//'.txt', &
           status='OLD', access='SEQUENTIAL', position='APPEND' )
    endif
    call taskmap_write(14, sml_comm, &
         'SML_COMM COLOR #'//trim(adjustl(c_color)), sml_plane_mype, sml_intpl_mype, .false.)
    if (sml_mype .eq. 0) close(14)
    //#endif
    //#ifdef XGC1
    //jyc print mpi placement info
    if (sml_verbose) then
      call mpi_get_processor_name(nodename, len, ierr)
      print *, sml_mype, 'Process (plane,node,color):', sml_plane_index, trim(nodename), ' ', sml_comm_color
    endif
    //#endif
    */
  }
};

#endif
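
A minimal usage sketch (not part of the XGCa sources) showing how the struct above might be driven. It builds the decomposition directly from MPI_COMM_WORLD to stay self-contained; in XGCa the first argument would presumably be SML_COMM_WORLD as set up by create_comm_world() in my_mpi.cpp. The value of nplanes and the divisibility check are illustrative only.

// usage_sketch.cpp -- hypothetical driver, not part of the XGCa sources.
#include <mpi.h>
#include <cstdio>
#include "my_mpi.hpp"

int main(int argc, char** argv){
  MPI_Init(&argc, &argv);

  int world_rank, world_size;
  MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &world_size);

  const int nplanes = 4;            // example value only
  if(world_size % nplanes != 0){    // the MyMPI constructor assumes this check already happened
    if(world_rank == 0)
      std::fprintf(stderr, "nranks (%d) not divisible by nplanes (%d)\n",
                   world_size, nplanes);
    MPI_Finalize();
    return 1;
  }

  // Build the reordered comm plus the plane and inter-plane communicators.
  MyMPI mpi(MPI_COMM_WORLD, nplanes);

  // Each rank reports its place in the three communicators.
  std::printf("world %2d -> comm %2d/%2d  plane %d/%d  interplane %d/%d\n",
              world_rank, mpi.my_rank, mpi.nranks,
              mpi.my_plane_rank, mpi.n_plane_ranks,
              mpi.my_intpl_rank, mpi.n_intpl_ranks);

  MPI_Finalize();
  return 0;
}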
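
The Interplane-Major reordering is easiest to see by running the index arithmetic on its own. The standalone sketch below (illustrative, no MPI required) repeats the permutation loop from the constructor for the 12-rank, 4-plane example and prints the rank each SML_COMM_WORLD process receives in comm; the output reproduces the grid in the header comment, whose columns correspond to plane_comm groups and whose rows to intpl_comm groups.

// reorder_sketch.cpp -- standalone (non-MPI) check of the Interplane-Major
// permutation built in the MyMPI constructor; illustrative only, not part of XGCa.
#include <cstdio>
#include <vector>

int main(){
  const int nplanes = 4;
  const int nranks  = 12;
  const int ranks_per_plane = nranks / nplanes;

  // Same loop as in the MyMPI constructor: new_ranks[p] is the SML_COMM_WORLD
  // rank that becomes rank p of the reordered communicator (via MPI_Group_incl).
  std::vector<int> new_ranks(nranks);
  int k = 0;
  for(int j = 0; j < ranks_per_plane; j++)
    for(int i = 0; i < nplanes; i++)
      new_ranks[ranks_per_plane*i + j] = k++;

  // Invert the permutation: comm_of_world[w] = rank of SML_COMM_WORLD rank w in comm.
  std::vector<int> comm_of_world(nranks);
  for(int p = 0; p < nranks; p++)
    comm_of_world[new_ranks[p]] = p;

  // Lay the comm ranks out with SML_COMM_WORLD ranks running row by row.
  // Expected output (matching the header comment):
  //   0  3  6  9
  //   1  4  7 10
  //   2  5  8 11
  // Each column (ranks_per_plane consecutive comm ranks) is one plane_comm;
  // each row is one intpl_comm.
  for(int r = 0; r < ranks_per_plane; r++){
    for(int c = 0; c < nplanes; c++)
      std::printf("%3d", comm_of_world[nplanes*r + c]);
    std::printf("\n");
  }
  return 0;
}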