XGCa
load_balance.hpp
#ifndef LOAD_BALANCE_HPP
#define LOAD_BALANCE_HPP

#include "timer_macro.hpp"
#include "shift.hpp"
#include "count_ptl_per_node.hpp"
#include "view_arithmetic.hpp"
#include "f0_redistribute.hpp"

// Used for the original Fortran load balancer
extern "C" void calculate_load_imbalance(double f0_cost);
extern "C" int assess_whether_to_rebalance_load();
extern "C" void reset_cost_trackers();
extern "C" void set_weights(int* gvid0_pid, double* ptl_count, double* f0_node_cost);

class LoadRegion{
 public:
  enum class UpdateMethod{
    NoHistory=0
  };

 private:

  View<double*,HostType> estimated_time_per_vertex;
  std::string region_name;
  std::vector<std::string> timer_names;
  bool verbose;
  bool model_is_initialized;
  double predicted_max_region_time;
  double observed_max_region_time;
  double prediction_undershoot;

#ifdef USE_MPI
  MPI_Comm inter_period_comm;
  MPI_Comm period_comm;
#endif
  int n_periods; // How many repeating periods there are; in tokamaks this is planes
  int n_unique_ranks; // How many ranks are in a 'period' (in tokamaks, in a plane)
  int my_period_rank;

  double time_accumulated;

  UpdateMethod update_method;

  void update_model_no_history(const View<int*,CLayout,HostType>& current_partition, const View<double*, HostType>& all_periods_timings){
    // Loop over ranks' timers
    for(int i=0; i<all_periods_timings.size(); i++){
      int node_offset = current_partition(i) - 1;
      int next_node_offset = current_partition(i+1) - 1;
      int nnodes = next_node_offset - node_offset;
      // Best 0th-order estimate is to assume all vertices took the same time
      double time_per_node = all_periods_timings(i)/nnodes;
      for(int j=0; j<nnodes; j++){
        estimated_time_per_vertex(j + node_offset) = time_per_node;
      }
    }
  }
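
  /* Example for update_model_no_history (hypothetical numbers, not from the
   * source): if rank i owns vertices 10-19 (current_partition(i)=11,
   * current_partition(i+1)=21) and its measured region time is 2.0 s, each of
   * the 10 vertices is assigned an estimated cost of 0.2 s. */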

  double get_time_since_previous_call(){
    // Get time from camtimers (accumulated)
    double new_accumulated_time = 0.0;

    // Loop through all timers associated with this region, and add the accumulated time for each
    for(int i=0; i<timer_names.size(); i++){
      double accumulated_single_timer = 0.0;
      int ierr = GPTLget_wallclock(timer_names[i].c_str(), -1, &accumulated_single_timer);

      // If ierr != 0, the timer is not yet defined, so the accumulated time is zero
      if(ierr != 0) accumulated_single_timer = 0.0;

      new_accumulated_time += accumulated_single_timer;
    }

    // Subtract the previous accumulated time to get the time spent since the last update
    double time_spent = new_accumulated_time - time_accumulated;

    // Save the new accumulated time
    time_accumulated = new_accumulated_time;

    return time_spent;
  }
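
  /* Example for get_time_since_previous_call (hypothetical numbers): if the
   * region's GPTL timers sum to an accumulated 10.0 s and the previous call
   * recorded 7.5 s, this call returns 2.5 s and stores 10.0 s in
   * time_accumulated. */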

  void touch_timers(){
    for(int i=0; i<timer_names.size(); i++){
      GPTLstart(timer_names[i].c_str());
      GPTLstop(timer_names[i].c_str());
    }
  }

 public:

#ifdef USE_MPI
  LoadRegion(int n_vertices, const MPI_Comm& inter_period_comm, const MPI_Comm& period_comm, UpdateMethod update_method, bool verbose, std::string region_name, std::vector<std::string> timer_names)
    : model_is_initialized(false),
      verbose(verbose),
      time_accumulated(0.0),
      inter_period_comm(inter_period_comm),
      period_comm(period_comm),
      estimated_time_per_vertex("estimated_time_per_vertex",n_vertices),
      predicted_max_region_time(0.0), observed_max_region_time(0.0), prediction_undershoot(1.0),
      update_method(update_method),
      region_name(region_name),
      timer_names(timer_names)
  {
    // Touch GPTL timers to be sure they exist
    touch_timers();

    // Initialize timer
    reset_timer();

    // Get problem size from comms
    MPI_Comm_rank(period_comm, &my_period_rank);
    MPI_Comm_size(period_comm, &n_unique_ranks);
    MPI_Comm_size(inter_period_comm, &n_periods);
  }
#endif

  void reset_timer(){
    // Reset the timer by getting the time since the previous call and discarding the result
    double time_spent = get_time_since_previous_call();
  }

  double get_estimated_time_per_vertex(int i) const{
    return estimated_time_per_vertex(i);
  }

  View<double*,HostType> get_estimated_time_per_vertex() const{
    return estimated_time_per_vertex;
  }

  double get_prediction_undershoot() const{
    return prediction_undershoot;
  }

  double get_observed_max_region_time() const{
    return observed_max_region_time;
  }

  bool get_model_is_initialized() const{
    return model_is_initialized;
  }

  // Predict performance of a new partition based on the model
  double get_largest_predicted_time(const View<int*,CLayout,HostType>& proposed_partition) const{
    double largest_time = 0.0;
    //int largest_time_ind = 0;
    for(int i=0; i<(proposed_partition.size()-1); i++){
      double proc_time = 0.0;
      for(int i_node=proposed_partition(i)-1; i_node<proposed_partition(i+1)-1; i_node++){
        proc_time += estimated_time_per_vertex(i_node);
      }
      if(proc_time>largest_time){
        //largest_time_ind = i;
        largest_time = proc_time;
      }
    }
    return largest_time;
  }
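
  /* Example for get_largest_predicted_time (hypothetical numbers): with the
   * 1-indexed partition {1,3,6} and estimated_time_per_vertex =
   * {0.5,0.5,1.0,1.0,1.0}, rank 0 gets vertices 1-2 (1.0 s total) and rank 1
   * gets vertices 3-5 (3.0 s total), so the predicted worst-rank time is 3.0 s. */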

  void initialize_model(){
    reset_timer();

    // Model can now be used
    model_is_initialized = true;
  }

  void update_model(const View<int*,CLayout,HostType>& current_partition){
    if(update_method==UpdateMethod::NoHistory){
      // Get the time spent since the last model update
      double time_spent_this_rank = get_time_since_previous_call();

      // Reduce onto plane 0
      // Could try MPI_SUM rather than MPI_MAX
      double time_spent;
      if(n_periods>1){
#ifdef USE_MPI
        MPI_Reduce(&time_spent_this_rank, &time_spent, 1, MPI_DOUBLE, MPI_MAX, 0, inter_period_comm);
#endif
      }else{
        time_spent = time_spent_this_rank;
      }

      // Allocate for the timing of each rank in the period
      View<double*, HostType> all_periods_timings(NoInit("all_periods_timings"), n_unique_ranks);
#ifdef USE_MPI
      // Gather from all ranks in plane 0
      MPI_Gather(&time_spent, 1, MPI_DOUBLE, all_periods_timings.data(), 1, MPI_DOUBLE, 0, period_comm);
#endif

      if (is_rank_zero()){
        // Look at the timing prediction made last time
        if(verbose) printf("\nPredicted max time in this region was %1.5e", predicted_max_region_time);

        // Get max region time
        observed_max_region_time = 0.0;
        for(int i=0; i<all_periods_timings.size(); i++){
          observed_max_region_time = std::max(observed_max_region_time, all_periods_timings(i));
        }
        if(verbose) printf("\nObserved max time in this region was %1.5e", observed_max_region_time);

        // Take the max here so that there is never an assumed overshoot
        if(predicted_max_region_time!=0.0){ // If the time is zero, there hasn't been a prediction yet
          prediction_undershoot = std::max(1.0, observed_max_region_time/predicted_max_region_time);
        }
        if(verbose) printf("\nThe prediction undershot by a factor of %1.5e", observed_max_region_time/predicted_max_region_time);

        update_model_no_history(current_partition, all_periods_timings);
      }
    }else{
      exit_XGC("Error: Update method not available\n");
    }
  }
};

class LoadBalance{
 public:
  enum class WeightingAlgorithm{
    SingleRegionBalance=0,
    ParticleBalance,
    Fortran,
    Default
  };

  enum class ConstraintOption{
    ParticleCount=0
  };

  enum class ReweightOption{
    Always=0,
    IfBetter
  };

 private:

  WeightingAlgorithm default_weighting_algorithm;

  std::vector<LoadRegion> regions;

  View<int*,CLayout,HostType> proposed_partition; // Which processors get which vertices

  double threshold_to_rebalance;
  bool verbose;

  double constraint1_max;

  double get_even_division(const View<double*,HostType>& input, int n) const{
    double total = 0.0;
    Kokkos::parallel_reduce("sum", Kokkos::RangePolicy<HostExSpace>(0,input.size()), [=]( const int i, double& l_total){
      l_total += input(i);
    }, total);

    return (total/n);
  }

  bool greedily_fill_partition(const View<double*,HostType>& weight, const View<double*,HostType>& constraint1, double target_weight_per_rank){
    int nnode = weight.size();
    int nproc = proposed_partition.size()-1;

    // Start from rank 0 of the plane; assign nodes until it meets the desired weight per rank, then move to the next rank
    double constraint1_in_this_rank = 0.0;
    double weight_in_this_rank = 0.0;
    proposed_partition(0) = 1; // 1-indexed
    int pid = 0;
    bool assign_one_node_per_proc = false;
    for(int i_node = 0; i_node<nnode; i_node++){
      // Since every rank needs at least one node, switch to assigning at every
      // iteration if there are only as many nodes left as ranks
      if(nnode-i_node==nproc-pid) assign_one_node_per_proc = true;

      // Check if a criterion is met to consider the rank sufficiently loaded
      bool rank_is_loaded = false;

      // Criterion 1: We are out of nodes
      if(assign_one_node_per_proc) rank_is_loaded = true;

      // Criterion 2: We have hit a constraint
      if(constraint1_in_this_rank>=constraint1_max) rank_is_loaded = true;

      // Criterion 3: We have loaded the rank with an even amount of work
      if(weight_in_this_rank>=target_weight_per_rank) rank_is_loaded = true;

      // If there are enough particles assigned to this rank, move to the next rank
      if(rank_is_loaded){
        proposed_partition(pid+1) = i_node+1; // 1-indexed
        if(verbose) printf("\nRank %d is loaded (Nodes %d-%d); weight=%1.4e, ptl*nplanes=%d", pid, proposed_partition(pid), proposed_partition(pid+1)-1, weight_in_this_rank, int(constraint1_in_this_rank));
        constraint1_in_this_rank = 0.0;
        weight_in_this_rank = 0.0;
        pid++;
        if(pid==nproc-1) break;
      }
      constraint1_in_this_rank += constraint1(i_node);
      weight_in_this_rank += weight(i_node);
    }

    // The last value will always be nnode+1
    proposed_partition(nproc) = nnode+1;

    // Check n_ptl in the final rank
    constraint1_in_this_rank = 0.0;
    for(int i_node=proposed_partition(nproc-1)-1; i_node<nnode; i_node++){
      constraint1_in_this_rank += constraint1(i_node);
    }
    if(constraint1_in_this_rank>=constraint1_max){
      // Return false if the constraint was not met
      return false;
    }else{
      return true;
    }
  }
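
  /* Illustrative trace of greedily_fill_partition (hypothetical numbers): with
   * nnode=6, nproc=3, an unlimited constraint, weights {4,1,1,2,1,1}, and
   * target_weight_per_rank=3.67, the sweep yields the 1-indexed partition
   * {1,2,5,7}: rank 0 gets node 1 (weight 4), rank 1 gets nodes 2-4 (weight 4),
   * and rank 2 gets the remaining nodes 5-6. Note that a rank is declared
   * loaded at the top of an iteration, before the current node's weight is
   * added to its running totals. */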

  // Predict performance of a new partition based on the model
  double get_largest_predicted_time(const View<int*,CLayout,HostType>& partition, const View<double*,HostType>& weight) const{
    double largest_time = 0.0;
    //int largest_time_ind = 0;
    for(int i=0; i<(partition.size()-1); i++){
      double proc_time = 0.0;
      for(int i_node=partition(i)-1; i_node<partition(i+1)-1; i_node++){
        proc_time += weight(i_node);
      }
      if(proc_time>largest_time){
        //largest_time_ind = i;
        largest_time = proc_time;
      }
    }
    return largest_time;
  }

  void one_weight_balance(const View<double*,HostType>& weight, const View<double*,CLayout,HostType> constraint1){
    int nproc = proposed_partition.size()-1;

    // Ideally, each rank would get the same amount of work, i.e. the average
    double ideal_weight_per_rank = get_even_division(weight, nproc);

    // Make an initial attempt, targeting an even distribution of work
    bool meets_constraints = greedily_fill_partition(weight, constraint1, ideal_weight_per_rank);

    if(!meets_constraints){
      // An equal work distribution cannot be assigned due to a constraint.
      // First, determine the weights if only the constraint were followed
      meets_constraints = greedily_fill_partition(constraint1, constraint1, get_even_division(constraint1, nproc));
      if(!meets_constraints) exit_XGC("\nUnexpected issue in load balance: constraint-based partition doesn't satisfy constraints\n");

      // This partition meets the constraints, but the proposed timing is likely unacceptable
      double upper_limit_weight_per_rank = get_largest_predicted_time(proposed_partition, weight);
      if(verbose) printf("The ideal distribution (%1.3e) does not satisfy constraints. This distribution (%1.3e) does.\n", ideal_weight_per_rank, upper_limit_weight_per_rank);

      // Bisect the difference between the original target time and the upper limit, and see if that satisfies the constraints.
      // Continue until the load imbalance has been minimized to a precision based on the original desired time
      const double desired_precision = 0.01; // Fraction of imbalance that's worth even checking whether it can be removed
      double desired_step_size = ideal_weight_per_rank*desired_precision;
      double step_size = upper_limit_weight_per_rank - ideal_weight_per_rank;
      double compromise_weight_per_rank = upper_limit_weight_per_rank;
      if(verbose) printf("\nEmploying a binary search to narrow down on the best option.");
      while(step_size>desired_step_size){
        // Halve the size of the step
        step_size /= 2;

        if(meets_constraints){
          // Try reducing the load imbalance since constraints were met
          compromise_weight_per_rank -= step_size;
        }else{
          // Try raising the load imbalance since constraints were broken
          compromise_weight_per_rank += step_size;
        }

        // Get a new partition
        meets_constraints = greedily_fill_partition(weight, constraint1, compromise_weight_per_rank);
        if(verbose) printf("\n  Stepped by %1.3e to %1.3e. The new partition does%s meet constraints.", step_size, compromise_weight_per_rank, meets_constraints?"" : " not");
      }
      // In case we end at a partition that fails
      while(!meets_constraints){
        compromise_weight_per_rank += step_size;
        meets_constraints = greedily_fill_partition(weight, constraint1, compromise_weight_per_rank);
        if(verbose) printf("\n  Stepped by %1.3e UP to %1.3e. The new partition does%s meet constraints.", step_size, compromise_weight_per_rank, meets_constraints?"" : " not");
      }
      if(verbose) printf("\n");
    }
  }
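
  /* Sketch of the binary search in one_weight_balance (hypothetical numbers):
   * if the ideal weight per rank is 1.0 but the constraint-limited partition
   * implies 2.0, the loop first tries 1.5, then 1.25 or 1.75 depending on
   * whether the constraints were met, and so on, stopping once the step size
   * falls below desired_precision*1.0 = 0.01. */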

  // TODO
  // Evaluate overall model performance
  // MAIN_LOOP time vs sum of regions (% coverage)

  /* This function assesses the proposed partition and decides whether it is worth switching to.
   * The simplest formulation is to recommend the new partition if it is predicted to be faster
   * than the existing one. There is also a factor to account for the historical inaccuracy of the previous estimate.
   * A future option would be to require a certain level of improvement before repartitioning. */
  bool recommend_proposed_partition(){
    // Total time spent in regions (assuming that global barriers demarcate them):
    double observed_max_all_rgns_time = 0.0;
    for(int i=0; i<regions.size(); i++){
      observed_max_all_rgns_time += regions[i].get_observed_max_region_time();
    }

    // Total predicted time, accounting for the observed prediction undershoot of each region
    double predicted_max_all_rgns_time = 0.0;
    for(int i=0; i<regions.size(); i++){
      predicted_max_all_rgns_time += regions[i].get_largest_predicted_time(proposed_partition)*regions[i].get_prediction_undershoot();
    }

    double fractional_improvement = 1.0 - predicted_max_all_rgns_time/observed_max_all_rgns_time;

    if(verbose){
      printf("\nDetermining whether to adopt the proposed partition:");
      printf("\n Observed time with current partition was: %1.3e", observed_max_all_rgns_time);
      printf("\n Predicted time with proposed partition, adjusting for historical undershoot (of Rgn 0: %1.3e): %1.3e", regions[0].get_prediction_undershoot(), predicted_max_all_rgns_time);
      printf("\n Adopted if the fractional improvement of the adjusted prediction (%1.3e) exceeds the specified threshold (%1.3e)\n", fractional_improvement, threshold_to_rebalance);
    }

    // Recommend the new partition if the predicted time is less than the time observed with the current partition
    return (fractional_improvement > threshold_to_rebalance);
  }
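
  /* Worked example for recommend_proposed_partition (hypothetical numbers,
   * single region): with an observed time of 1.00 s, a raw prediction of
   * 0.80 s, and a prediction undershoot of 1.10, the adjusted prediction is
   * 0.88 s, so fractional_improvement = 1 - 0.88/1.00 = 0.12. That exceeds the
   * default threshold_to_rebalance of 0.02, so the partition is recommended. */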

  void print_new_partition(){
    // Print the new partition
    for(int i = 0; i<proposed_partition.size()-1; i++){
      int nnodes_for_proc = proposed_partition(i+1) - proposed_partition(i);
      printf("process %d : nnodes = %d; range = (%d - %d)\n", i, nnodes_for_proc, proposed_partition(i), proposed_partition(i+1)-1);
    }
  }

  void update_model(const View<int*,CLayout,HostType>& current_partition){
    for(int i=0; i<regions.size(); i++){
      regions[i].update_model(current_partition);
    }
  }

  void initialize_model(){
    for(int i=0; i<regions.size(); i++){
      regions[i].initialize_model();
    }
  }

  bool model_is_initialized(){
    bool is_initialized=true;
    for(int i=0; i<regions.size(); i++){
      is_initialized = (is_initialized && regions[i].get_model_is_initialized());
    }
    return is_initialized;
  }

  void propose_new_partition(const Kokkos::View<double*,Kokkos::LayoutRight,HostType>& ptl_count, WeightingAlgorithm weighting_algorithm){
    auto constraint1 = ptl_count; // Constrain based on particle count (memory)
    if(weighting_algorithm == WeightingAlgorithm::SingleRegionBalance){
      if(regions.size()!=1) exit_XGC("Error: Load balancing is currently single-region only\n");
      one_weight_balance(regions[0].get_estimated_time_per_vertex(), constraint1);
    }else if(weighting_algorithm == WeightingAlgorithm::ParticleBalance){
      one_weight_balance(ptl_count, constraint1);
    }

    if(verbose){
      printf("\nNewly PROPOSED partition:\n");
      print_new_partition();
    }
  }

  void set_new_partition(const Simulation<DeviceType>& sml, const Grid<DeviceType>& grid, const MagneticField<DeviceType>& magnetic_field,
                         const VelocityGrid& vgrid, Plasma& plasma, DomainDecomposition<DeviceType>& pol_decomp, WeightingAlgorithm weighting_algorithm){
    if(weighting_algorithm==WeightingAlgorithm::Fortran){
      // In the Fortran algorithm the new distribution is calculated and set here
      // Count the number of particles belonging to each node
      Kokkos::View<double**,Kokkos::LayoutRight,HostType> ptl_count = count_ptl_per_node_elec_main_ion(grid, magnetic_field, plasma, sml.electron_on);

      if(pol_decomp.gvid0_pid_h.size()==2){
        // Skip set_weights if there is only one rank per plane
        pol_decomp.gvid0_pid_h(0) = 1;
        pol_decomp.gvid0_pid_h(1) = pol_decomp.nnodes_on_plane + 1;
      }else{
        TIMER("SET_WEIGHTS",
          set_weights(pol_decomp.gvid0_pid_h.data(), ptl_count.data(), plasma.f0_node_cost.data()) );
      }
    }else{
#ifdef USE_MPI
      // Broadcast the proposal to all ranks
      MPI_Bcast(proposed_partition.data(), proposed_partition.size(), MPI_INT, 0, SML_COMM_WORLD);
#endif

      // Copy the proposal to pol_decomp
      Kokkos::deep_copy(pol_decomp.gvid0_pid_h, proposed_partition);

      if(is_rank_zero()){
        printf("\nNEW PARTITION:\n");
        print_new_partition();
      }
    }

    // Update bounds and the device copy of the decomposition array
    pol_decomp.update_pol_decomp();
  }

  void redistribute_load(const Simulation<DeviceType>& sml, const Grid<DeviceType>& grid, const MagneticField<DeviceType>& magnetic_field,
                         const VelocityGrid& vgrid, Plasma& plasma, DomainDecomposition<DeviceType>& pol_decomp,
                         const View<int*,CLayout,HostType>& old_partition){
    if (sml.f0_grid){
      // Move f information to the correct MPI rank after the decomposition update
      TIMER("F0_REDISTRIBUTE",
        f0_redistribute(plasma, pol_decomp, grid, magnetic_field, vgrid, old_partition) );
    }

    // Shift particles to the correct MPI rank after the decomposition update
    TIMER("SHIFT_R",
      shift_all_species(plasma, grid, magnetic_field, pol_decomp, Shift::NoPhase0) );
  }

  bool will_rebalance(ReweightOption reweight_option, WeightingAlgorithm weighting_algorithm, double f0_cost){
    if(reweight_option==ReweightOption::Always){
      // No need to check if the model is an improvement
      return true;
    }else{
      if(weighting_algorithm==WeightingAlgorithm::Fortran){
        // Calculate load imbalance
        calculate_load_imbalance(f0_cost);

        // Assess whether the load imbalance merits a rebalancing
        int should_rebalance_int = assess_whether_to_rebalance_load();

        // Reset the Fortran cost trackers
        reset_cost_trackers();

        return (should_rebalance_int==1);
      }else{
        // Evaluate the proposed partition on rank 0
        bool should_rebalance = false;
        if(weighting_algorithm==WeightingAlgorithm::ParticleBalance){
          // Since ParticleBalance doesn't contain an internal model, we can't evaluate the partition,
          // so just always approve, for now.
          // It would probably be better to have the model updatable by something other than time so it
          // can still be assessed in this mode
          return true;
        }else{
          if(is_rank_zero()) should_rebalance = recommend_proposed_partition();
          // Convert to int because MPI_C_BOOL documentation is confusing
          int should_rebalance_int = (should_rebalance ? 1 : 0);
#ifdef USE_MPI
          MPI_Bcast(&should_rebalance_int, 1, MPI_INT, 0, SML_COMM_WORLD);
#endif
          return (should_rebalance_int==1);
        }
      }
    }
  }

 public:

  LoadBalance(NLReader::NamelistReader& nlr, const DomainDecomposition<DeviceType>& pol_decomp)
    : proposed_partition(NoInit("proposed_partition"), pol_decomp.gvid0_pid_h.layout())
  {
    double max_mem_redist_gb = 10.0;
    std::string weighting_algorithm_str = "Fortran";
    if(nlr.namelist_present("load_balance_param")){
      nlr.use_namelist("load_balance_param");
      max_mem_redist_gb = nlr.get<double>("max_mem_redist_gb", 10.0);
      weighting_algorithm_str = nlr.get<std::string>("weighting_algorithm", "Fortran");
      threshold_to_rebalance = nlr.get<double>("threshold_to_rebalance", 0.02);
      verbose = nlr.get<bool>("verbose", false);
    }else{
      threshold_to_rebalance = 0.02;
      verbose = false;
    }

    // Simple model for memory usage from load balance - just particles
    double max_n_ptl_on_rank = max_mem_redist_gb*1024*1024*1024/80.0; // ~ 80 B per particle
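    // e.g. the default max_mem_redist_gb=10.0 allows 10*1024^3/80 = 134,217,728
    // particles (~1.3e8) per rank under this simple memory model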

    if(weighting_algorithm_str=="Fortran"){
      default_weighting_algorithm = WeightingAlgorithm::Fortran;
    }else if(weighting_algorithm_str=="ParticleBalance"){
      default_weighting_algorithm = WeightingAlgorithm::ParticleBalance;
    }else if(weighting_algorithm_str=="SingleRegionBalance"){
      default_weighting_algorithm = WeightingAlgorithm::SingleRegionBalance;
    }else{
      exit_XGC("\nError: weighting_algorithm input not valid.");
    }

#ifdef USE_MPI
    // Set up load balance regions
    if(default_weighting_algorithm==WeightingAlgorithm::SingleRegionBalance){
      LoadRegion::UpdateMethod update_method = LoadRegion::UpdateMethod::NoHistory;

      regions.push_back(LoadRegion(pol_decomp.nnodes_on_plane, pol_decomp.mpi.intpl_comm, pol_decomp.mpi.plane_comm, update_method, verbose,
        "Push",
        {"ipc1:PUSHE",
         "ipc1:PUSHI",
         "ipc2:PUSHE",
         "ipc2:PUSHI"}));
    }

    // Hard-code for now
    ConstraintOption constraint_option = ConstraintOption::ParticleCount;

    if(constraint_option == ConstraintOption::ParticleCount){
      // Normalize by n_planes since we are using the sum of planes as the constraint
      constraint1_max = max_n_ptl_on_rank*pol_decomp.mpi.n_intpl_ranks;
    }else{
      exit_XGC("\nError: ParticleCount is the only available ConstraintOption.\n");
    }
#endif
  }

  void rebalance(const Simulation<DeviceType>& sml, const Grid<DeviceType>& grid, const MagneticField<DeviceType>& magnetic_field,
                 const VelocityGrid& vgrid, Plasma& plasma, DomainDecomposition<DeviceType>& pol_decomp, ReweightOption reweight_option,
                 WeightingAlgorithm weighting_algorithm = WeightingAlgorithm::Default){

    // Skip rebalancing if poloidal decomposition is turned off. Normally we would put the function body in
    // an if statement rather than using return; kept this way in case we might want some balancing for some
    // reason even without poloidal decomposition
    if(pol_decomp.pol_decomp==false) return;

    // Rebalance is trivial if there is only one rank on a plane (generalize this from the no-MPI case)
#ifdef USE_MPI

    if(weighting_algorithm == WeightingAlgorithm::Default){
      weighting_algorithm = default_weighting_algorithm;
    }

    if(default_weighting_algorithm==WeightingAlgorithm::Fortran){
      // Override the input algorithm; if the default is Fortran, it should always use the Fortran load balancer
      weighting_algorithm = default_weighting_algorithm;
    }

    if(is_rank_zero() && verbose){
      if(weighting_algorithm==WeightingAlgorithm::Fortran) printf("\nLoad balance called with Fortran algorithm\n");
      else if(weighting_algorithm==WeightingAlgorithm::ParticleBalance) printf("\nLoad balance called with Ptl algorithm\n");
      else if(weighting_algorithm==WeightingAlgorithm::SingleRegionBalance) printf("\nLoad balance called with SingleRegion algorithm\n");
    }

    GPTLstart("REBALANCE");
    if(weighting_algorithm!=WeightingAlgorithm::Fortran){
      GPTLstart("PTL_COUNT");
      plasma.for_all_nonadiabatic_species([&](Species<DeviceType>& species){
        long long int total_n_ptl = species.get_total_n_ptl();
        int max_n_ptl = species.get_max_n_ptl();
        double avg_n_ptl = (double)(total_n_ptl)/pol_decomp.mpi.nranks;
        double max_ratio = max_n_ptl/avg_n_ptl;
        if(is_rank_zero()) printf(" Species %d: (max/avg ptl per rank = %1.2f); total n_ptl = %lld\n", species.idx, max_ratio, total_n_ptl);
      });
      GPTLstop("PTL_COUNT");

      GPTLstart("PTL_COUNT_PER_NODE");
      auto ptl_count = count_all_ptl_per_node(grid, magnetic_field, plasma);
      GPTLstop("PTL_COUNT_PER_NODE");

      if(weighting_algorithm!=WeightingAlgorithm::ParticleBalance){
        if(model_is_initialized()){
          // Update the model with the latest timing
          TIMER("LOAD_BAL_UPDATE",
            update_model(pol_decomp.gvid0_pid_h) );
        }else{
          if(is_rank_zero() && verbose) printf("Initializing timing model, no partition proposal yet.");
          initialize_model();
          GPTLstop("REBALANCE");
          return;
        }
      }

      // With the updated model, propose a new partition
      GPTLstart("LOAD_BAL_NEW_PART");
      if (is_rank_zero()) propose_new_partition(ptl_count, weighting_algorithm);
      GPTLstop("LOAD_BAL_NEW_PART");
    }

    GPTLstart("LOAD_BAL_REBAL");
    double f0_cost = sml.f0_grid ? sum_view(plasma.f0_node_cost) : 0.0;
    if(will_rebalance(reweight_option, weighting_algorithm, f0_cost)){
      // Save the old partition; it is used to save some communication in the f0 redistribution
      View<int*,CLayout,HostType> old_partition(NoInit("old_partition"), pol_decomp.gvid0_pid_h.layout());
      Kokkos::deep_copy(old_partition, pol_decomp.gvid0_pid_h);

      // Update pol_decomp
      TIMER("LOAD_BAL_SET_NEW",
        set_new_partition(sml, grid, magnetic_field, vgrid, plasma, pol_decomp, weighting_algorithm) );

      // Update particles and f0
      TIMER("LOAD_BAL_REDIST",
        redistribute_load(sml, grid, magnetic_field, vgrid, plasma, pol_decomp, old_partition) );
    }
    GPTLstop("LOAD_BAL_REBAL");
    GPTLstop("REBALANCE");
#endif
  }
};
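
/* Usage sketch (hypothetical driver code; the surrounding objects and call
 * site are assumptions, not part of this file):
 *
 *   LoadBalance load_balance(nlr, pol_decomp);
 *   // ...inside the time-step loop, after the push phase:
 *   load_balance.rebalance(sml, grid, magnetic_field, vgrid, plasma, pol_decomp,
 *                          LoadBalance::ReweightOption::IfBetter);
 */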

#endif