XGC1
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
col_grid.hpp
Go to the documentation of this file.
1 #ifndef COL_GRID_HPP
2 #define COL_GRID_HPP
3 
4 #include <vector>
5 #include <stdio.h>
6 #include <Kokkos_Core.hpp>
7 #include <Kokkos_DualView.hpp>
8 
9 #include "xgc_io.hpp"
10 #include "domain_decomposition.hpp"
11 #include "velocity_grid.hpp"
12 #include "vgrid_distribution.hpp"
13 #include "magnetic_field.hpp"
14 
15 #include "col_grid_matrix.hpp"
16 #include "col_species.hpp"
17 #include "col_vgrids.hpp"
18 
19 //> XGC's multi-species Fokker-Planck-Landau collision operator
20 // see R. Hager et al., Journal of Computational Physics 315 (2016), pp. 644-660
21 // E. S. Yoon et al., Physics of Plasmas 21 (2014), 032503
22 //
23 // The headers of the subroutines in this module refer to the corresponding parts of Hager et al.
24 // where applicable in order to make the source code easier to understand.
25 
26 
// Enum for derivatives of ED array
// Plain (unscoped) enum on purpose: the values are used directly as integer
// indices into the last dimension of TmpColData::EDs, which is sized with
// ED::N. NOTE(review): RR/RZ/ZZ look like second-derivative components and
// ER/EZ like first-derivative components per Hager et al. (2016) -- confirm
// against the kernels in col_grid.tpp.
namespace ED{
enum{
  RR=0,
  RZ,
  ZZ,
  ER,
  EZ,
  N   // number of components; used to size the ED dimension of views
};
}
38 
// <SOLVER-related>
// Maximum number of inner (Picard) iterations of the nonlinear collision
// solve; passed to picard_loop(). constexpr: guaranteed compile-time
// constant with internal linkage, safe in a header.
constexpr int vpic_inner_iter_max = 20;
41 
42 namespace Convergence{
43 
44 // Ideally this would be an enum class,
45 // but there are MPI and I/O operations that expect ints
46 enum Status{
55 };
56 
57 inline bool is_okay(Status convergence_status){
58  return (convergence_status!=MaxedIter &&
59  convergence_status!=NaNOrInfFound &&
60  convergence_status!=FirstStepFail);
61 }
62 
63 class Moments{
64  // Values
65  double w_sum; //> sum of energy over species
66  double p_sum; //> sum of momentum over species
67  double min_p_thres; //> momentum error criterion
68 
69  // Deltas
70  double dw_sum; //> sum of energy change over species (i.e. error in energy)
71  double dp_sum; //> sum of momentum change over species (i.e. error in momentum)
72  double dn_n_max; //> maximum fraction of density changes among species (i.e. maximum relative error in density in all species )
74  bool en_exit_ok;
76 
77  //> Checks the convergence criterion (i.e. conservation of mass, energy and momentum)
78  template<class Device>
79  inline void eval(int isp, int mesh_ind, const CollisionVelocityGrids<Device>& cvg, const CollisionSpecies<Device>& cs,double& dw,double& dp,double& dn_n) const{
80  double vpic_dn = 0.0;
81  double vpic_dw = 0.0;
82  double vpic_dp = 0.0;
83 
84  int gri = cs.s.h_view(isp, mesh_ind).grid_index;
85 
86  for (int index_i = 0; index_i<cvg.mesh_z.extent(2); index_i++){
87  double mesh_z2 = cvg.mesh_z(mesh_ind,gri,index_i)*cvg.mesh_z(mesh_ind,gri,index_i);
88  double vp_vol_fac = cvg.vp_vol_fac(index_i);
89  for (int index_j = 0; index_j<cvg.mesh_r.extent(2); index_j++){
90  double mesh_r2 = cvg.mesh_r(mesh_ind,gri,index_j)*cvg.mesh_r(mesh_ind,gri,index_j);
91  double vpic_dfc = (cs.pdf_np1.h_view(mesh_ind,isp,index_i,index_j) - cs.pdf_n.h_view(mesh_ind,isp,index_i,index_j))
92  *cvg.vol_h(mesh_ind,gri,index_j)*vp_vol_fac;
93 
94  vpic_dn += vpic_dfc;
95  vpic_dw += vpic_dfc*(mesh_z2 + mesh_r2);
96  vpic_dp += vpic_dfc*cvg.mesh_z(mesh_ind,gri,index_i);
97  }
98  }
99 
100  dw = vpic_dw*cs.s.h_view(isp,mesh_ind).mass;
101  dp = vpic_dp*cs.s.h_view(isp,mesh_ind).mass;
102  dn_n = std::abs(vpic_dn/cs.s.h_view(isp,mesh_ind).dens);
103  }
104 
105  public:
106 
107  template<class Device>
108  inline void set(int mesh_ind, const CollisionSpecies<Device>& col_spall){
109  w_sum = 0;
110  p_sum = 0;
111  min_p_thres = 1e99; //just large value to be reinitialized with a small value
112  for (int spi = 0; spi<col_spall.n(); spi++){
113  w_sum += col_spall.s.h_view(spi,mesh_ind).ens;
114  p_sum += col_spall.s.h_view(spi,mesh_ind).mom;
115  min_p_thres = std::min( col_spall.s.h_view(spi,mesh_ind).mass*col_spall.s.h_view(spi,mesh_ind).dens*sqrt(col_spall.s.h_view(spi,mesh_ind).numeric_vth2),
116  min_p_thres );
117  }
118  }
119 
120  template<class Device>
121  inline bool evaluate_conservation(int mesh_ind, const CollisionVelocityGrids<Device>& col_vgrids, const CollisionSpecies<Device>& col_spall, bool& values_are_all_finite){
122  dw_sum = 0.0;
123  dp_sum = 0.0;
124  dn_n_max = 0.0;
125  dens_exit_ok = true;
126 
127  for (int spi = 0; spi<col_spall.n(); spi++){
128  // Re-compute conservation errors after correction
129  double dw,dp,dn_n;
130  eval(spi, mesh_ind, col_vgrids, col_spall,dw,dp,dn_n);// moments evalulation from difference betw. pdf_n and pdf_np1
131  dw_sum += dw;
132  dp_sum += dp;
133  if(dn_n_max<dn_n) dn_n_max = dn_n;
134  dens_exit_ok = dens_exit_ok && (dn_n<1e-10);
135  }
136 
137  // Check for NaNs and Infs
138  values_are_all_finite = (std::isfinite(dw_sum) && std::isfinite(dp_sum) && std::isfinite(dn_n_max));
139 
140  en_exit_ok = std::abs(dw_sum/w_sum) <= 1e-7;
141  // bool mom_exit_ok = std::abs(col_dp_sum)/(std::max(std::abs(col_p_sum),min_p_thres)) <= (col_f_vp_max/col_f_nvz);// v_par = max_v_par of simulation domain / vth
142  // ALS : momentum tolerance changed to the tolerance in XGC's former collisions module:
143  // For momentum tolerance use main ion mass and electron density as normalization (when there are more than 2 species).
144  double mass_use = (col_spall.s.h_view(0,mesh_ind).is_electron ? col_spall.s.h_view(1,mesh_ind).mass : col_spall.s.h_view(0,mesh_ind).mass);
145  mom_exit_ok = std::abs(dp_sum)/(std::max(std::abs(p_sum),1e-3*mass_use*col_spall.s.h_view(0,mesh_ind).dens)) <= 1.0e-7;
146 
147  return (dens_exit_ok && en_exit_ok && mom_exit_ok);
148  }
149 };
150 
151 }
152 
153 
154 template<class Device>
155 struct TmpColData{
156 
157  // These views are both on CPU and GPU - dual views
158  Kokkos::View<double***,Device> gammac_spall;
159  Kokkos::DualView<int[4][4],Device> mat_pos_rel_indx; // Shouldn't need to be a dual view
160 
161  Kokkos::View<double***,Device> fhalf;
162  Kokkos::View<double***,Device> dfdr;
163  Kokkos::View<double***,Device> dfdz;
164  Kokkos::View<double****,Device> EDs;
165  Kokkos::View<double******,Device> M_ab;
166  Kokkos::View<double*****,Device> M_s;
167 
168  TmpColData<Device>(int nvr, int nvz, int sp_num, int n_vgrids, int mb_n_nodes)
169  : // Note: layout left
170  gammac_spall("gammac_spall",mb_n_nodes,sp_num,sp_num),
171  fhalf("fhalf",mb_n_nodes,nvz-1,nvr-1),
172  dfdr("dfdr",mb_n_nodes,nvz-1,nvr-1),
173  dfdz("dfdz",mb_n_nodes,nvz-1,nvr-1),
174  EDs("EDs",mb_n_nodes,nvz-1,nvr-1,ED::N),
175  M_s("M_s",mb_n_nodes,n_vgrids, (nvr-1)*(nvz-1), ED::N, nvr-1),
176  M_ab("M_ab",mb_n_nodes,n_vgrids, n_vgrids-1, (nvr-1)*(nvz-1), 3, (nvr-1)*(nvz-1)),
177  mat_pos_rel_indx("mat_pos_rel_indx")
178  {
179  // Set up mat_pos_rel_indx and send to GPU
180  int mat_loc[16] = {4,5,7,8,
181  1,2,4,5,
182  3,4,6,7,
183  0,1,3,4};
184 
185  for (int i=0;i<16;i++)
186  mat_pos_rel_indx.h_view(i/4,i%4)=mat_loc[i];
187 
188 #ifdef USE_GPU
189  Kokkos::deep_copy(mat_pos_rel_indx.d_view, mat_pos_rel_indx.h_view);
190 #endif
191  }
192 };
193 
194 
195 inline VertexList vertices_in_range(const MagneticField<DeviceType>& magnetic_field, const Grid<DeviceType>& grid, bool exclude_private_region, double inner_psi_bound, double outer_psi_bound){
196  return VertexList(grid.nnode, KOKKOS_LAMBDA(const int inode){
197  if(inner_psi_bound <= grid.psi(inode) &&
198  outer_psi_bound >= grid.psi(inode) &&
199  grid.node_is_in_included_region(inode, exclude_private_region))
200  {
201  return true;
202  }else{
203  return false;
204  }
205  });
206 }
207 
208 template<class Device>
210 
211  public:
212 
214 
216 
219 
221 
223  View<int*,CLayout,HostType> n_subcycles;
225 
226  bool diag_on; // Switches file-output of convergence status of the collision operator on/off
227  std::shared_ptr<XGC_IO_Stream> io_stream;
228 
230 
  //> Set up global variables of the collision operator.
  // Reads namelists: col_param (psi range, subcycling, start step),
  // performance_param (optional: batch size, solver backend, Ginkgo
  // settings) and diag_param (convergence diagnostic output).
  // NOTE(review): several original source lines were stripped from this
  // extract (marked below); compare against the repository source.
  // NOTE(review): parameter overwrite_existing_files is not used in the
  // visible body -- possibly consumed by one of the missing lines.
  CollisionGrid(NLReader::NamelistReader& nlr, const MagneticField<DeviceType>& magnetic_field, const Grid<DeviceType>& grid, bool exclude_private_region, bool overwrite_existing_files)
  : n_subcycles(NoInit("n_subcycles"), grid.nnode)
  {
  nlr.use_namelist("col_param");
  double inner_psi_bound = nlr.get<double>("col_pin",magnetic_field.inpsi); // Minimum of psi range where collisions are performed.
  double outer_psi_bound = nlr.get<double>("col_pout",magnetic_field.outpsi); // Maximum of psi range where collisions are performed.

  // Normalize by xpt
  // Default values are already normalized, so only normalize if not default
  if(nlr.present("col_pin")) inner_psi_bound *= magnetic_field.psi_norm();
  if(nlr.present("col_pout")) outer_psi_bound *= magnetic_field.psi_norm();

  max_n_subcycles = nlr.get<int>("col_max_n_subcycles", 1); // Enables collision subcycling. Collision time step of an unconverged vertex will be reduced, increasing the number of cycles of that vertex up to a maximum of col_max_n_subcycles. If it still does not converge, the vertex will not be attempted again.

  start_step = nlr.get<int>("col_f_start", 1); // The time step at which collisions begin

  // NOTE(review): original lines 247-249 are missing from this extract.
  // Start off with every vertex having 1 subcycle
  Kokkos::deep_copy(n_subcycles, 1);

  // These defaults have not been systematically optimized
#ifdef USE_GPU
  int default_batch_size = 64;
#else
  int default_batch_size = 1;//omp_get_max_threads();
#endif

  // Set default solver option
#if defined(USE_GINKGO) && defined(USE_GPU)
  // NOTE(review): original line 262 is missing from this extract --
  // presumably the default backend assignment (labackend = ginkgo); confirm.
#else
  // NOTE(review): original line 264 is missing from this extract --
  // presumably labackend = lapack; confirm.
#endif

  if(nlr.namelist_present("performance_param")){
  nlr.use_namelist("performance_param");
  batch_size = nlr.get<int>("mesh_batch_size", default_batch_size); // Collisions are batched (multiple vertices run in parallel) for better performance. The best value may vary by platform. Typically higher is better (for GPU simulations) but memory usage is linearly proportional to this value.
  if(nlr.present("collisions_solver")){
  std::string linalg_backend_str = nlr.get<std::string>("collisions_solver", "lapack"); // Which collisions solver to use.
  // "lapack" is always used for CPU-only simulations
  // "ginkgo" is available for GPU and improves performance
  if(linalg_backend_str=="lapack") labackend = Collisions::LinAlgBackend::lapack;
  else if(linalg_backend_str=="ginkgo") labackend = Collisions::LinAlgBackend::ginkgo;
  else exit_XGC("\nError: Invalid input option. collisions_solver must be 'lapack' or 'ginkgo'.\n");
  }
  ginkgo_residual_reduction = nlr.get<double>("ginkgo_residual_reduction", 1.0e-16); // For the Ginkgo solver, the residual reduction
  ginkgo_max_iterations = nlr.get<int>("ginkgo_max_iterations", 300); // For the Ginkgo solver, the maximum number of iterations
  }else{
  // No performance_param namelist: fall back to the platform defaults above
  batch_size = default_batch_size;
  ginkgo_residual_reduction = 1.0e-16;
  ginkgo_max_iterations = 300;
  }

  // Reject a Ginkgo request when the build cannot support it
#ifndef USE_GINKGO
  if(labackend == Collisions::LinAlgBackend::ginkgo) exit_XGC("\nError: collisions_solver=='ginkgo' was specified, but XGC was configured without Ginkgo.\n");
#endif
#ifndef USE_GPU
  if(labackend == Collisions::LinAlgBackend::ginkgo) exit_XGC("\nError: The CPU-only version of XGC cannot use collisions_solver=='ginkgo'.\n");
#endif

  if(is_rank_zero()){
  printf("\nUsing %s for collision solver.\n", (labackend == Collisions::LinAlgBackend::ginkgo ? "Ginkgo" : "LAPACK"));
  }


  // Save list of vertices on which to do collisions
  vertices = vertices_in_range(magnetic_field, grid, exclude_private_region, inner_psi_bound, outer_psi_bound);

  // Diagnostic
  nlr.use_namelist("diag_param");
  diag_on = nlr.get<bool>("diag_col_convergence_stat_on",false);

  if(diag_on){
  // Open the ADIOS-style output stream for per-step convergence statistics
  io_stream = std::make_shared<XGC_IO_Stream>();
  io_stream->Init("collision_stats");
#ifdef USE_MPI
  io_stream->Open("xgc.col_conv_status.bp", XGC_IO_Mode::Write, SML_COMM_WORLD);
#else
  io_stream->Open("xgc.col_conv_status.bp", XGC_IO_Mode::Write);
#endif
  }
  }
315 
316  int largest_n_subcycles() const{
317  return (max_n_subcycles>1 ? max_view(n_subcycles) : 1);
318  }
319 
321  nlr.use_namelist("ptl_param");
322  int plasma_nspecies = nlr.get<int>("ptl_nsp",1);
323 
324  nlr.use_namelist("sml_param");
325  bool sml_electron_on = nlr.get<bool>("sml_electron_on", false);
326  if(sml_electron_on) plasma_nspecies += 1; // Add electrons
327 
328  VelocityGrid vgrid(nlr);
329 #ifdef USE_GPU
330  int default_batch_size = 64;
331 #else
332  int default_batch_size = 1;//omp_get_max_threads();
333 #endif
334  int col_grid_batch_size;
335  if(nlr.namelist_present("performance_param")){
336  nlr.use_namelist("performance_param");
337  col_grid_batch_size = nlr.get<int>("mesh_batch_size", default_batch_size);
338  }else{
339  col_grid_batch_size = default_batch_size;
340  }
341 
342  double device_col_GB_per_vertex = plasma_nspecies*std::max(plasma_nspecies-1,1)*(vgrid.nvp*vgrid.nmu)*(vgrid.nvp*vgrid.nmu)*8*BYTES_TO_GB;
343  double gpu_memory_usage = (col_grid_batch_size*device_col_GB_per_vertex);
344 
345  MemoryPrediction memory_prediction{"Fields", 0.0, gpu_memory_usage};
346 
347  return memory_prediction;
348  }
349 
  // --- Member-function declarations; definitions live in col_grid.tpp. ---
  // Section references in the tpp follow Hager et al. (2016) per the file
  // header. One-line summaries below are hedged where the tpp is not
  // visible in this extract.

  // Core init: per-species initialization of the solve on one vertex (mesh_ind)
  void core_init(int isp, int mesh_ind, const CollisionVelocityGrids<Device>& col_vgrids, const CollisionSpecies<Device>& col_spall ) const;

  // Get maxw_fac -- presumably a Maxwellian weighting factor for a velocity
  // cell (mesh_dr, mesh_r) at thermal speed^2 numeric_vth2; confirm in tpp
  static KOKKOS_INLINE_FUNCTION double get_maxw_fac(double mesh_dr, double mesh_r, double numeric_vth2) ;

  // Get numeric v_thermal equilibrium for a given species against a given grid
  View<double*,Device> get_numeric_v_thermal_equil(int mb_n_nodes, int spi, int grj, const CollisionVelocityGrids<Device>& col_vgrids, const CollisionSpecies<Device>& col_spall) const;

  // Delta init: initialization of per-grid-pair delta quantities (see tpp)
  void core_delta_init(int mb_n_nodes, int gri, int grj, int spi, CollisionVelocityGrids<Device>& col_vgrids, const CollisionSpecies<Device>& col_spall) const;

  // LU_matrix_ftn: fills one entry/stencil position of the LU system
  // (called per cell); device-callable
  static KOKKOS_INLINE_FUNCTION void LU_matrix_ftn(int mesh_ind, int gri, int grj, int spi, int cell_i,int cell_j,const CollisionVelocityGrids<Device>& col_vgrids,int mprl_col, int mat_pos,double coeff1,double coeff2, const TmpColData<Device>& tcd, const Kokkos::View<int**,Device>& index_map_LU_d,typename Collisions::GridMatrix<Device>::values_array_t LU_values);

  // LU_matrix: assembles the LU system matrix for species spi on grids (gri, grj)
  void LU_matrix(int mb_n_nodes, int gri, int grj, int spi, const CollisionVelocityGrids<Device>& col_vgrids,const TmpColData<Device>& tcd, Collisions::GridMatrix<Device>* const mtx) const;

  // the core loop including the picard loop is now inside this function;
  // writes per-vertex convergence status and moments
  void picard_loop(int vpic_inner_iter_max, const CollisionVelocityGrids<Device>& col_vgrids, CollisionSpecies<Device>& col_spall, TmpColData<Device>& tcd, int mb_n_nodes, Kokkos::View<Convergence::Status*,HostType>& convergence_status, Kokkos::View<Convergence::Moments*,HostType>& moments) const;

  // dispatch for E_and_D calc (fills TmpColData::EDs; components per ED:: enum)
  void E_and_D(int mb_n_nodes, int gri, int grj, const CollisionVelocityGrids<Device>& col_vgrids, TmpColData<Device>& tcd) const;

  // E_and_D_s: same-species kernel body, device-callable
  static KOKKOS_INLINE_FUNCTION void E_and_D_s(int idx, int mb_n_nodes, int nvrm1, int nvzm1, const TmpColData<Device>& tcd, int gri) ;

  // dispatch for angle_avg calc
  void angle_avg(int mb_n_nodes, int gri, int grj, CollisionVelocityGrids<Device>& col_vgrids, TmpColData<Device>& tcd) const;

  // angle_avg_s: same-species kernel body, device-callable
  static KOKKOS_INLINE_FUNCTION void angle_avg_s(int idx, int mb_n_nodes, int nvrm1, int nvzm1, const CollisionVelocityGrids<Device>& col_vgrids, const TmpColData<Device>& tcd, int gri);

  // E_and_D_ab: cross-species (a,b) kernel body, device-callable
  static KOKKOS_INLINE_FUNCTION void E_and_D_ab(int idx, int mb_n_nodes, int nvrm1, int nvzm1, const CollisionVelocityGrids<Device>& col_vgrids, const TmpColData<Device>& tcd, int gri, int grj);

  // angle_avg_ab: cross-species (a,b) kernel body, device-callable
  static KOKKOS_INLINE_FUNCTION void angle_avg_ab(int idx, int mb_n_nodes, int nvrm1, int nvzm1, const CollisionVelocityGrids<Device>& col_vgrids, const TmpColData<Device>& tcd, int gri, int grj);

  // f_df: fills fhalf/dfdr/dfdz scratch arrays for species spi on grid grj
  void f_df(int mb_n_nodes, const CollisionVelocityGrids<Device>& col_vgrids, const CollisionSpecies<Device>& col_spall,int spi,int grj,TmpColData<Device>& tcd) const;

  // Main collision algorithm; returns per-vertex convergence status
  Kokkos::View<Convergence::Status*,HostType> core(CollisionVelocityGrids<Device>& col_vgrids, CollisionSpecies<Device>& col_spall, TmpColData<Device>& tcd, int mb_n_nodes) const;


  // Carries out collisions on the assigned vertices: reads f0_f, writes the
  // change df_out, flags convergence in converged_all and records per-node
  // timing cost in node_cost
  void collision(std::vector<Species<DeviceType>> &all_species, const Moments& moments, const DomainDecomposition<DeviceType>& pol_decomp, const VertexList& assigned, const VGridDistribution<HostType>& f0_f, VGridDistribution<HostType>& df_out, double dt, View<int*,CLayout,HostType>& converged_all, View<double*,CLayout,HostType>& node_cost) const;
};
398 
399 #include "col_grid.tpp"
400 
401 #endif
double min_p_thres
Definition: col_grid.hpp:67
double inpsi
Boundary condition used in a few spots.
Definition: magnetic_field.hpp:42
void collision(std::vector< Species< DeviceType >> &all_species, const Moments &moments, const DomainDecomposition< DeviceType > &pol_decomp, const VertexList &assigned, const VGridDistribution< HostType > &f0_f, VGridDistribution< HostType > &df_out, double dt, View< int *, CLayout, HostType > &converged_all, View< double *, CLayout, HostType > &node_cost) const
Definition: col_grid.tpp:22
Definition: col_grid_matrix.hpp:23
double w_sum
Definition: col_grid.hpp:65
int nmu
n points in mu (not including zero)
Definition: velocity_grid.hpp:13
bool is_rank_zero()
Definition: globals.hpp:27
MPI_Comm SML_COMM_WORLD
Definition: my_mpi.cpp:4
int nvp
n points in parallel velocity (not including zero)
Definition: velocity_grid.hpp:9
Kokkos::View< double ******, Device > M_ab
Definition: col_grid.hpp:165
static KOKKOS_INLINE_FUNCTION void LU_matrix_ftn(int mesh_ind, int gri, int grj, int spi, int cell_i, int cell_j, const CollisionVelocityGrids< Device > &col_vgrids, int mprl_col, int mat_pos, double coeff1, double coeff2, const TmpColData< Device > &tcd, const Kokkos::View< int **, Device > &index_map_LU_d, typename Collisions::GridMatrix< Device >::values_array_t LU_values)
Definition: col_grid.tpp:836
int batch_size
Definition: col_grid.hpp:213
T get(const string &param, const T default_val, int val_ind=0)
Definition: NamelistReader.hpp:373
constexpr double BYTES_TO_GB
Definition: constants.hpp:12
Kokkos::View< Convergence::Status *, HostType > core(CollisionVelocityGrids< Device > &col_vgrids, CollisionSpecies< Device > &col_spall, TmpColData< Device > &tcd, int mb_n_nodes) const
Definition: col_grid.tpp:112
Kokkos::View< double *****, Device > M_s
Definition: col_grid.hpp:166
Definition: col_grid.hpp:209
Kokkos::DualView< double ****, Kokkos::LayoutRight, Device > pdf_np1
Definition: col_species.hpp:88
Definition: velocity_grid.hpp:8
void set(int mesh_ind, const CollisionSpecies< Device > &col_spall)
Definition: col_grid.hpp:108
bool present(const string &param)
Definition: NamelistReader.hpp:363
Definition: col_grid.hpp:47
Kokkos::View< double ***, Device > fhalf
Definition: col_grid.hpp:161
Status
Definition: col_grid.hpp:46
Definition: col_grid.hpp:52
Collisions::LinAlgBackend labackend
Definition: col_grid.hpp:215
double p_sum
Definition: col_grid.hpp:66
Kokkos::View< value_type ***, Kokkos::LayoutRight, Device, Kokkos::MemoryTraits< Kokkos::Unmanaged >> values_array_t
Definition: col_grid_matrix.hpp:31
Kokkos::View< double ***, Device > gammac_spall
Definition: col_grid.hpp:158
Definition: NamelistReader.hpp:193
Definition: magnetic_field.hpp:12
Definition: moments.hpp:9
KOKKOS_INLINE_FUNCTION bool node_is_in_included_region(const int inode, const bool exclude_private_region) const
Definition: grid.tpp:225
bool en_exit_ok
Definition: col_grid.hpp:74
Definition: col_grid.hpp:30
Kokkos::DualView< CollisionSpeciesScalars **, Device > s
Definition: col_species.hpp:85
double dn_n_max
Definition: col_grid.hpp:72
CollisionGrid()
Definition: col_grid.hpp:229
int ginkgo_max_iterations
Definition: col_grid.hpp:218
T::value_type max_view(const T &view)
Definition: view_arithmetic.hpp:118
KOKKOS_INLINE_FUNCTION double psi_norm() const
Definition: magnetic_field.tpp:3
Definition: col_vgrids.hpp:13
void core_delta_init(int mb_n_nodes, int gri, int grj, int spi, CollisionVelocityGrids< Device > &col_vgrids, const CollisionSpecies< Device > &col_spall) const
Definition: col_grid.tpp:775
View< double *, Device > get_numeric_v_thermal_equil(int mb_n_nodes, int spi, int grj, const CollisionVelocityGrids< Device > &col_vgrids, const CollisionSpecies< Device > &col_spall) const
Definition: col_grid.tpp:743
KOKKOS_INLINE_FUNCTION double vp_vol_fac(int ivz) const
Definition: col_vgrids.hpp:107
static KOKKOS_INLINE_FUNCTION void E_and_D_ab(int idx, int mb_n_nodes, int nvrm1, int nvzm1, const CollisionVelocityGrids< Device > &col_vgrids, const TmpColData< Device > &tcd, int gri, int grj)
Definition: col_grid.tpp:513
Definition: col_grid.hpp:48
VertexList vertices
Definition: col_grid.hpp:224
Definition: col_grid.hpp:155
VertexList vertices_in_range(const MagneticField< DeviceType > &magnetic_field, const Grid< DeviceType > &grid, bool exclude_private_region, double inner_psi_bound, double outer_psi_bound)
Definition: col_grid.hpp:195
void LU_matrix(int mb_n_nodes, int gri, int grj, int spi, const CollisionVelocityGrids< Device > &col_vgrids, const TmpColData< Device > &tcd, Collisions::GridMatrix< Device > *const mtx) const
Definition: col_grid.tpp:881
const int vpic_inner_iter_max
Definition: col_grid.hpp:40
Kokkos::View< double *, Kokkos::LayoutRight, Device > psi
An array of psi coordinates.
Definition: grid.hpp:176
static KOKKOS_INLINE_FUNCTION void E_and_D_s(int idx, int mb_n_nodes, int nvrm1, int nvzm1, const TmpColData< Device > &tcd, int gri)
Definition: col_grid.tpp:682
View< double ***, CLayout, HostType > mesh_r
Definition: col_vgrids.hpp:20
void core_init(int isp, int mesh_ind, const CollisionVelocityGrids< Device > &col_vgrids, const CollisionSpecies< Device > &col_spall) const
Definition: col_grid.tpp:339
double dw_sum
Definition: col_grid.hpp:70
double outpsi
Boundary condition used in a few spots.
Definition: magnetic_field.hpp:43
Kokkos::DualView< double ****, Kokkos::LayoutRight, Device > pdf_n
Definition: col_species.hpp:87
void use_namelist(const string &namelist)
Definition: NamelistReader.hpp:355
Definition: col_grid.hpp:63
Definition: col_grid.hpp:32
Definition: memory_prediction.hpp:4
void eval(int isp, int mesh_ind, const CollisionVelocityGrids< Device > &cvg, const CollisionSpecies< Device > &cs, double &dw, double &dp, double &dn_n) const
Definition: col_grid.hpp:79
void E_and_D(int mb_n_nodes, int gri, int grj, const CollisionVelocityGrids< Device > &col_vgrids, TmpColData< Device > &tcd) const
Definition: col_grid.tpp:475
Definition: col_grid.hpp:49
Kokkos::View< double ***, Device > dfdz
Definition: col_grid.hpp:163
void picard_loop(int vpic_inner_iter_max, const CollisionVelocityGrids< Device > &col_vgrids, CollisionSpecies< Device > &col_spall, TmpColData< Device > &tcd, int mb_n_nodes, Kokkos::View< Convergence::Status *, HostType > &convergence_status, Kokkos::View< Convergence::Moments *, HostType > &moments) const
Definition: col_grid.tpp:222
View< int *, CLayout, HostType > n_subcycles
Number of subcycles for each vertex.
Definition: col_grid.hpp:223
Definition: col_grid.hpp:35
double ginkgo_residual_reduction
Definition: col_grid.hpp:217
static KOKKOS_INLINE_FUNCTION void angle_avg_s(int idx, int mb_n_nodes, int nvrm1, int nvzm1, const CollisionVelocityGrids< Device > &col_vgrids, const TmpColData< Device > &tcd, int gri)
Definition: col_grid.tpp:566
bool dens_exit_ok
Definition: col_grid.hpp:73
bool namelist_present(const string &namelist)
Definition: NamelistReader.hpp:351
Definition: col_grid.hpp:31
CollisionGrid(NLReader::NamelistReader &nlr, const MagneticField< DeviceType > &magnetic_field, const Grid< DeviceType > &grid, bool exclude_private_region, bool overwrite_existing_files)
Definition: col_grid.hpp:232
View< double ***, CLayout, HostType > mesh_z
Definition: col_vgrids.hpp:21
Kokkos::View< double ****, Device > EDs
Definition: col_grid.hpp:164
Definition: col_grid.hpp:33
void exit_XGC(std::string msg)
Definition: globals.hpp:37
Definition: col_grid.hpp:54
Definition: magnetic_field.F90:1
LinAlgBackend
Definition: col_grid_matrix.hpp:221
int n() const
Definition: col_species.hpp:181
Definition: col_grid.hpp:51
Kokkos::View< double ***, Device > dfdr
Definition: col_grid.hpp:162
static KOKKOS_INLINE_FUNCTION void angle_avg_ab(int idx, int mb_n_nodes, int nvrm1, int nvzm1, const CollisionVelocityGrids< Device > &col_vgrids, const TmpColData< Device > &tcd, int gri, int grj)
Definition: col_grid.tpp:405
int start_step
starting step for collisions; should be part of step trigger
Definition: col_grid.hpp:220
Definition: vertex_list.hpp:53
bool mom_exit_ok
Definition: col_grid.hpp:75
View< double ***, CLayout, HostType > vol_h
Definition: col_vgrids.hpp:23
Definition: species.hpp:75
static MemoryPrediction estimate_memory_usage(NLReader::NamelistReader &nlr)
Definition: col_grid.hpp:320
std::shared_ptr< XGC_IO_Stream > io_stream
Definition: col_grid.hpp:227
Definition: col_species.hpp:81
int nnode
Number of grid nodes.
Definition: grid.hpp:165
Kokkos::DualView< int[4][4], Device > mat_pos_rel_indx
Definition: col_grid.hpp:159
void angle_avg(int mb_n_nodes, int gri, int grj, CollisionVelocityGrids< Device > &col_vgrids, TmpColData< Device > &tcd) const
Definition: col_grid.tpp:387
bool is_okay(Status convergence_status)
Definition: col_grid.hpp:57
Definition: col_grid.hpp:53
double dp_sum
Definition: col_grid.hpp:71
void f_df(int mb_n_nodes, const CollisionVelocityGrids< Device > &col_vgrids, const CollisionSpecies< Device > &col_spall, int spi, int grj, TmpColData< Device > &tcd) const
Definition: col_grid.tpp:631
int largest_n_subcycles() const
Definition: col_grid.hpp:316
Kokkos::ViewAllocateWithoutInitializing NoInit
Definition: space_settings.hpp:69
static KOKKOS_INLINE_FUNCTION double get_maxw_fac(double mesh_dr, double mesh_r, double numeric_vth2)
Definition: col_grid.tpp:727
Definition: col_grid.hpp:50
bool evaluate_conservation(int mesh_ind, const CollisionVelocityGrids< Device > &col_vgrids, const CollisionSpecies< Device > &col_spall, bool &values_are_all_finite)
Definition: col_grid.hpp:121
bool diag_on
Definition: col_grid.hpp:226
Definition: col_grid.hpp:34
int max_n_subcycles
Maximum number of subcycles that may be attempted.
Definition: col_grid.hpp:222