XGCa
col_grid.hpp
1 #ifndef COL_GRID_HPP
2 #define COL_GRID_HPP
3 
4 #include <vector>
5 #include <stdio.h>
6 #include <Kokkos_Core.hpp>
7 #include <Kokkos_DualView.hpp>
8 
9 #include "xgc_io.hpp"
10 #include "domain_decomposition.hpp"
11 #include "velocity_grid.hpp"
12 #include "vgrid_distribution.hpp"
13 #include "magnetic_field.hpp"
14 
15 #include "col_grid_matrix.hpp"
16 #include "col_species.hpp"
17 #include "col_vgrids.hpp"
18 
19 //> XGC's multi-species Fokker-Planck-Landau collision operator
20 // see R. Hager et al., Journal of Computational Physics 315 (2016), pp. 644-660
21 // E. S. Yoon et al., Physics of Plasmas 21 (2014), 032503
22 //
23 // The headers of the subroutines in this module refer to the corresponding parts of Hager et al.
24 // where applicable in order to make the source code easier to understand.
25 
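For reference, the operator implemented here is the nonlinear Landau (Fokker-Planck) collision integral. In a standard rendering (notation not copied from the papers; constant factors are absorbed into \(\Gamma_{ab}\)):

\[
C_{ab}(f_a,f_b) \;=\; \Gamma_{ab}\,\frac{\partial}{\partial\mathbf{v}}\cdot\int \mathbf{U}(\mathbf{v}-\mathbf{v}')\cdot\left(\frac{f_b(\mathbf{v}')}{m_a}\frac{\partial f_a(\mathbf{v})}{\partial\mathbf{v}} - \frac{f_a(\mathbf{v})}{m_b}\frac{\partial f_b(\mathbf{v}')}{\partial\mathbf{v}'}\right)d^3v',
\qquad
U_{ij}(\mathbf{u}) \;=\; \frac{|\mathbf{u}|^2\,\delta_{ij}-u_i u_j}{|\mathbf{u}|^3},
\]

where \(\Gamma_{ab}\) collects the species charges and the Coulomb logarithm (cf. the gammac_spall array below).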
26 
27 // Enum for derivatives of ED array
28 namespace ED{
29 enum{
30  RR=0,
31  RZ,
32  ZZ,
33  ER,
34  EZ,
35  N
36 };
37 }
38 
39 // <SOLVER-related>
40 const int vpic_inner_iter_max=20;
41 
42 namespace Convergence{
43 
44 // Ideally this would be an enum class,
45 // but there are MPI and I/O operations that expect ints
46 enum Status{
    // (enumerators, source lines 47-54, lost in extraction; they include at
    // least MaxedIter, NaNOrInfFound and FirstStepFail, tested in is_okay() below)
55 };
56 
57 inline bool is_okay(Status convergence_status){
58  return (convergence_status!=MaxedIter &&
59  convergence_status!=NaNOrInfFound &&
60  convergence_status!=FirstStepFail);
61 }
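A minimal usage sketch (variable names hypothetical): core() below returns one Status per batched mesh vertex, and is_okay() separates hard failures from acceptable outcomes:

    // Hypothetical driver fragment: tally vertices whose result can be accepted.
    auto status = col_grid.core(col_vgrids, col_spall, tcd, mb_n_nodes);
    int n_ok = 0;
    for (size_t i = 0; i < status.extent(0); i++)
      if (Convergence::is_okay(status(i))) n_ok++;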
62 
63 class Moments{
64  // Values
65  double w_sum; //> sum of energy over species
66  double p_sum; //> sum of momentum over species
67  double min_p_thres; //> momentum error criterion
68 
69  // Deltas
70  double dw_sum; //> sum of energy change over species (i.e. error in energy)
71  double dp_sum; //> sum of momentum change over species (i.e. error in momentum)
72  double dn_n_max; //> maximum relative density change among species (i.e. maximum relative error in density across all species)
73  bool dens_exit_ok;
74  bool en_exit_ok;
75  bool mom_exit_ok;
76 
77  //> Checks the convergence criterion (i.e. conservation of mass, energy and momentum)
78  template<class Device>
79  inline void eval(int isp, int mesh_ind, const CollisionVelocityGrids<Device>& cvg, const CollisionSpecies<Device>& cs, double& dw, double& dp, double& dn_n) const{
80  double vpic_dn = 0.0;
81  double vpic_dw = 0.0;
82  double vpic_dp = 0.0;
83 
84  int gri = cs.s.h_view(isp, mesh_ind).grid_index;
85 
86  for (int index_i = 0; index_i<cvg.mesh_z.extent(2); index_i++){
87  double mesh_z2 = cvg.mesh_z(mesh_ind,gri,index_i)*cvg.mesh_z(mesh_ind,gri,index_i);
88  for (int index_j = 0; index_j<cvg.mesh_r.extent(2); index_j++){
89  double mesh_r2 = cvg.mesh_r(mesh_ind,gri,index_j)*cvg.mesh_r(mesh_ind,gri,index_j);
90  double vpic_dfc = (cs.pdf_np1.h_view(mesh_ind,isp,index_i,index_j) - cs.pdf_n.h_view(mesh_ind,isp,index_i,index_j)) * cvg.vol_h(mesh_ind,gri,index_j);
91 
92  vpic_dn += vpic_dfc;
93  vpic_dw += vpic_dfc*(mesh_z2 + mesh_r2);
94  vpic_dp += vpic_dfc*cvg.mesh_z(mesh_ind,gri,index_i);
95  }
96  }
97 
98  dw = vpic_dw*cs.s.h_view(isp,mesh_ind).mass;
99  dp = vpic_dp*cs.s.h_view(isp,mesh_ind).mass;
100  dn_n = std::abs(vpic_dn/cs.s.h_view(isp,mesh_ind).dens);
101  }
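In the notation of the loop above (mesh_z holds the parallel-velocity nodes indexed by index_i, mesh_r the perpendicular-velocity nodes indexed by index_j, vol_h the velocity-space volume element), eval() accumulates, with \(\Delta f_{ij}\) = pdf_np1 - pdf_n,

\[
\Delta n = \sum_{i,j}\Delta f_{ij}\,V_j,\qquad
\Delta w = m\sum_{i,j}\Delta f_{ij}\,\bigl(v_{\parallel,i}^2+v_{\perp,j}^2\bigr)\,V_j,\qquad
\Delta p = m\sum_{i,j}\Delta f_{ij}\,v_{\parallel,i}\,V_j,
\]

and returns dn_n = \(|\Delta n / n|\). Constant prefactors (e.g. the 1/2 in the kinetic energy) are evidently absorbed elsewhere, since only ratios of these quantities enter the convergence tests.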
102 
103  public:
104 
105  template<class Device>
106  inline void set(int mesh_ind, const CollisionSpecies<Device>& col_spall){
107  w_sum = 0;
108  p_sum = 0;
109  min_p_thres = 1e99; // just a large value, to be replaced by the actual minimum below
110  for (int spi = 0; spi<col_spall.n(); spi++){
111  w_sum += col_spall.s.h_view(spi,mesh_ind).ens;
112  p_sum += col_spall.s.h_view(spi,mesh_ind).mom;
113  min_p_thres = std::min( col_spall.s.h_view(spi,mesh_ind).mass*col_spall.s.h_view(spi,mesh_ind).dens*sqrt(col_spall.s.h_view(spi,mesh_ind).numeric_vth2),
114  min_p_thres );
115  }
116  }
117 
118  template<class Device>
119  inline bool evaluate_conservation(int mesh_ind, const CollisionVelocityGrids<Device>& col_vgrids, const CollisionSpecies<Device>& col_spall, bool& values_are_all_finite){
120  dw_sum = 0.0;
121  dp_sum = 0.0;
122  dn_n_max = 0.0;
123  dens_exit_ok = true;
124 
125  for (int spi = 0; spi<col_spall.n(); spi++){
126  // Re-compute conservation errors after correction
127  double dw,dp,dn_n;
128  eval(spi, mesh_ind, col_vgrids, col_spall, dw, dp, dn_n); // moments evaluated from the difference between pdf_n and pdf_np1
129  dw_sum += dw;
130  dp_sum += dp;
131  if(dn_n_max<dn_n) dn_n_max = dn_n;
132  dens_exit_ok = dens_exit_ok && (dn_n<1e-10);
133  }
134 
135  // Check for NaNs and Infs
136  values_are_all_finite = (std::isfinite(dw_sum) && std::isfinite(dp_sum) && std::isfinite(dn_n_max));
137 
138  en_exit_ok = std::abs(dw_sum/w_sum) <= 1e-7;
139  // bool mom_exit_ok = std::abs(col_dp_sum)/(std::max(std::abs(col_p_sum),min_p_thres)) <= (col_f_vp_max/col_f_nvz);// v_par = max_v_par of simulation domain / vth
140  // ALS : momentum tolerance changed to the tolerance in XGC's former collisions module:
141  // For momentum tolerance use main ion mass and electron density as normalization (when there are more than 2 species).
142  double mass_use = (col_spall.s.h_view(0,mesh_ind).is_electron ? col_spall.s.h_view(1,mesh_ind).mass : col_spall.s.h_view(0,mesh_ind).mass);
143  mom_exit_ok = std::abs(dp_sum)/(std::max(std::abs(p_sum),1e-3*mass_use*col_spall.s.h_view(0,mesh_ind).dens)) <= 1.0e-7;
144 
145  return (dens_exit_ok && en_exit_ok && mom_exit_ok);
146  }
147 };
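Summarizing the exit tests coded in evaluate_conservation() above, the solve is accepted when

\[
\left|\frac{\Delta n_a}{n_a}\right| < 10^{-10}\ \text{for every species } a,\qquad
\left|\frac{\Delta w}{w}\right| \le 10^{-7},\qquad
\frac{|\Delta p|}{\max\!\left(|p|,\;10^{-3}\,m_{\mathrm{ion}}\,n_0\right)} \le 10^{-7},
\]

where w and p are summed over species, \(m_{\mathrm{ion}}\) is the main-ion mass and \(n_0\) the density of species 0 (the electron density when species 0 is the electrons).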
148 
149 }
150 
151 
152 template<class Device>
153 struct TmpColData{
154 
155  // Temporary work arrays; mat_pos_rel_indx is a DualView (mirrored on CPU and GPU), the rest are device views
156  Kokkos::View<double***,Device> gammac_spall;
157  Kokkos::DualView<int[4][4],Device> mat_pos_rel_indx; // Shouldn't need to be a dual view
158 
159  Kokkos::View<double***,Device> fhalf;
160  Kokkos::View<double***,Device> dfdr;
161  Kokkos::View<double***,Device> dfdz;
162  Kokkos::View<double****,Device> EDs;
163  Kokkos::View<double******,Device> M_ab;
164  Kokkos::View<double*****,Device> M_s;
165 
166  TmpColData<Device>(int nvr, int nvz, int sp_num, int n_vgrids, int mb_n_nodes)
167  : // Note: layout left
168  gammac_spall("gammac_spall",mb_n_nodes,sp_num,sp_num),
169  fhalf("fhalf",mb_n_nodes,nvz-1,nvr-1),
170  dfdr("dfdr",mb_n_nodes,nvz-1,nvr-1),
171  dfdz("dfdz",mb_n_nodes,nvz-1,nvr-1),
172  EDs("EDs",mb_n_nodes,nvz-1,nvr-1,ED::N),
173  M_s("M_s",mb_n_nodes,n_vgrids, (nvr-1)*(nvz-1), ED::N, nvr-1),
174  M_ab("M_ab",mb_n_nodes,n_vgrids, n_vgrids-1, (nvr-1)*(nvz-1), 3, (nvr-1)*(nvz-1)),
175  mat_pos_rel_indx("mat_pos_rel_indx")
176  {
177  // Set up mat_pos_rel_indx and send to GPU
178  int mat_loc[16] = {4,5,7,8,
179  1,2,4,5,
180  3,4,6,7,
181  0,1,3,4};
182 
183  for (int i=0;i<16;i++)
184  mat_pos_rel_indx.h_view(i/4,i%4)=mat_loc[i];
185 
186 #ifdef USE_GPU
187  Kokkos::deep_copy(mat_pos_rel_indx.d_view, mat_pos_rel_indx.h_view);
188 #endif
189  }
190 };
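A sketch of how the ED enum at the top of the file addresses the EDs work array, as it would appear inside a device kernel (index names hypothetical; the dimensions follow the constructor above: batched node, parallel-velocity cell, perpendicular-velocity cell, component):

    // Drag (ER/EZ) and diffusion (RR/RZ/ZZ) coefficients for velocity cell (iz, ir)
    // of batched mesh node `node`:
    double D_rr = tcd.EDs(node, iz, ir, ED::RR);  // perp-perp diffusion component
    double D_rz = tcd.EDs(node, iz, ir, ED::RZ);  // mixed component
    double E_z  = tcd.EDs(node, iz, ir, ED::EZ);  // parallel drag component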
191 
192 
193 template<class Device>
194 class CollisionGrid{
195 
196  public:
197 
198  int batch_size;
199 
200  Collisions::LinAlgBackend labackend;
201 
202  double ginkgo_residual_reduction;
203  int ginkgo_max_iterations;
204 
205  double inner_psi_bound; //> Inner psi bound; collisions skipped below this.
206  double outer_psi_bound; //> Outer psi bound; collisions skipped above this.
207 
208  int start_step; //> Starting step for collisions; should be part of step trigger.
209 
210  int max_n_subcycles; //> Maximum number of subcycles that may be attempted.
211  View<int*,CLayout,HostType> n_subcycles; //> Number of subcycles for each vertex.
212 
212 
213  bool diag_on; // Switches file-output of convergence status of the collision operator on/off
214  std::shared_ptr<XGC_IO_Stream> io_stream;
215 
216  CollisionGrid(){}
217 
218  //> Set up global variables of the collision operator
219  CollisionGrid(NLReader::NamelistReader& nlr, const MagneticField<DeviceType>& magnetic_field, const Grid<DeviceType>& grid, bool overwrite_existing_files)
220  : n_subcycles(NoInit("n_subcycles"), grid.nnode)
221  {
222  nlr.use_namelist("col_param");
223  inner_psi_bound = nlr.get<double>("col_pin",magnetic_field.inpsi); // Minimum of psi range where collisions are performed.
224  outer_psi_bound = nlr.get<double>("col_pout",magnetic_field.outpsi); // Maximum of psi range where collisions are performed.
225 
226  // Normalize by xpt
227  // Default values are already normalized, so only normalize if not default
228  if(nlr.present("col_pin")) inner_psi_bound *= magnetic_field.psi_norm();
229  if(nlr.present("col_pout")) outer_psi_bound *= magnetic_field.psi_norm();
230 
231  max_n_subcycles = nlr.get<int>("col_max_n_subcycles", 1); // Enables collision subcycling. Collision time step of an unconverged vertex will be reduced, increasing the number of cycles of that vertex up to a maximum of col_max_n_subcycles. If it still does not converge, the vertex will not be attempted again.
232 
233  start_step = nlr.get<int>("col_f_start", 1); // The time step at which collisions begin
234 
237  // Start off with every vertex having 1 subcycle
238  Kokkos::deep_copy(n_subcycles, 1);
239 
240  // These defaults have not been systematically optimized
241 #ifdef USE_GPU
242  int default_batch_size = 64;
243 #else
244  int default_batch_size = 1; // omp_get_max_threads();
245 #endif
246  std::string linalg_backend_str;
247  if(nlr.namelist_present("performance_param")){
248  nlr.use_namelist("performance_param");
249  batch_size = nlr.get<int>("mesh_batch_size", default_batch_size); // Collisions are batched (multiple vertices run in parallel) for better performance. The best value may vary by platform. Typically higher is better (for GPU simulations) but memory usage is linearly proportional to this value.
250  linalg_backend_str = nlr.get<std::string>("collisions_solver", "lapack"); // Which collisions solver to use. "lapack" is on CPU. "ginkgo" is on GPU and improves performance, but the executable must be compiled with USE_GINKGO
251  ginkgo_residual_reduction = nlr.get<double>("ginkgo_residual_reduction", 1.0e-16); // For the Ginkgo solver, the residual reduction
252  ginkgo_max_iterations = nlr.get<int>("ginkgo_max_iterations", 300); // For the Ginkgo solver, the maximum number of iterations
253  }else{
254  batch_size = default_batch_size;
255  linalg_backend_str = "lapack";
256  ginkgo_residual_reduction = 1.0e-16;
257  ginkgo_max_iterations = 300;
258  }
259 
261 
262  // Diagnostic
263  nlr.use_namelist("diag_param");
264  diag_on = nlr.get<bool>("diag_col_convergence_stat_on",false);
265 
266  if(diag_on){
267  io_stream = std::make_shared<XGC_IO_Stream>();
268  io_stream->Init("collision_stats");
269  XGC_IO_Mode mode = (overwrite_existing_files ? XGC_IO_Mode::Write : XGC_IO_Mode::Append);
270  io_stream->Open("xgc.col_conv_status.bp", mode);
271  }
272  }
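The parameters read above come from XGC's Fortran-namelist input. A hypothetical input fragment (values illustrative only, not recommended defaults):

    &col_param
      col_pin  = 0.0             ! normalized psi below which collisions are skipped
      col_pout = 1.05            ! normalized psi above which collisions are skipped
      col_max_n_subcycles = 4    ! allow up to 4 collision subcycles per vertex
      col_f_start = 1            ! time step at which collisions begin
    /
    &performance_param
      mesh_batch_size = 64       ! vertices solved per batch
      collisions_solver = 'lapack'   ! or 'ginkgo' (requires USE_GINKGO)
    /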
273 
275  nlr.use_namelist("ptl_param");
276  int plasma_nspecies = nlr.get<int>("ptl_nsp",1);
277 
278  nlr.use_namelist("sml_param");
279  bool sml_electron_on = nlr.get<bool>("sml_electron_on", false);
280  if(sml_electron_on) plasma_nspecies += 1; // Add electrons
281 
282  VelocityGrid vgrid(nlr);
283 #ifdef USE_GPU
284  int default_batch_size = 64;
285 #else
286  int default_batch_size = 1; // omp_get_max_threads();
287 #endif
288  int col_grid_batch_size;
289  if(nlr.namelist_present("performance_param")){
290  nlr.use_namelist("performance_param");
291  col_grid_batch_size = nlr.get<int>("mesh_batch_size", default_batch_size);
292  }else{
293  col_grid_batch_size = default_batch_size;
294  }
295 
296  double device_col_GB_per_vertex = plasma_nspecies*std::max(plasma_nspecies-1,1)*(vgrid.nvp*vgrid.nmu)*(vgrid.nvp*vgrid.nmu)*8*BYTES_TO_GB;
297  double gpu_memory_usage = (col_grid_batch_size*device_col_GB_per_vertex);
298 
299  MemoryPrediction memory_prediction{"Fields", 0.0, gpu_memory_usage};
300 
301  return memory_prediction;
302  }
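As a worked example of the estimate above (numbers illustrative): with two species and nvp*nmu = 1350 velocity-grid points, device_col_GB_per_vertex = 2 * 1 * 1350^2 * 8 B, i.e. about 29 MB per vertex; with mesh_batch_size = 64 this predicts roughly 1.9 GB of device memory for the collision matrices.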
303 
304  // Core init
305  void core_init(int isp, int mesh_ind, const CollisionVelocityGrids<Device>& col_vgrids, const CollisionSpecies<Device>& col_spall ) const;
306 
307  // Get maxw_fac
308  static KOKKOS_INLINE_FUNCTION double get_maxw_fac(double mesh_dr, double mesh_r, double numeric_vth2);
309 
310  // Get numeric v_thermal equilibrium for a given species against a given grid
311  View<double*,Device> get_numeric_v_thermal_equil(int mb_n_nodes, int spi, int grj, const CollisionVelocityGrids<Device>& col_vgrids, const CollisionSpecies<Device>& col_spall) const;
312 
313  // Delta init
314  void core_delta_init(int mb_n_nodes, int gri, int grj, int spi, CollisionVelocityGrids<Device>& col_vgrids, const CollisionSpecies<Device>& col_spall) const;
315 
316  // LU_matrix_ftn
317  static KOKKOS_INLINE_FUNCTION void LU_matrix_ftn(int mesh_ind, int gri, int grj, int spi, int cell_i, int cell_j, const CollisionVelocityGrids<Device>& col_vgrids, int mprl_col, int mat_pos, double coeff1, double coeff2, const TmpColData<Device>& tcd, const Kokkos::View<int**,Device>& index_map_LU_d, typename Collisions::GridMatrix<Device>::values_array_t LU_values);
318 
319  // LU_matrix
320  void LU_matrix(int mb_n_nodes, int gri, int grj, int spi, const CollisionVelocityGrids<Device>& col_vgrids,const TmpColData<Device>& tcd, Collisions::GridMatrix<Device>* const mtx) const;
321 
322  // The core loop, including the Picard iteration, is inside this function
323  void picard_loop(int vpic_inner_iter_max, const CollisionVelocityGrids<Device>& col_vgrids, CollisionSpecies<Device>& col_spall, TmpColData<Device>& tcd, int mb_n_nodes, Kokkos::View<Convergence::Status*,HostType>& convergence_status, Kokkos::View<Convergence::Moments*,HostType>& moments) const;
324 
325  // dispatch for E_and_D calc
326  void E_and_D(int mb_n_nodes, int gri, int grj, const CollisionVelocityGrids<Device>& col_vgrids, TmpColData<Device>& tcd) const;
327 
328  // E_and_D_s
329  static KOKKOS_INLINE_FUNCTION void E_and_D_s(int idx, int mb_n_nodes, int nvrm1, int nvzm1, const TmpColData<Device>& tcd, int gri);
330 
331  // dispatch for angle_avg calc
332  void angle_avg(int mb_n_nodes, int gri, int grj, CollisionVelocityGrids<Device>& col_vgrids, TmpColData<Device>& tcd) const;
333 
334  // angle_avg_s
335  static KOKKOS_INLINE_FUNCTION void angle_avg_s(int idx, int mb_n_nodes, int nvrm1, int nvzm1, const CollisionVelocityGrids<Device>& col_vgrids, const TmpColData<Device>& tcd, int gri);
336 
337  // E_and_D_ab
338  static KOKKOS_INLINE_FUNCTION void E_and_D_ab(int idx, int mb_n_nodes, int nvrm1, int nvzm1, const CollisionVelocityGrids<Device>& col_vgrids, const TmpColData<Device>& tcd, int gri, int grj);
339 
340  // angle_avg_ab
341  static KOKKOS_INLINE_FUNCTION void angle_avg_ab(int idx, int mb_n_nodes, int nvrm1, int nvzm1, const CollisionVelocityGrids<Device>& col_vgrids, const TmpColData<Device>& tcd, int gri, int grj);
342 
343  void f_df(int mb_n_nodes, const CollisionVelocityGrids<Device>& col_vgrids, const CollisionSpecies<Device>& col_spall, int spi, int grj, TmpColData<Device>& tcd) const;
344 
345  // Main collision algorithm
346  Kokkos::View<Convergence::Status*,HostType> core(CollisionVelocityGrids<Device>& col_vgrids, CollisionSpecies<Device>& col_spall, TmpColData<Device>& tcd, int mb_n_nodes) const;
347 
348 
349  // Carries out collisions
350  void collision(std::vector<Species<DeviceType>> &all_species, const Moments& moments, const DomainDecomposition<DeviceType>& pol_decomp, const View<bool*, HostType>& in_range, bool symmetric_f, const VGridDistribution<HostType>& f0_f, VGridDistribution<HostType>& df_out, double dt, View<int*,CLayout,HostType>& converged_all, View<double*,CLayout,HostType>& node_cost) const;
351 };
352 
353 #include "col_grid.tpp"
354 
355 #endif