XGC1
col_grid.hpp
1 #ifndef COL_GRID_HPP
2 #define COL_GRID_HPP
3 
4 #include <vector>
5 #include <stdio.h>
6 #include <Kokkos_Core.hpp>
7 #include <Kokkos_DualView.hpp>
8 
9 #include "xgc_io.hpp"
10 #include "domain_decomposition.hpp"
11 #include "velocity_grid.hpp"
12 #include "vgrid_distribution.hpp"
13 #include "magnetic_field.hpp"
14 
15 #include "col_grid_matrix.hpp"
16 #include "col_species.hpp"
17 #include "col_vgrids.hpp"
18 
19 //> XGC's multi-species Fokker-Planck-Landau collision operator
20 // see R. Hager et al., Journal of Computational Physics 315 (2016), pp. 644-660
21 // E. S. Yoon et al., Physics of Plasmas 21 (2014), 032503
22 //
23 // The headers of the subroutines in this module refer to the corresponding parts of Hager et al.
24 // where applicable in order to make the source code easier to understand.
25 
26 
27 // Enum for indexing the components of the EDs array
28 namespace ED{
29 enum{
30  RR=0,
31  RZ,
32  ZZ,
33  ER,
34  EZ,
35  N
36 };
37 }
38 
39 namespace Convergence{
40 
41 // Ideally this would be an enum class,
42 // but there are MPI and I/O operations that expect ints
43 enum Status{
44  NaNOrInfFound,
45  MaxedIter,
46  TooManyNegValues,
47  PosCorrectionFail,
48  FirstStepFail,
49  NotAttempted,
50  Success,
51  InProgress=2
52 };
53 
54 inline bool is_okay(Status convergence_status){
55  return (convergence_status!=MaxedIter &&
56  convergence_status!=NaNOrInfFound &&
57  convergence_status!=FirstStepFail);
58 }
59 
60 }
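As a usage sketch (not part of this header): a caller holding the per-vertex status array returned by core() could combine is_okay with the two non-terminal states to decide which vertices still need work. The helper name below is hypothetical.

#include "col_grid.hpp"

// Hypothetical helper: true if a vertex should (still or again) be attempted.
inline bool needs_attempt(Convergence::Status s){
  // NotAttempted/InProgress are not failures, but the vertex is not finished either;
  // is_okay() treats MaxedIter, NaNOrInfFound and FirstStepFail as failures.
  return s == Convergence::NotAttempted ||
         s == Convergence::InProgress ||
         !Convergence::is_okay(s);
}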
61 
62 
63 template<class Device>
64 struct TmpColData{
65 
66  // Only mat_pos_rel_indx needs host/device mirroring (DualView); the other views live in the Device memory space
67  Kokkos::View<double***,Device> gammac_spall;
68  Kokkos::DualView<int[4][4],Device> mat_pos_rel_indx; // Shouldn't need to be a dual view
69 
70  Kokkos::View<double***,Device> fhalf;
71  Kokkos::View<double***,Device> dfdr;
72  Kokkos::View<double***,Device> dfdz;
73  Kokkos::View<double****,Device> EDs;
74  Kokkos::View<double******,Device> M_ab;
75  Kokkos::View<double*****,Device> M_s;
76 
77  TmpColData<Device>(int nvr, int nvz, int sp_num, int n_vgrids, int mb_n_nodes)
78  : // Note: layout left
79  gammac_spall("gammac_spall",mb_n_nodes,sp_num,sp_num),
80  fhalf("fhalf",mb_n_nodes,nvz-1,nvr-1),
81  dfdr("dfdr",mb_n_nodes,nvz-1,nvr-1),
82  dfdz("dfdz",mb_n_nodes,nvz-1,nvr-1),
83  EDs("EDs",mb_n_nodes,nvz-1,nvr-1,ED::N),
84  M_s("M_s",mb_n_nodes,n_vgrids, (nvr-1)*(nvz-1), ED::N, nvr-1),
85  M_ab("M_ab",mb_n_nodes,n_vgrids, n_vgrids-1, (nvr-1)*(nvz-1), 3, (nvr-1)*(nvz-1)),
86  mat_pos_rel_indx("mat_pos_rel_indx")
87  {
88  // Set up mat_pos_rel_indx and send to GPU
89  int mat_loc[16] = {4,5,7,8,
90  1,2,4,5,
91  3,4,6,7,
92  0,1,3,4};
93 
94  for (int i=0;i<16;i++)
95  mat_pos_rel_indx.h_view(i/4,i%4)=mat_loc[i];
96 
97 #ifdef USE_GPU
98  Kokkos::deep_copy(mat_pos_rel_indx.d_view, mat_pos_rel_indx.h_view);
99 #endif
100  }
101 };
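The six-dimensional M_ab view above dominates this scratch storage and is the main reason collision memory grows linearly with the batch size mb_n_nodes. A self-contained sketch of its footprint, using purely illustrative dimensions (the real nvr/nvz come from the collision velocity grids):

#include <cstdio>

int main(){
  // Illustrative values only
  long mb_n_nodes = 64, n_vgrids = 2, nvr = 31, nvz = 31;
  long cells = (nvr-1)*(nvz-1);                                  // velocity-space cells
  long m_ab  = mb_n_nodes*n_vgrids*(n_vgrids-1)*cells*3*cells;   // elements of M_ab
  std::printf("M_ab holds %ld doubles (%.2f GB)\n",
              m_ab, m_ab*8.0/(1024.0*1024.0*1024.0));
  return 0;
}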
102 
103 
104 inline VertexList vertices_in_range(const MagneticField<DeviceType>& magnetic_field, const Grid<DeviceType>& grid, bool exclude_private_region, double inner_psi_bound, double outer_psi_bound){
105  return VertexList(grid.nnode, KOKKOS_LAMBDA(const int inode){
106  if(inner_psi_bound <= grid.psi(inode) &&
107  outer_psi_bound >= grid.psi(inode) &&
108  grid.node_is_in_included_region(inode, exclude_private_region))
109  {
110  return true;
111  }else{
112  return false;
113  }
114  });
115 }
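A usage sketch mirroring what the constructor below does with the col_pin/col_pout bounds (the helper and the numeric bounds are illustrative; when given in the namelist, the bounds are multiplied by magnetic_field.psi_norm() first):

#include "col_grid.hpp"

// Hypothetical helper: select the vertices on which collisions will be performed
inline VertexList collision_vertices(const MagneticField<DeviceType>& field,
                                     const Grid<DeviceType>& grid){
  double inner_psi = 0.0;                    // e.g. col_pin  * field.psi_norm()
  double outer_psi = 1.05*field.psi_norm();  // e.g. col_pout * field.psi_norm()
  return vertices_in_range(field, grid, /*exclude_private_region=*/true,
                           inner_psi, outer_psi);
}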
116 
117 template<class Device>
118 class CollisionGrid{
119 
120  public:
121 
122  int batch_size; // Number of mesh vertices batched together in the collision solve (mesh_batch_size)
123 
124  Collisions::LinAlgBackend labackend; // Linear algebra backend used for the collision solve (lapack or ginkgo)
125 
126  double ginkgo_residual_reduction; // Residual reduction criterion for the Ginkgo solver
127  int ginkgo_max_iterations; // Maximum number of iterations for the Ginkgo solver
128 
129  int start_step; // Starting step for collisions; should be part of step trigger
130 
131  int max_n_subcycles; // Maximum number of subcycles that may be attempted
132  View<int*,CLayout,HostType> n_subcycles; // Number of subcycles for each vertex
133  View<float*,CLayout,HostType> timing_all; // Vertex timing
134  View<bool[1],CLayout,HostType> timing_available; // Whether timing has begun yet
135  VertexList vertices; // List of vertices on which collisions are performed
136 
137  bool async_reassign; // Asynchronously transfer collision workload between MPI ranks
138 
139  bool diag_on; // Switches file-output of convergence status of the collision operator on/off
140  std::shared_ptr<XGC_IO_Stream> io_stream;
141 
142  CollisionGrid(){}
143 
144  //> Set up global variables of the collision operator
145  CollisionGrid(NLReader::NamelistReader& nlr, const MagneticField<DeviceType>& magnetic_field, const Grid<DeviceType>& grid, bool exclude_private_region, bool overwrite_existing_files)
146  : n_subcycles(NoInit("n_subcycles"), grid.nnode),
147  timing_all("timing_all", grid.nnode), // Init to 0
148  timing_available("timing_available")
149  {
150  nlr.use_namelist("col_param");
151  double inner_psi_bound = nlr.get<double>("col_pin",magnetic_field.inpsi); // Minimum of psi range where collisions are performed. @parent: col_param:col_mode=4
152  double outer_psi_bound = nlr.get<double>("col_pout",magnetic_field.outpsi); // Maximum of psi range where collisions are performed. @parent: col_param:col_mode=4
153 
154  // Normalize by xpt
155  // Default values are already normalized, so only normalize if not default
156  if(nlr.present("col_pin")) inner_psi_bound *= magnetic_field.psi_norm();
157  if(nlr.present("col_pout")) outer_psi_bound *= magnetic_field.psi_norm();
158 
159  max_n_subcycles = nlr.get<int>("col_max_n_subcycles", 1); // Enables collision subcycling. The collision time step of an unconverged vertex is reduced, increasing that vertex's number of subcycles up to a maximum of col_max_n_subcycles; if it still does not converge, the vertex will not be attempted again. @parent: col_param:col_mode=4
160 
161  start_step = nlr.get<int>("col_f_start", 1); // The time step at which collisions begin @parent: col_param:col_mode=4
162 
165  // Initially, no timing data is available
166  timing_available(0) = false;
167 
168  // Start off with every vertex having 1 subcycle
169  Kokkos::deep_copy(n_subcycles, 1);
170 
171  int default_batch_size = get_default_batch_size();
172 
173  // Set default solver option
174 #if defined(USE_GINKGO) && defined(USE_GPU)
175  labackend = Collisions::LinAlgBackend::ginkgo;
176 #else
177  labackend = Collisions::LinAlgBackend::lapack;
178 #endif
179 
180  nlr.use_namelist("performance_param", NLReader::NamelistReader::Optional);
181  batch_size = nlr.get<int>("mesh_batch_size", default_batch_size); // Collisions are batched (multiple vertices run in parallel) for better performance. The best value may vary by platform. Typically higher is better (for GPU simulations) but memory usage is linearly proportional to this value. @parent: col_param:col_mode=4
182  if(nlr.present("collisions_solver")){
183  std::string linalg_backend_str = nlr.get<std::string>("collisions_solver", "lapack"); // Which collisions solver to use.
184  // "lapack" is always used for CPU-only simulations
185  // "ginkgo" is available for GPU and improves performance
186  // @parent: col_param:col_mode=4
187  if(linalg_backend_str=="lapack") labackend = Collisions::LinAlgBackend::lapack;
188  else if(linalg_backend_str=="ginkgo") labackend = Collisions::LinAlgBackend::ginkgo;
189  else exit_XGC("\nError: Invalid input option. collisions_solver must be 'lapack' or 'ginkgo'.\n");
190  }
191  ginkgo_residual_reduction = nlr.get<double>("ginkgo_residual_reduction", 1.0e-16); // For the Ginkgo solver, the residual reduction @parent: performance_param:collisions_solver=ginkgo
192  ginkgo_max_iterations = nlr.get<int>("ginkgo_max_iterations", 300); // For the Ginkgo solver, the maximum number of iterations @parent: performance_param:collisions_solver=ginkgo
193  async_reassign = nlr.get<bool>("col_async_reassign", false); // Asynchronously transfer collision workload between MPI ranks @parent: col_param:col_mode=4
194 
195 #ifndef USE_GINKGO
196  if(labackend == Collisions::LinAlgBackend::ginkgo) exit_XGC("\nError: collisions_solver=='ginkgo' was specified, but XGC was configured without Ginkgo.\n");
197 #endif
198 #ifndef USE_GPU
199  if(labackend == Collisions::LinAlgBackend::ginkgo) exit_XGC("\nError: The CPU-only version of XGC cannot use collisions_solver=='ginkgo'.\n");
200 #endif
201 
202  if(is_rank_zero()){
203  printf("\nUsing %s for collision solver.\n", (labackend == Collisions::LinAlgBackend::ginkgo ? "Ginkgo" : "LAPACK"));
204  }
205 
206 
207  // Save list of vertices on which to do collisions
208  vertices = vertices_in_range(magnetic_field, grid, exclude_private_region, inner_psi_bound, outer_psi_bound);
209 
210  // Diagnostic
211  nlr.use_namelist("diag_param");
212  diag_on = nlr.get<bool>("diag_col_convergence_stat_on",false);
213 
214  if(diag_on){
215  io_stream = std::make_shared<XGC_IO_Stream>();
216  io_stream->Init("collision_stats");
217 #ifdef USE_MPI
218  io_stream->Open("xgc.col_conv_status.bp", XGC_IO_Mode::Write, SML_COMM_WORLD);
219 #else
220  io_stream->Open("xgc.col_conv_status.bp", XGC_IO_Mode::Write);
221 #endif
222  }
223  }
224 
225  bool timing_is_available() const{
226  return timing_available(0);
227  }
228 
229  static int get_default_batch_size(){
230  // These defaults have not been systematically optimized
231 #ifdef USE_GPU
232  return 64;
233 #else
234 #ifdef USE_OMP
235  int n_threads = omp_get_max_threads();
236  int dbs=1; while ( dbs < n_threads ) dbs*=2; // round up to the smallest power of 2 >= n_threads so that all threads stay busy
237  return dbs;
238 #else
239  return 1;
240 #endif
241 #endif
242  }
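A standalone check of the rounding used in the OpenMP branch above (the real code queries omp_get_max_threads(); the helper name here is illustrative):

#include <cstdio>
#include <initializer_list>

// Same loop as above: smallest power of two that is >= n
static int round_up_pow2(int n){
  int dbs = 1;
  while(dbs < n) dbs *= 2;
  return dbs;
}

int main(){
  for(int n : {1, 3, 6, 8, 12})
    std::printf("%2d threads -> batch size %2d\n", n, round_up_pow2(n));
  return 0; // 1->1, 3->4, 6->8, 8->8, 12->16
}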
243 
244  static MemoryPrediction estimate_memory_usage(NLReader::NamelistReader& nlr){
245  nlr.use_namelist("ptl_param");
246  int plasma_nspecies = nlr.get<int>("ptl_nsp",1);
247 
248  nlr.use_namelist("sml_param");
249  bool sml_electron_on = nlr.get<bool>("sml_electron_on", false);
250  if(sml_electron_on) plasma_nspecies += 1; // Add electrons
251 
252  VelocityGrid vgrid(nlr);
253  int default_batch_size = get_default_batch_size();
254 
255  int col_grid_batch_size;
256  nlr.use_namelist("performance_param", NLReader::NamelistReader::Optional);
257  col_grid_batch_size = nlr.get<int>("mesh_batch_size", default_batch_size);
258 
259  double device_col_GB_per_vertex = plasma_nspecies*std::max(plasma_nspecies-1,1)*(vgrid.nvp*vgrid.nmu)*(vgrid.nvp*vgrid.nmu)*8*BYTES_TO_GB;
260  double gpu_memory_usage = (col_grid_batch_size*device_col_GB_per_vertex);
261 
262  MemoryPrediction memory_prediction{"Fields", 0.0, gpu_memory_usage};
263 
264  return memory_prediction;
265  }
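For a sense of scale, the per-vertex estimate above works out as follows with illustrative inputs (the real species count and velocity-grid extents come from the ptl_param/sml_param namelists and VelocityGrid; 1 GB is taken as 1024^3 bytes here):

#include <algorithm>
#include <cstdio>

int main(){
  // Illustrative inputs only
  int nspecies = 2;        // e.g. one ion species plus kinetic electrons
  int nvp = 45, nmu = 30;  // hypothetical velocity-grid extents
  int batch    = 64;       // mesh_batch_size
  double per_vertex_gb = double(nspecies)*std::max(nspecies-1,1)
                         *double(nvp*nmu)*double(nvp*nmu)*8.0
                         /(1024.0*1024.0*1024.0);
  std::printf("%.3f GB per vertex, %.1f GB for a batch of %d\n",
              per_vertex_gb, batch*per_vertex_gb, batch);
  return 0;
}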
266 
267  // Get maxw_fac
268  static KOKKOS_INLINE_FUNCTION double get_maxw_fac(double mesh_dr, double mesh_r, double numeric_vth2) ;
269 
270  // Get numeric v_thermal equilibrium for a given species against a given grid
271  View<double*,Device> get_numeric_v_thermal_equil(int mb_n_nodes, int spi, int grj, const CollisionVelocityGrids<Device>& col_vgrids, const CollisionSpecies<Device>& col_spall) const;
272 
273  // Delta init
274  void core_delta_init(int mb_n_nodes, int gri, int grj, int spi, CollisionVelocityGrids<Device>& col_vgrids, const CollisionSpecies<Device>& col_spall) const;
275 
276  // LU_matrix_ftn
277  static KOKKOS_INLINE_FUNCTION void LU_matrix_ftn(int mesh_ind, int gri, int grj, int spi, int cell_i,int cell_j,const CollisionVelocityGrids<Device>& col_vgrids,int mprl_col, int mat_pos,double coeff1,double coeff2, const TmpColData<Device>& tcd, const Kokkos::View<int**,Device>& index_map_LU_d,typename Collisions::GridMatrix<Device>::values_array_t LU_values);
278 
279  // LU_matrix
280  void LU_matrix(int mb_n_nodes, int gri, int grj, int spi, const CollisionVelocityGrids<Device>& col_vgrids,const TmpColData<Device>& tcd, Collisions::GridMatrix<Device>* const mtx) const;
281 
282  // the core loop including the picard loop is now inside this function
283  void picard_loop(int vpic_inner_iter_max, const CollisionVelocityGrids<Device>& col_vgrids, const CollisionSpecies<Device>& col_spall, TmpColData<Device>& tcd, const std::unique_ptr<Collisions::GridMatrix<Device>>& matrix, int mb_n_nodes, Kokkos::View<Convergence::Status*,HostType>& convergence_status) const;
284 
285  // dispatch for E_and_D calc
286  void E_and_D(int mb_n_nodes, int gri, int grj, const CollisionVelocityGrids<Device>& col_vgrids, TmpColData<Device>& tcd) const;
287 
288  // E_and_D_s
289  static KOKKOS_INLINE_FUNCTION void E_and_D_s(int idx, int mb_n_nodes, int nvrm1, int nvzm1, const TmpColData<Device>& tcd, int gri) ;
290 
291  // dispatch for angle_avg calc
292  void angle_avg(int mb_n_nodes, int gri, int grj, CollisionVelocityGrids<Device>& col_vgrids, TmpColData<Device>& tcd) const;
293 
294  // angle_avg_s
295  static KOKKOS_INLINE_FUNCTION void angle_avg_s(int idx, int mb_n_nodes, int nvrm1, int nvzm1, const CollisionVelocityGrids<Device>& col_vgrids, const TmpColData<Device>& tcd, int gri);
296 
297  // E_and_D_ab
298  static KOKKOS_INLINE_FUNCTION void E_and_D_ab(int idx, int mb_n_nodes, int nvrm1, int nvzm1, const CollisionVelocityGrids<Device>& col_vgrids, const TmpColData<Device>& tcd, int gri, int grj);
299 
300  // angle_avg_ab
301  static KOKKOS_INLINE_FUNCTION void angle_avg_ab(int idx, int mb_n_nodes, int nvrm1, int nvzm1, const CollisionVelocityGrids<Device>& col_vgrids, const TmpColData<Device>& tcd, int gri, int grj);
302 
303  void f_df(int mb_n_nodes, const CollisionVelocityGrids<Device>& col_vgrids, const CollisionSpecies<Device>& col_spall,int spi,int grj,TmpColData<Device>& tcd) const;
304 
305  // Main collision algorithm
306  Kokkos::View<Convergence::Status*,HostType> core(CollisionVelocityGrids<Device>& col_vgrids, const CollisionSpecies<Device>& col_spall, TmpColData<Device>& tcd, const std::unique_ptr<Collisions::GridMatrix<Device>>& solve_matrix, int mb_n_nodes) const;
307 
308 
309  // Carries out collisions
310  void collision(const CollisionSpecies<Device>& col_spall, const VertexList& assigned, const View<int*,CLayout,HostType>& n_subcycles_local, double dt, const VGridDistribution<HostType>& df_out, const View<int*,CLayout,HostType>& converged_local, const View<double*,CLayout,HostType>& node_cost) const;
311 };
312 
313 #endif
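A hypothetical call-site sketch (the real construction happens elsewhere in XGC; everything here other than the CollisionGrid API is illustrative): the memory estimate can be queried from the namelists alone, before any collision data is allocated.

#include "col_grid.hpp"

template<class Device>
CollisionGrid<Device> make_collision_grid(NLReader::NamelistReader& nlr,
                                          const MagneticField<DeviceType>& field,
                                          const Grid<DeviceType>& grid){
  // Cheap to evaluate up front, e.g. to log the expected device memory per batch
  MemoryPrediction prediction = CollisionGrid<Device>::estimate_memory_usage(nlr);
  (void)prediction;
  return CollisionGrid<Device>(nlr, field, grid,
                               /*exclude_private_region=*/true,
                               /*overwrite_existing_files=*/false);
}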