PLUTO
PatchCTU.cpp
#include <cstdio>
#include <string>
using std::string;

#include "PatchPluto.H"
#include "LoHiSide.H"

/* ********************************************************************* */
void PatchPluto::advanceStep(FArrayBox&       a_U,
                             FArrayBox&       a_Utmp,
                             const FArrayBox& a_dV,
                             FArrayBox&       split_tags,
                             BaseFab<unsigned char>& a_Flags,
                             FluxBox&         a_F,
                             Time_Step       *Dts,
                             const Box&       UBox,
                             Grid *grid)
/*
 *  Advance the conserved variables a_U of this patch by one time step
 *  g_dt with the corner transport upwind (CTU) method: normal
 *  predictor states are computed first, followed by the corner-coupled
 *  states and the final conservative update; the fluxes of the final
 *  update are stored in a_F and passed back to Chombo.
 *
 *********************************************************************** */
{
  CH_assert(isDefined());
  CH_assert(UBox == m_currentBox);

  int nv, *in;
  int nxf, nyf, nzf, indf;
  int nxb, nyb, nzb;
  int i, j, k;

  int    errp, errm, errh;
  double ***UU[NVAR], *du;
  #ifdef SKIP_SPLIT_CELLS
   double ***splitcells;
  #endif
  double inv_dtp, *inv_dl;
  static Data d;
  static Data_Arr UH, dU;
  static Data_Arr UP[DIMENSIONS], UM[DIMENSIONS];
  static double **u, ***T;
  #if (PARABOLIC_FLUX & EXPLICIT)
   static double **dcoeff;
  #endif
  RBox  *rbox = GetRBox(DOM, CENTER);
  Index indx;
  static State_1D state;
  static unsigned char *flagp, *flagm;  // these should go inside state !!

  Riemann_Solver *Riemann = rsolver;

/* -----------------------------------------------------------------
               Check algorithm compatibilities
   ----------------------------------------------------------------- */

  #if !(GEOMETRY == CARTESIAN || GEOMETRY == CYLINDRICAL)
   print1 ("! CTU only works in cartesian or cylindrical coordinates\n");
   QUIT_PLUTO(1);
  #endif

  if (NX1_TOT > NMAX_POINT || NX2_TOT > NMAX_POINT || NX3_TOT > NMAX_POINT){
    print ("! advanceStep(): need to re-allocate matrix\n");
    QUIT_PLUTO(1);
  }

/* -----------------------------------------------------------------
                          Allocate memory
   ----------------------------------------------------------------- */

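  /* a_U is handed in by Chombo in volume-weighted form (U*dV/m_dx^3, see
     the comment near the end of this function).  Remove that weighting
     here so that the update below operates on point-wise conserved
     variables; the weighting is restored before returning.              */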
  #if GEOMETRY != CARTESIAN
   for (nv = 0; nv < NVAR; nv++) a_U.divide(a_dV,0,nv);
   #if CHOMBO_CONS_AM == YES
    #if ROTATING_FRAME == YES
     Box curBox = a_U.box();
     for(BoxIterator bit(curBox); bit.ok(); ++bit) {
       const IntVect& iv = bit();
       a_U(iv,iMPHI) /= a_dV(iv,1);
       a_U(iv,iMPHI) -= a_U(iv,RHO)*a_dV(iv,1)*g_OmegaZ;
     }
    #else
     a_U.divide(a_dV,1,iMPHI);
    #endif
   #endif
  #else
   if (g_stretch_fact != 1.) a_U /= g_stretch_fact;
  #endif

  for (nv = 0; nv < NVAR; nv++){
    UU[nv] = ArrayMap(NX3_TOT, NX2_TOT, NX1_TOT, a_U.dataPtr(nv));
  }
  #ifdef SKIP_SPLIT_CELLS
   splitcells = ArrayBoxMap(KBEG, KEND, JBEG, JEND, IBEG, IEND,
                            split_tags.dataPtr(0));
  #endif
  #if RESISTIVITY != NO
   if (d.J == NULL) d.J = ARRAY_4D(3,NX3_MAX, NX2_MAX, NX1_MAX, double);
  #endif

/* -----------------------------------------------------------
         Allocate static memory areas
   ----------------------------------------------------------- */

  if (state.flux == NULL){

    MakeState (&state);

    nxf = nyf = nzf = 1;
    D_EXPAND(nxf = NMAX_POINT;  ,
             nyf = NMAX_POINT;  ,
             nzf = NMAX_POINT;)

    d.Vc   = ARRAY_4D(NVAR, nzf, nyf, nxf, double);
    d.flag = ARRAY_3D(nzf, nyf, nxf, unsigned char);

    flagp = ARRAY_1D(NMAX_POINT, unsigned char);
    flagm = ARRAY_1D(NMAX_POINT, unsigned char);
    u     = ARRAY_2D(NMAX_POINT, NVAR, double);

    UH = ARRAY_4D(nzf, nyf, nxf, NVAR, double);
    dU = ARRAY_4D(nzf, nyf, nxf, NVAR, double);

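    /* UP and UM store the predictor states at the plus (+) and minus (-)
       cell interfaces for each sweep direction.  Their index order is
       permuted so that the sweep coordinate is always the third index:
       UP[g_dir][*pt2][*pt1] then yields a 2D slice addressed by
       (point along the sweep, variable).                                 */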
    D_EXPAND(UM[IDIR] = ARRAY_4D(nzf, nyf, nxf, NVAR, double);
             UP[IDIR] = ARRAY_4D(nzf, nyf, nxf, NVAR, double);  ,

             UM[JDIR] = ARRAY_4D(nzf, nxf, nyf, NVAR, double);
             UP[JDIR] = ARRAY_4D(nzf, nxf, nyf, NVAR, double);  ,

             UM[KDIR] = ARRAY_4D(nyf, nxf, nzf, NVAR, double);
             UP[KDIR] = ARRAY_4D(nyf, nxf, nzf, NVAR, double);)

    #if (PARABOLIC_FLUX & EXPLICIT)
     dcoeff = ARRAY_2D(NMAX_POINT, NVAR, double);
    #endif
    #if THERMAL_CONDUCTION == EXPLICIT
     T = ARRAY_3D(nzf, nyf, nxf, double);
    #endif
  }

  g_intStage = 1;
  TOT_LOOP(k,j,i) d.flag[k][j][i] = 0;

  #ifdef SKIP_SPLIT_CELLS
   DOM_LOOP(k,j,i){
     if (splitcells[k][j][i] < 0.5){
       d.flag[k][j][i] |= FLAG_SPLIT_CELL;
     }
   }
  #endif
  getPrimitiveVars (UU, &d, grid);
  #if (SHOCK_FLATTENING == MULTID) || (ENTROPY_SWITCH)
   FlagShock (&d, grid);
  #endif
  #if THERMAL_CONDUCTION == EXPLICIT
   TOT_LOOP(k,j,i) T[k][j][i] = d.Vc[PRS][k][j][i]/d.Vc[RHO][k][j][i];
  #endif

/* ----------------------------------------------------
    1. Normal predictors
   ---------------------------------------------------- */

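  /* Predictor stage: each direction is swept once; interface states and
     fluxes are built for that direction, dU and UH accumulate the
     half-step right-hand side of every direction, and the normal
     contribution is subtracted from UP/UM so that adding the total dU
     in stage 2 applies only the transverse-flux corrections.            */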
  for (g_dir = 0; g_dir < DIMENSIONS; g_dir++){

    SetIndexes (&indx, grid);
    ResetState (&d, &state, grid);
    #if (RESISTIVITY == EXPLICIT)
     GetCurrent(&d, g_dir, grid);
    #endif
    TRANSVERSE_LOOP(indx,in,i,j,k){
      g_i = i; g_j = j; g_k = k;

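      /* Let state.up/state.um point directly into the static UP/UM
         storage, so the predictor states computed in this sweep are kept
         for the corrector stage; uL/uR alias the same memory with the
         one-point offset expected by the Riemann solver.                 */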
      state.up = UP[g_dir][*(indx.pt2)][*(indx.pt1)]; state.uL = state.up;
      state.um = UM[g_dir][*(indx.pt2)][*(indx.pt1)]; state.uR = state.um + 1;

      for ((*in) = 0; (*in) < indx.ntot; (*in)++) {
        NVAR_LOOP(nv) state.v[*in][nv] = d.Vc[nv][k][j][i];
        state.flag[*in] = d.flag[k][j][i];
      }

      CheckNaN (state.v, indx.beg-1, indx.end+1, 0);
      PrimToCons (state.v, u, 0, indx.ntot-1);

#if !(PARABOLIC_FLUX & EXPLICIT)
      States  (&state, indx.beg-1, indx.end+1, grid);
      Riemann (&state, indx.beg-1, indx.end, Dts->cmax, grid);
      RightHandSide (&state, Dts, indx.beg, indx.end, 0.5*g_dt, grid);

      if (g_dir == IDIR){  /* -- initialize UU, UH and dU -- */
        for (nv = NVAR; nv--;  ){
          state.rhs[indx.beg-1][nv] = 0.0;
          state.rhs[indx.end+1][nv] = 0.0;
        }
        for ((*in) = indx.beg-1; (*in) <= indx.end+1; (*in)++){
        for (nv = NVAR; nv--;  ){
          UU[nv][k][j][i] = u[*in][nv];
          UH[k][j][i][nv] = u[*in][nv] + state.rhs[*in][nv];
          dU[k][j][i][nv] = state.rhs[*in][nv];
          state.up[*in][nv] -= state.rhs[*in][nv];
          state.um[*in][nv] -= state.rhs[*in][nv];
        }}
      }else{
        for ((*in) = indx.beg; (*in) <= indx.end; (*in)++){
        for (nv = NVAR; nv--;  ){
          dU[k][j][i][nv] += state.rhs[*in][nv];
          UH[k][j][i][nv] += state.rhs[*in][nv];
          state.up[*in][nv] -= state.rhs[*in][nv];
          state.um[*in][nv] -= state.rhs[*in][nv];
        }}
      }
#else
      for ((*in) = 0; (*in) < indx.ntot; (*in)++) {
      for (nv = NVAR; nv--;  ) {
        state.vp[*in][nv] = state.vm[*in][nv] = state.vh[*in][nv] = state.v[*in][nv];
      }}
      PrimToCons(state.vm, state.um, 0, indx.ntot-1);
      PrimToCons(state.vp, state.up, 0, indx.ntot-1);

      Riemann (&state, indx.beg-1, indx.end, Dts->cmax, grid);

   /* -----------------------------------------------------------
           compute rhs using the hyperbolic fluxes only
      ----------------------------------------------------------- */

      #if (VISCOSITY == EXPLICIT)
       for ((*in) = 0; (*in) < indx.ntot; (*in)++) for (nv = NVAR; nv--;  )
         state.par_src[*in][nv] = 0.0;
      #endif
      RightHandSide (&state, Dts, indx.beg, indx.end, 0.5*g_dt, grid);
      ParabolicFlux (d.Vc, d.J, T, &state, dcoeff, indx.beg-1, indx.end, grid);

   /* ----------------------------------------------------------
       compute LR states and subtract normal (hyperbolic)
       rhs contribution.
       NOTE: states are computed from IBEG - 1 (= indx.beg)
             up to IEND + 1 (= indx.end) since EMF has already
             been evaluated and stored using 1st order states
             above.
      ---------------------------------------------------------- */

      States (&state, indx.beg, indx.end, grid);
      for ((*in) = indx.beg; (*in) <= indx.end; (*in)++) {
      for (nv = NVAR; nv--; ){
        state.up[*in][nv] -= state.rhs[*in][nv];
        state.um[*in][nv] -= state.rhs[*in][nv];
      }}

   /* -----------------------------------------------------------
       re-compute the full rhs using the total (hyp+par) rhs
      ----------------------------------------------------------- */

      RightHandSide (&state, Dts, indx.beg, indx.end, 0.5*g_dt, grid);
      if (g_dir == IDIR){
        for ((*in) = indx.beg; (*in) <= indx.end; (*in)++) {
        for (nv = NVAR; nv--; ){
          dU[k][j][i][nv] = state.rhs[*in][nv];
          UH[k][j][i][nv] = u[*in][nv] + state.rhs[*in][nv];
        }}
      }else{
        for ((*in) = indx.beg; (*in) <= indx.end; (*in)++) {
        for (nv = NVAR; nv--; ){
          dU[k][j][i][nv] += state.rhs[*in][nv];
          UH[k][j][i][nv] += state.rhs[*in][nv];
        }}
      }
#endif

    }
  }

/* ------------------------------------------------
    2. compute time and cell centered state.
       Useful for source terms like gravity,
       curvilinear terms and Powell's 8wave.
   ------------------------------------------------ */

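  /* UH now holds U^n plus the half-step right-hand side summed over all
     directions.  Convert it to primitive variables and overwrite d.Vc,
     so that the corrector stage below sees time-centered cell values.    */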
  g_dir = IDIR;
  SetIndexes (&indx, grid);
  TRANSVERSE_LOOP(indx,in,i,j,k){
    g_i = i; g_j = j; g_k = k;
    errp = ConsToPrim(UH[k][j], state.v, indx.beg, indx.end, d.flag[k][j]);
    WARNING(
      if (errp != 0) print("! PatchUnsplit: error recovering U^{n+1/2}\n");
    )
    for ((*in) = indx.beg; (*in) <= indx.end; (*in)++) {
      NVAR_LOOP(nv) d.Vc[nv][k][j][i] = state.v[*in][nv];
    }
    #if THERMAL_CONDUCTION == EXPLICIT
     for ((*in) = indx.beg; (*in) <= indx.end; (*in)++){
       T[k][j][i] = d.Vc[PRS][k][j][i]/d.Vc[RHO][k][j][i];
     }
    #endif
  }

/* ----------------------------------------------------
    3. Final Conservative Update
   ---------------------------------------------------- */

  int numFlux = numFluxes();
  a_F.resize(UBox,numFlux);
  a_F.setVal(0.0);

  g_intStage = 2;
  for (g_dir = 0; g_dir < DIMENSIONS; g_dir++){

    SetIndexes (&indx, grid);

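    /* nxf/nyf/nzf are the sizes of the face-centered flux box along the
       current direction (one extra point along g_dir), while nxb/nyb/nzb
       are its lower-corner indices; both enter the flat index indf used
       below when copying the fluxes into a_F.                            */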
    nxf = grid[IDIR].np_int + (g_dir == IDIR);
    nyf = grid[JDIR].np_int + (g_dir == JDIR);
    nzf = grid[KDIR].np_int + (g_dir == KDIR);

    nxb = grid[IDIR].lbeg - (g_dir == IDIR);
    nyb = grid[JDIR].lbeg - (g_dir == JDIR);
    nzb = grid[KDIR].lbeg - (g_dir == KDIR);

    ResetState (&d, &state, grid);
    #if (RESISTIVITY == EXPLICIT)
     GetCurrent(&d, g_dir, grid);
    #endif
    TRANSVERSE_LOOP(indx,in,i,j,k){

      g_i = i; g_j = j; g_k = k;

      state.up = UP[g_dir][*(indx.pt2)][*(indx.pt1)]; state.uL = state.up;
      state.um = UM[g_dir][*(indx.pt2)][*(indx.pt1)]; state.uR = state.um + 1;

   /* --------------------------------------------
       Correct normal predictors with transverse
       fluxes to obtain corner coupled states.
      -------------------------------------------- */

      for ((*in) = indx.beg-1; (*in) <= indx.end+1; (*in)++){
        for (nv = NVAR; nv--;  ) {
          state.up[*in][nv] += dU[k][j][i][nv];
          state.um[*in][nv] += dU[k][j][i][nv];
          state.vh[*in][nv]  = d.Vc[nv][k][j][i];
        }
        flagp[*in] = flagm[*in] = state.flag[*in] = d.flag[k][j][i];
      }

   /* ------------------------------------------------
       convert time and cell centered state to
       conservative vars and corner-coupled states to
       primitive.
      ------------------------------------------------ */

      errp = ConsToPrim (state.up, state.vp, indx.beg-1, indx.end+1, flagp);
      errm = ConsToPrim (state.um, state.vm, indx.beg-1, indx.end+1, flagm);

   /* ---- check admissibility of corner coupled states ---- */

      if (errm || errp){
        WARNING(
          print ("! Corner coupled states not physical: reverting to 1st order (level=%d)\n",
                 m_level);
          showPatch(grid);
        )
        for ((*in) = indx.beg-1; (*in) <= indx.end+1; (*in)++){
          if (   (flagp[*in] & FLAG_CONS2PRIM_FAIL)
              || (flagm[*in] & FLAG_CONS2PRIM_FAIL)){
            for (nv = 0; nv < NVAR; nv++) state.v[*in][nv] = d.Vc[nv][k][j][i];

            for (nv = 0; nv < NVAR; nv++) {
              state.vm[*in][nv] = state.vp[*in][nv] = state.vh[*in][nv] = state.v[*in][nv];
            }
          }
        }
      }

   /* -------------------------------------------------------
           compute hyperbolic and parabolic fluxes
      ------------------------------------------------------- */

      Riemann (&state, indx.beg-1, indx.end, Dts->cmax, grid);
      #if (PARABOLIC_FLUX & EXPLICIT)
       ParabolicFlux (d.Vc, d.J, T, &state, dcoeff, indx.beg - 1, indx.end, grid);
       inv_dl = GetInverse_dl(grid);
       for ((*in) = indx.beg-1; (*in) <= indx.end; (*in)++) {
         inv_dtp = 0.0;
         #if VISCOSITY == EXPLICIT
          inv_dtp = MAX(inv_dtp, dcoeff[*in][MX1]);
         #endif
         #if RESISTIVITY == EXPLICIT
          EXPAND(inv_dtp = MAX(inv_dtp, dcoeff[*in][BX1]);  ,
                 inv_dtp = MAX(inv_dtp, dcoeff[*in][BX2]);  ,
                 inv_dtp = MAX(inv_dtp, dcoeff[*in][BX3]);)
         #endif
         #if THERMAL_CONDUCTION == EXPLICIT
          inv_dtp = MAX(inv_dtp, dcoeff[*in][ENG]);
         #endif
         inv_dtp *= inv_dl[*in]*inv_dl[*in];

         Dts->inv_dtp = MAX(Dts->inv_dtp, inv_dtp);
       }
      #endif

      RightHandSide (&state, Dts, indx.beg, indx.end, g_dt, grid);
      saveFluxes (&state, indx.beg-1, indx.end, grid);

      for ((*in) = indx.beg; (*in) <= indx.end; (*in)++) {
      for (nv = 0; nv < NVAR; nv++) {
        UU[nv][k][j][i] += state.rhs[*in][nv];
      }}

// Put fluxes in the FArrayBox a_F to be passed to Chombo

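      /* a_F[g_dir].dataPtr(0) is treated as a flat array in which i varies
         fastest, then j, then k, with the variable index outermost; indf
         gives the position of flux component nv at face (i,j,k) relative
         to the box corner (nxb,nyb,nzb).                                   */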
      for ((*in) = indx.beg-1; (*in) <= indx.end; (*in)++) {
        #if HAVE_ENERGY && ENTROPY_SWITCH
         state.flux[*in][ENG] = 0.0;
         state.flux[*in][ENTR] = 0.0;
        #endif
        for (nv = 0; nv < NVAR; nv++) {
          indf = nv*nzf*nyf*nxf + (k - nzb)*nyf*nxf
                                + (j - nyb)*nxf
                                + (i - nxb);
          a_F[g_dir].dataPtr(0)[indf] = state.flux[*in][nv];
        }
      }
    }
  }

  #ifdef GLM_MHD
   glm_ch_max_loc = MAX(glm_ch_max_loc, Dts->inv_dta*m_dx);
// glm_ch_max_loc = MAX(glm_ch_max_loc, Dts->inv_dta); /* If subcycling is turned off */
   double dtdx = g_dt/g_coeff_dl_min/m_dx;
// double dtdx = g_dt/g_coeff_dl_min; /* If subcycling is turned off */
   GLM_Source (UU, dtdx, grid);
  #endif

/* ----------------------------------------------
    Source terms included via operator splitting
   ---------------------------------------------- */

#if COOLING != NO
  ConsToPrim3D(UU, d.Vc, d.flag, rbox);
  SplitSource (&d, g_dt, Dts, grid);
  PrimToCons3D(d.Vc, UU, rbox);
#endif

#if ENTROPY_SWITCH
/* -------------------------------------------------------------------
    At this stage U^(n+1) contains both the total energy (E) and the
    conserved entropy (sigma_c), which have evolved independently.
    To synchronize them we convert UU to primitive variables and back
    to conservative, ensuring that in every cell E and sigma_c can be
    mapped one into the other consistently.
    This step is *essential* when, at the next step, primitive
    variables will be computed in every zone from entropy rather than
    selectively from energy and entropy.
   ------------------------------------------------------------------- */

  ConsToPrim3D(UU, d.Vc, d.flag, rbox);
  PrimToCons3D(d.Vc, UU, rbox);
#endif

/* ---------------------------------------------------------------
    We pass U*dV/m_dx^3 back to Chombo rather than U.
   --------------------------------------------------------------- */

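  /* Undo the initial dV division: restore the volume weighting (and,
     when CHOMBO_CONS_AM is enabled, the angular-momentum weighting and
     the rotating-frame term) before handing a_U back to Chombo.          */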
  #if GEOMETRY != CARTESIAN
   #if CHOMBO_CONS_AM == YES
    #if ROTATING_FRAME == YES
     for(BoxIterator bit(curBox); bit.ok(); ++bit) {
       const IntVect& iv = bit();
       a_U(iv,iMPHI) += a_U(iv,RHO)*a_dV(iv,1)*g_OmegaZ;
       a_U(iv,iMPHI) *= a_dV(iv,1);
     }
    #else
     a_U.mult(a_dV,1,iMPHI);
    #endif
   #endif
   for (nv = 0; nv < NVAR; nv++) a_U.mult(a_dV,0,nv);
  #else
   if (g_stretch_fact != 1.) a_U *= g_stretch_fact;
  #endif

/* -------------------------------------------------
               Free memory
   ------------------------------------------------- */

  for (nv = 0; nv < NVAR; nv++) FreeArrayMap(UU[nv]);

  #ifdef SKIP_SPLIT_CELLS
   FreeArrayBoxMap (splitcells, KBEG, KEND, JBEG, JEND, IBEG, IEND);
  #endif
}
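The flat index indf used when filling a_F can be checked in isolation. The
stand-alone sketch below is not part of PLUTO; the box size, offsets and
sample indices are made-up placeholders. It reproduces the same
component-slowest, i-fastest mapping used above when the fluxes are copied
into a_F[g_dir]:

#include <stdio.h>

int main(void)
{
  /* hypothetical face-centered box size and lower-corner offsets */
  const int nxf = 4, nyf = 3, nzf = 2;
  const int nxb = 2, nyb = 2, nzb = 2;
  const int nvar = 5;                      /* number of flux components */

  /* sample component and face indices (global, as in the loop above) */
  int nv = 2, i = 3, j = 4, k = 3;

  int indf = nv*nzf*nyf*nxf + (k - nzb)*nyf*nxf
                            + (j - nyb)*nxf
                            + (i - nxb);

  printf ("component %d, face (%d,%d,%d) -> flat index %d of %d\n",
          nv, i, j, k, indf, nvar*nzf*nyf*nxf);
  return 0;
}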