PLUTO
update_stage.c File Reference

Single stage integration for RK time stepping. More...

#include "pluto.h"
Include dependency graph for update_stage.c:

Go to the source code of this file.

Functions

static void SaveAMRFluxes (const State_1D *, double **, int, int, Grid *)
 
static intList TimeStepIndexList ()
 
void UpdateStage (const Data *d, Data_Arr UU, double **aflux, Riemann_Solver *Riemann, double dt, Time_Step *Dts, Grid *grid)
 

Detailed Description

Single stage integration for RK time stepping.

Advance the equations in conservative form by taking a single stage in the form

\[ U \quad\Longrightarrow \quad U + \Delta t R(V) \]

where U is a 3D array of conservative variables, V is a 3D array of primitive variables, R(V) is the right hand side containing flux differences and source terms. Note that U and V may not necessarily be the map of each other, i.e., U is not U(V). The right hand side can contain contributions from

  • the direction set by the global variable g_dir, when DIMENSIONAL_SPLITTING == YES;
  • all directions when DIMENSIONAL_SPLITTING == NO;

When the integrator stage is the first one (predictor), this function also computes the maximum of inverse time steps for hyperbolic and parabolic terms (if the latter are included explicitly).

Authors
A. Mignone (mignone@ph.unito.it)
C. Zanni (zanni@oato.inaf.it)
P. Tzeferacos (petros.tzeferacos@ph.unito.it) T. Matsakos
Date
March 02, 2014

Definition in file update_stage.c.

Function Documentation

static void SaveAMRFluxes ( const State_1D * ,
double **  ,
int  ,
int  ,
Grid *  
)
static

Here is the caller graph for this function:

intList TimeStepIndexList ( )
static

Return the intList of inverse time step indices.

Definition at line 456 of file update_stage.c.

/* Build the intList of indices used to address the inverse time-step
 * accumulators (the C_dt[] arrays in UpdateStage):
 *   RHO      -- always present; slot used for the advective time step;
 *   MX1      -- only when viscosity is integrated explicitly;
 *   BX1..BX3 -- only when resistivity is explicit (EXPAND presumably emits
 *               one assignment per active vector component -- confirm in
 *               macros.h);
 *   ENG      -- only when thermal conduction is explicit.
 * cdt.nvar is set to the number of entries actually filled in. */
461 {
462  int i = 0;
463  intList cdt;
464 
465  cdt.indx[i++] = RHO;
466  #if VISCOSITY == EXPLICIT
467  cdt.indx[i++] = MX1;
468  #endif
469  #if RESISTIVITY == EXPLICIT
470  EXPAND(cdt.indx[i++] = BX1; ,
471  cdt.indx[i++] = BX2; ,
472  cdt.indx[i++] = BX3;)
473  #endif
474  #if THERMAL_CONDUCTION == EXPLICIT
475  cdt.indx[i++] = ENG;
476  #endif
477  cdt.nvar = i;
478 
479  return cdt;
480 }
#define MX1
Definition: mod_defs.h:20
#define RHO
Definition: mod_defs.h:19
int nvar
Number of variables.
Definition: structs.h:330
#define BX3
Definition: mod_defs.h:27
int i
Definition: analysis.c:2
int indx[2046]
Array of integers containing variable indices.
Definition: structs.h:329
#define BX1
Definition: mod_defs.h:25
#define BX2
Definition: mod_defs.h:26

Here is the caller graph for this function:

void UpdateStage ( const Data * d,
Data_Arr  UU,
double **  aflux,
Riemann_Solver * Riemann,
double  dt,
Time_Step * Dts,
Grid * grid 
)
Parameters
[in,out]dpointer to PLUTO Data structure
[in,out]UUdata array containing conservative variables at the previous time step to be updated
[out]afluxinterface fluxes needed for refluxing operations (only with AMR)
[in]Riemannpointer to a Riemann solver function
[in]dtthe time step for the current update step
[in,out]Dtspointer to time step structure
[in]gridpointer to array of Grid structures

Definition at line 38 of file update_stage.c.

53 {
54  int i, j, k;
55  int nv, dir, beg_dir, end_dir;
56  int *ip;
57  double *inv_dl, dl2;
58  static double ***T, ***C_dt[NVAR], **dcoeff;
59  static State_1D state;
60  Index indx;
61  intList cdt_list;
62 
/* Split schemes sweep only the current direction (g_dir); unsplit
   schemes sweep all DIMENSIONS within this single call. */
63  #if DIMENSIONAL_SPLITTING == YES
64  beg_dir = end_dir = g_dir;
65  #else
66  beg_dir = 0;
67  end_dir = DIMENSIONS-1;
68  #endif
69 
70  cdt_list = TimeStepIndexList();
71 
72 /* --------------------------------------------------------------
73  1. Allocate memory
74  -------------------------------------------------------------- */
75 
/* 'state' is static, so MakeState (and the dcoeff allocation) runs
   only on the very first call. */
76  if (state.v == NULL){
77  MakeState (&state);
78  #if (PARABOLIC_FLUX & EXPLICIT)
79  dcoeff = ARRAY_2D(NMAX_POINT, NVAR, double);
80  #endif
81  }
82 
83 /* --------------------------------------------------------------
84  2. Reset arrays.
85  C_dt is an array used to store the inverse time step for
86  advection and diffusion.
87  We use C_dt[RHO] for advection,
88  C_dt[MX1] for viscosity,
89  C_dt[BX1...BX3] for resistivity and
90  C_dt[ENG] for thermal conduction.
91  --------------------------------------------------------------- */
92 
93  #if DIMENSIONAL_SPLITTING == NO
94  if (C_dt[RHO] == NULL){
95  FOR_EACH(nv, 0, (&cdt_list)) {
96  C_dt[nv] = ARRAY_3D(NX3_MAX, NX2_MAX, NX1_MAX, double);
97  }
98  }
99 
/* Zero the accumulators at the start of each predictor stage only
   (g_intStage == 1); corrector stages leave them untouched. */
100  if (g_intStage == 1) KTOT_LOOP(k) JTOT_LOOP(j){
101  FOR_EACH(nv, 0, (&cdt_list)) {
102  memset ((void *)C_dt[nv][k][j],'\0', NX1_TOT*sizeof(double));
103  }
104  }
105  #endif
106 
107 /* ------------------------------------------------
108  2a. Compute current arrays
109  ------------------------------------------------ */
110 
/* With staggered MHD the current is computed once here for all
   directions (dir = -1); otherwise per-direction inside the loop. */
111  #if (RESISTIVITY == EXPLICIT) && (defined STAGGERED_MHD)
112  GetCurrent (d, -1, grid);
113  #endif
114 
115 /* ------------------------------------------------
116  2b. Compute Temperature array
117  ------------------------------------------------ */
118 
/* T = p/rho; NOTE(review): presumably in code units proportional to
   temperature via the equation of state -- confirm. */
119  #if THERMAL_CONDUCTION == EXPLICIT
120  if (T == NULL) T = ARRAY_3D(NX3_MAX, NX2_MAX, NX1_MAX, double);
121  TOT_LOOP(k,j,i) T[k][j][i] = d->Vc[PRS][k][j][i]/d->Vc[RHO][k][j][i];
122  #endif
123 
124 /* ----------------------------------------------------------------
125  3. Main loop on directions
126  ---------------------------------------------------------------- */
127 
128  for (dir = beg_dir; dir <= end_dir; dir++){
129 
130  g_dir = dir;
131  SetIndexes (&indx, grid); /* -- set normal and transverse indices -- */
132  ResetState (d, &state, grid);
133 
134  #if (RESISTIVITY == EXPLICIT) && !(defined STAGGERED_MHD)
135  GetCurrent(d, dir, grid);
136  #endif
137 
/* NOTE(review): TRANSVERSE_LOOP presumably iterates over every 1-D row
   transverse to g_dir, with *ip the running index along the sweep
   direction -- confirm against macros.h. Each row is copied into the
   1-D 'state' scratch arrays, then solved with the Riemann solver. */
138  TRANSVERSE_LOOP(indx,ip,i,j,k){
139  g_i = i; g_j = j; g_k = k;
140  for ((*ip) = 0; (*ip) < indx.ntot; (*ip)++) {
141  VAR_LOOP(nv) state.v[(*ip)][nv] = d->Vc[nv][k][j][i];
142  state.flag[*ip] = d->flag[k][j][i];
143  #ifdef STAGGERED_MHD
144  state.bn[(*ip)] = d->Vs[g_dir][k][j][i];
145  #endif
146  }
147  CheckNaN (state.v, 0, indx.ntot-1,0);
148  States (&state, indx.beg - 1, indx.end + 1, grid);
149  Riemann (&state, indx.beg - 1, indx.end, Dts->cmax, grid);
150  #ifdef STAGGERED_MHD
151  CT_StoreEMF (&state, indx.beg - 1, indx.end, grid);
152  #endif
153  #if (PARABOLIC_FLUX & EXPLICIT)
154  ParabolicFlux(d->Vc, d->J, T, &state, dcoeff, indx.beg-1, indx.end, grid);
155  #endif
156  #if UPDATE_VECTOR_POTENTIAL == YES
157  VectorPotentialUpdate (d, NULL, &state, grid);
158  #endif
159  #ifdef SHEARINGBOX
160  SB_SaveFluxes (&state, grid);
161  #endif
162  RightHandSide (&state, Dts, indx.beg, indx.end, dt, grid);
163 
164  /* -- update: U = U + dt*R -- */
165 
/* Chombo (AMR) stores conservative variables as UU[nv][k][j][i] and
   additionally saves interface fluxes for later refluxing; the
   static-grid layout is UU[k][j][i][nv]. */
166  #ifdef CHOMBO
167  for ((*ip) = indx.beg; (*ip) <= indx.end; (*ip)++) {
168  VAR_LOOP(nv) UU[nv][k][j][i] += state.rhs[*ip][nv];
169  }
170  SaveAMRFluxes (&state, aflux, indx.beg-1, indx.end, grid);
171  #else
172  for ((*ip) = indx.beg; (*ip) <= indx.end; (*ip)++) {
173  VAR_LOOP(nv) UU[k][j][i][nv] += state.rhs[*ip][nv];
174  }
175  #endif
176 
/* Time-step information is gathered only during the predictor stage. */
177  if (g_intStage > 1) continue;
178 
179  /* -- compute inverse dt coefficients when g_intStage = 1 -- */
180 
181  inv_dl = GetInverse_dl(grid);
182  for ((*ip) = indx.beg; (*ip) <= indx.end; (*ip)++) {
183  #if DIMENSIONAL_SPLITTING == NO
184 
/* Unsplit: accumulate per-cell contributions into C_dt[] (summed over
   directions); averaging the two interface values of cmax/dcoeff
   centers them on the cell. */
185  #if !GET_MAX_DT
186  C_dt[0][k][j][i] += 0.5*( Dts->cmax[(*ip)-1]
187  + Dts->cmax[*ip])*inv_dl[*ip];
188  #endif
189  #if (PARABOLIC_FLUX & EXPLICIT)
190  dl2 = 0.5*inv_dl[*ip]*inv_dl[*ip];
191  FOR_EACH(nv, 1, (&cdt_list)) {
192  C_dt[nv][k][j][i] += (dcoeff[*ip][nv]+dcoeff[(*ip)-1][nv])*dl2;
193  }
194  #endif
195 
196  #elif DIMENSIONAL_SPLITTING == YES
197 
/* Split: no per-cell accumulators needed; take the running maximum of
   the inverse time steps directly. */
198  #if !GET_MAX_DT
199  Dts->inv_dta = MAX(Dts->inv_dta, Dts->cmax[*ip]*inv_dl[*ip]);
200  #endif
201  #if (PARABOLIC_FLUX & EXPLICIT)
202  dl2 = inv_dl[*ip]*inv_dl[*ip];
203  FOR_EACH(nv, 1, (&cdt_list)) {
204  Dts->inv_dtp = MAX(Dts->inv_dtp, dcoeff[*ip][nv]*dl2);
205  }
206  #endif
207  #endif
208  }
209  }
210  }
211 
212 /* -------------------------------------------------------------------
213  4. Additional terms here
214  ------------------------------------------------------------------- */
215 
216  #if (ENTROPY_SWITCH) && (RESISTIVITY == EXPLICIT)
217  EntropyOhmicHeating(d, UU, dt, grid);
218  #endif
219 
220  #ifdef SHEARINGBOX
221  SB_CorrectFluxes (UU, 0.0, dt, grid);
222  #endif
223 
224  #ifdef STAGGERED_MHD
225  CT_Update(d, d->Vs, dt, grid);
226  #endif
227 
/* Split schemes keep per-direction maxima updated above; nothing left
   to reduce, so we are done. */
228  #if DIMENSIONAL_SPLITTING == YES
229  return;
230  #endif
231 
232 /* -------------------------------------------------------------------
233  5. Reduce dt for dimensionally unsplit schemes.
234  ------------------------------------------------------------------- */
235 
/* The C_dt[] accumulators are filled only during the predictor stage. */
236  if (g_intStage > 1) return;
237 
238  for (k = KBEG; k <= KEND; k++){ g_k = k;
239  for (j = JBEG; j <= JEND; j++){ g_j = j;
240  for (i = IBEG; i <= IEND; i++){
241  #if !GET_MAX_DT
242  Dts->inv_dta = MAX(Dts->inv_dta, C_dt[0][k][j][i]);
243  #endif
244  #if (PARABOLIC_FLUX & EXPLICIT)
245  FOR_EACH(nv, 1, (&cdt_list)) {
246  Dts->inv_dtp = MAX(Dts->inv_dtp, C_dt[nv][k][j][i]);
247  }
248  #endif
249  }
250  }}
/* Normalize by the number of directions summed over in the unsplit
   accumulation above. */
251  #if !GET_MAX_DT
252  Dts->inv_dta /= (double)DIMENSIONS;
253  #endif
254  #if (PARABOLIC_FLUX & EXPLICIT)
255  Dts->inv_dtp /= (double)DIMENSIONS;
256  #endif
257 
258 }
static void SaveAMRFluxes(const State_1D *, double **, int, int, Grid *)
#define MAX(a, b)
Definition: macros.h:101
tuple T
Definition: Sph_disk.py:33
void CT_StoreEMF(const State_1D *state, int beg, int end, Grid *grid)
Definition: ct_emf.c:35
double ** v
Cell-centered primitive variables at the base time level, v[i] = v(x_i).
Definition: structs.h:134
double **** Vs
The main four-index data array used for face-centered staggered magnetic fields.
Definition: structs.h:43
void ResetState(const Data *, State_1D *, Grid *)
Definition: set_indexes.c:163
int end
Global end index for the local array.
Definition: structs.h:116
void EntropyOhmicHeating(const Data *, Data_Arr, double, Grid *)
double **** J
Electric current defined as curl(B).
Definition: structs.h:54
void GetCurrent(const Data *d, int dir, Grid *grid)
Definition: res_functions.c:97
int ntot
Definition: structs.h:318
void States(const State_1D *, int, int, Grid *)
Definition: plm_states.c:430
#define RHO
Definition: mod_defs.h:19
int g_intStage
Gives the current integration stage of the time stepping method (predictor = 0, 1st corrector = 1...
Definition: globals.h:98
double **** Vc
The main four-index data array used for cell-centered primitive variables.
Definition: structs.h:31
#define KTOT_LOOP(k)
Definition: macros.h:40
double * GetInverse_dl(const Grid *)
Definition: set_geometry.c:205
void SB_CorrectFluxes(Data_Arr U, double t, double dt, Grid *grid)
Definition: sb_flux.c:105
void CT_Update(const Data *d, Data_Arr Bs, double dt, Grid *grid)
Definition: ct.c:29
#define ARRAY_3D(nx, ny, nz, type)
Definition: prototypes.h:172
double * cmax
Maximum signal velocity for hyperbolic eqns.
Definition: structs.h:214
#define FOR_EACH(nv, beg, list)
Definition: macros.h:89
void MakeState(State_1D *)
Definition: tools.c:51
double inv_dtp
Inverse of diffusion (parabolic) time step .
Definition: structs.h:217
#define NX3_MAX
Definition: pluto.h:743
double inv_dta
Inverse of advection (hyperbolic) time step, .
Definition: structs.h:215
int g_i
x1 grid index when sweeping along the x2 or x3 direction.
Definition: globals.h:82
int CheckNaN(double **, int, int, int)
Definition: tools.c:19
#define VAR_LOOP(n)
Definition: macros.h:226
#define NX2_MAX
Definition: pluto.h:742
#define TOT_LOOP(k, j, i)
Definition: macros.h:44
int g_dir
Specifies the current sweep or direction of integration.
Definition: globals.h:86
int beg
Global start index for the local array.
Definition: structs.h:115
int g_j
x2 grid index when sweeping along the x1 or x3 direction.
Definition: globals.h:83
int j
Definition: analysis.c:2
long int IEND
Upper grid index of the computational domain in the X1 direction for the local processor...
Definition: globals.h:37
int k
Definition: analysis.c:2
void SetIndexes(Index *indx, Grid *grid)
Definition: set_indexes.c:49
void RightHandSide(const State_1D *state, Time_Step *Dts, int beg, int end, double dt, Grid *grid)
Definition: rhs.c:88
int g_k
x3 grid index when sweeping along the x1 or x2 direction.
Definition: globals.h:84
int beg
Definition: structs.h:318
long int NMAX_POINT
Maximum number of points among the three directions, boundaries excluded.
Definition: globals.h:62
#define JTOT_LOOP(j)
Definition: macros.h:39
int i
Definition: analysis.c:2
double * bn
Face magnetic field, bn = bx(i+1/2)
Definition: structs.h:165
Definition: structs.h:317
void SB_SaveFluxes(State_1D *state, Grid *grid)
Definition: sb_flux.c:57
void ParabolicFlux(Data_Arr V, Data_Arr J, double ***T, const State_1D *state, double **dcoeff, int beg, int end, Grid *grid)
long int KBEG
Lower grid index of the computational domain in the X3 direction for the local processor...
Definition: globals.h:43
#define ARRAY_2D(nx, ny, type)
Definition: prototypes.h:171
static intList TimeStepIndexList()
Definition: update_stage.c:456
long int KEND
Upper grid index of the computational domain in the X3 direction for the local processor...
Definition: globals.h:45
void VectorPotentialUpdate(const Data *d, const void *vp, const State_1D *state, const Grid *grid)
int end
Definition: structs.h:318
long int JBEG
Lower grid index of the computational domain in the X2 direction for the local processor...
Definition: globals.h:39
#define NVAR
Definition: pluto.h:609
#define NX1_MAX
Definition: pluto.h:741
long int JEND
Upper grid index of the computational domain in the X2 direction for the local processor...
Definition: globals.h:41
#define TRANSVERSE_LOOP(indx, ip, i, j, k)
Definition: macros.h:54
long int NX1_TOT
Total number of zones in the X1 direction (boundaries included) for the local processor.
Definition: globals.h:55
long int IBEG
Lower grid index of the computational domain in the the X1 direction for the local processor...
Definition: globals.h:35
#define DIMENSIONS
Definition: definitions_01.h:2

Here is the call graph for this function:

Here is the caller graph for this function: