PLUTO
sb_flux.c
/* ///////////////////////////////////////////////////////////////////// */
/*!
  \file
  \brief Enforce conservation at the X1 boundaries in the shearing-box
         module.

  This file provides functions to store and then modify the upwind
  fluxes computed during the Riemann solver at the leftmost and
  rightmost physical boundaries.
  These tasks are performed only when any of the SB_SYMMETRIZE_HYDRO,
  SB_SYMMETRIZE_EY or SB_SYMMETRIZE_EZ flags is set to YES in
  Src/MHD/shearingbox.h, and they serve to avoid loss of conservation
  in the hydrodynamical variables (density, momentum, energy)
  and/or magnetic fields.

  This is first achieved by calling the SB_SaveFluxes() function during
  the time stepping scheme, with the purpose of storing the leftmost and
  rightmost conservative fluxes in the x-direction into the static
  arrays FluxL[] and FluxR[].

  These fluxes are then used by SB_CorrectFluxes(), which interpolates
  the fluxes and properly corrects the leftmost and rightmost
  cell-centered flow quantities to ensure conservation.

  The staggered magnetic field is treated in a similar way by
  SB_CorrectEMF().

  \b References
     - "??" \n
       Mignone et al, in preparation

  \authors A. Mignone (mignone@ph.unito.it)\n
           G. Muscianisi (g.musicanisi@cineca.it)\n
           G. Bodo (bodo@oato.inaf.it)

  \date   Feb 14, 2014
*/
/* ///////////////////////////////////////////////////////////////////// */
#include "pluto.h"

static double ***FluxL;  /**< Array of fluxes at the left x-boundary.  */
static double ***FluxR;  /**< Array of fluxes at the right x-boundary. */

#define NVLAST  (NVAR-1)

/*! Sets the weight coefficient in the range [0,1] used during flux
    symmetrization at the left and right boundaries,

      FL -> swL*FL + swR*I(FR)
      FR -> swL*I(FL) + swR*FR

    where swR = 1-swL and I() stands for conservative interpolation. */
#define swL  0.0
#define swR  (1.0 - swL)

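/* ------------------------------------------------------------------------
   Illustrative sketch (not part of the module): the weighted average
   performed during flux symmetrization, written out for a single row of
   flux values. symmetrize_row(), interpL and interpR are hypothetical
   names; in the actual module the interpolation I() is carried out by
   SB_SetBoundaryVar() on the shifted boundary arrays.
   ------------------------------------------------------------------------ */
#if 0
void symmetrize_row (double *FL, double *FR,
                     double *interpL, double *interpR, int n)
{
  int j;
  for (j = 0; j < n; j++){
    double fl = swL*FL[j] + swR*interpR[j];  /* FL -> swL*FL + swR*I(FR) */
    double fr = swL*interpL[j] + swR*FR[j];  /* FR -> swL*I(FL) + swR*FR */
    FL[j] = fl;
    FR[j] = fr;
  }
}
#endif
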
/* ********************************************************************* */
void SB_SaveFluxes (State_1D *state, Grid *grid)
/*!
 * Store the leftmost and rightmost conservative fluxes in the x
 * direction into FluxL and FluxR.
 *
 * \param [in] state pointer to a State_1D structure
 * \param [in] grid  pointer to an array of Grid structures
 *
 * \return This function has no return value.
 *********************************************************************** */
{
  int nv;

  #if SB_SYMMETRIZE_HYDRO == NO
   return;
  #endif

  if (FluxL == NULL){
    FluxL = ARRAY_3D(NVAR, NX3_TOT, NX2_TOT, double);
    FluxR = ARRAY_3D(NVAR, NX3_TOT, NX2_TOT, double);
  }

/* ----------------------------------------------------
    Save fluxes on the LEFT physical boundary
   ---------------------------------------------------- */

  if (g_dir == IDIR && grid[IDIR].lbound != 0){

  /* -- add the pressure term to the normal momentum flux,
        so that the stored flux is fully conservative -- */

    state->flux[IBEG - 1][MX1] += state->press[IBEG - 1];
    state->press[IBEG - 1]      = 0.0;
    for (nv = 0; nv <= NVLAST; nv++){
      FluxL[nv][g_k][g_j] = state->flux[IBEG - 1][nv];
    }
  }

/* ----------------------------------------------------
    Save fluxes on the RIGHT physical boundary
   ---------------------------------------------------- */

  if (g_dir == IDIR && grid[IDIR].rbound != 0){
    state->flux[IEND][MX1] += state->press[IEND];
    state->press[IEND]      = 0.0;
    for (nv = 0; nv <= NVLAST; nv++){
      FluxR[nv][g_k][g_j] = state->flux[IEND][nv];
    }
  }
}

/* ********************************************************************* */
void SB_CorrectFluxes (Data_Arr U, double t, double dt, Grid *grid)
/*!
 * Interpolate the x-fluxes FluxL and FluxR and properly correct the
 * leftmost and rightmost cells to ensure conservation.
 *
 * \param [in,out] U    data array containing cell-centered quantities
 * \param [in]     t    the time at which fluxes have been computed
 * \param [in]     dt   the time step being used in the integrator stage
 * \param [in]     grid pointer to array of Grid structures
 *********************************************************************** */
{
  int    i, j, k, nv;
  double dtdx;
  static double **fL, **fR;
  static double ****Ftmp;

  #if SB_SYMMETRIZE_HYDRO == NO
   return;
  #endif

  if (fL == NULL){
    fL   = ARRAY_2D(NVAR, NMAX_POINT, double);
    fR   = ARRAY_2D(NVAR, NMAX_POINT, double);
    Ftmp = ARRAY_4D(NVAR, NX3_TOT, NX2_TOT, 1, double);
  }

  dtdx = dt/grid[IDIR].dx[IBEG];

/* -- Re-define the time level at which the boundary shift must be
      evaluated, depending on the stage of the current time-stepping
      scheme -- */

  {
    t = g_time;
    #if TIME_STEPPING == RK2
     if (g_intStage == 2) t += g_dt;
    #elif TIME_STEPPING == RK3
     if (g_intStage == 2) t += 0.5*g_dt;
     if (g_intStage == 3) t += g_dt;
    #endif
    #ifdef CTU
     if (g_intStage == 2) t += 0.5*g_dt;
    #endif
  }

/* -----------------------------------------------------------------------
    Exchange left and right fluxes if they were initially computed on
    different processors.
    After this step, both the left and the right processor will share
    the same fluxes FluxL[] and FluxR[].
   ----------------------------------------------------------------------- */

  #ifdef PARALLEL
   ExchangeX (FluxL[0][0], FluxR[0][0], NVAR*NX2_TOT*NX3_TOT, grid);
  #endif

/* --------------------------------------------------------------------- */
/*! \note
    Modify the values of FluxR (on the left) and FluxL (on the right)
    to properly account for the shift induced by the moving boundaries.
    For safety reasons (to avoid overwriting arrays when a processor
    owns both the left and the right side) we first copy, then
    interpolate and finally average:

    - On the left boundary:

        FluxR -> FluxLR -> I(FluxLR) -> fL = swL*FluxL + swR*FluxLR

    - On the right boundary:

        FluxL -> FluxLR -> I(FluxLR) -> fR = swL*FluxLR + swR*FluxR

   --------------------------------------------------------------------- */

  if (grid[IDIR].lbound != 0){  /* ---- Left x-boundary ---- */

    RBox box;

  /* -- set grid ranges of FluxR and exchange b.c. -- */

    box.ib = 0; box.ie = 0;
    box.jb = 0; box.je = NX2_TOT-1;
    box.kb = 0; box.ke = NX3_TOT-1;

  /* -- copy FluxR on temporary storage and shift -- */

    for (nv = 0; nv <= NVLAST; nv++) {
      BOX_LOOP((&box), k, j, i) Ftmp[nv][k][j][i] = FluxR[nv][k][j];
      SB_SetBoundaryVar(Ftmp[nv], &box, X1_BEG, t, grid);
    }

  /* -- symmetrize fluxes -- */

    for (k = KBEG; k <= KEND; k++){
      for (nv = 0; nv <= NVLAST; nv++){
      for (j = JBEG; j <= JEND; j++){
        fL[nv][j] = swL*FluxL[nv][k][j] + swR*Ftmp[nv][k][j][0];
      }}
      for (j = JBEG; j <= JEND; j++){
        #if HAVE_ENERGY
         fL[ENG][j] += swR*sb_vy*(fL[MX2][j] + 0.5*sb_vy*fL[RHO][j]);
        #endif
        #ifdef GLM_MHD
         fL[BX2][j] -= swR*sb_vy*fL[PSI_GLM][j]/glm_ch/glm_ch;
/*       fL[BX2][j] -= 0.5*sb_vy*FluxL[PSI_GLM][k][j]/glm_ch/glm_ch; */
        #endif
        fL[MX2][j] += swR*sb_vy*fL[RHO][j];

      /* -- update solution vector -- */

        for (nv = 0; nv <= NVLAST; nv++){
          U[k][j][IBEG][nv] += dtdx*(fL[nv][j] - FluxL[nv][k][j]);
        }
      }
    }
  }

  if (grid[IDIR].rbound != 0){  /* ---- Right x-boundary ---- */

    RBox box;

  /* -- set grid ranges of FluxL and exchange b.c. -- */

    box.ib = 0; box.ie = 0;
    box.jb = 0; box.je = NX2_TOT-1;
    box.kb = 0; box.ke = NX3_TOT-1;

  /* -- copy FluxL on temporary storage and shift -- */

    for (nv = 0; nv <= NVLAST; nv++) {
      BOX_LOOP((&box), k, j, i) Ftmp[nv][k][j][i] = FluxL[nv][k][j];
      SB_SetBoundaryVar(Ftmp[nv], &box, X1_END, t, grid);
    }

  /* -- symmetrize fluxes -- */

    for (k = KBEG; k <= KEND; k++){
      for (nv = 0; nv <= NVLAST; nv++){
      for (j = JBEG; j <= JEND; j++){
        fR[nv][j] = swL*Ftmp[nv][k][j][0] + swR*FluxR[nv][k][j];
      }}
      for (j = JBEG; j <= JEND; j++){
        #if HAVE_ENERGY
         fR[ENG][j] += swL*sb_vy*(-fR[MX2][j] + 0.5*sb_vy*fR[RHO][j]);
        #endif
        #ifdef GLM_MHD
         fR[BX2][j] += swL*sb_vy*fR[PSI_GLM][j]/glm_ch/glm_ch;
/*       fR[BX2][j] += 0.5*sb_vy*FluxR[PSI_GLM][k][j]/glm_ch/glm_ch; */
        #endif
        fR[MX2][j] -= swL*sb_vy*fR[RHO][j];

      /* -- update solution vector -- */

        for (nv = 0; nv <= NVLAST; nv++){
          U[k][j][IEND][nv] -= dtdx*(fR[nv][j] - FluxR[nv][k][j]);
        }
      }
    }
  }
}
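
/* ------------------------------------------------------------------------
   Illustrative sketch (not part of the module): the conservation argument
   behind the correction above, reduced to a single pair of boundary zones
   and a single variable. correct_pair(), fL_old, fR_old and fsym are
   hypothetical names; in the module the interpolated, weighted fluxes
   fL/fR play the role of fsym.
   ------------------------------------------------------------------------ */
#if 0
/* uL: leftmost zone value, uR: rightmost zone value.
   fL_old, fR_old: boundary-face fluxes actually used by the update;
   fsym: the common, symmetrized boundary flux both faces should see.   */
void correct_pair (double *uL, double *uR,
                   double fL_old, double fR_old, double fsym,
                   double dt, double dx)
{
  *uL += dt/dx*(fsym - fL_old);  /* same sign as an inflow on the left  */
  *uR -= dt/dx*(fsym - fR_old);  /* opposite sign on the right          */

  /* The pair total uL + uR changes by dt/dx*(fR_old - fL_old), which
     exactly cancels the mismatch dt/dx*(fL_old - fR_old) introduced
     when the two boundary faces were updated with different fluxes.    */
}
#endif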

#ifdef STAGGERED_MHD
/* ********************************************************************* */
void SB_CorrectEMF (EMF *emf, Data_Arr V0, Grid *grid)
/*!
 * Correct the electromotive force by enforcing symmetrization, in order
 * to guarantee that Bx on the left is mapped from Bx on the right.
 * This is done by re-defining the y- and z-components of the electric
 * field as follows:
 *
 * \f[
 *    E_{z,L} \to \frac{1}{2}\left[E_{z,L} + \Aop^{t_E}_{>}(E_{z,R})
 *                                 + wF_{+}\right]
 *    \,,\qquad
 *    E_{z,R} \to \frac{1}{2}\left[\Aop^{t_E}_{<}(E_{z,L}) + E_{z,R}
 *                                 - wF_{-}\right]
 * \f]
 * where \f$t_E\f$ is the time level of the electric field and
 * \f$\Aop\f$ denotes the forward (>) or backward (<) shift operator.
 * The corrective terms \f$F_{\pm}\f$ are the upwind numerical fluxes
 * computed in the advection of the x-component of the magnetic field:
 * \f[
 *    F_{+} = B_{x,L} + \frac{\widetilde{\Delta B}_{xL}}{2}
 *            \left(1 - \frac{\delta t}{\Delta y}w\right)
 *    \,,\qquad
 *    F_{-} = B_{x,R} - \frac{\widetilde{\Delta B}_{xR}}{2}
 *            \left(1 - \frac{\delta t}{\Delta y}w\right)
 * \f]
 * where \f$\delta t \ne \Delta t\f$ is the time increment of the shift
 * operator applied to the magnetic field, \f$B_{xL}\f$ and \f$B_{xR}\f$
 * are the x-components of the magnetic field computed at some
 * intermediate time \c tB, while
 * \f[
 *    \widetilde{\Delta B}_{xL} =
 *    \frac{  \overline{\Delta B}_{xL}
 *          + \Aop^{t_B}_{>}(\overline{\Delta B}_{xR})}{2}
 *    \;,\qquad
 *    \widetilde{\Delta B}_{xR} =
 *    \frac{  \Aop^{t_B}_{<}(\overline{\Delta B}_{xL})
 *          + \overline{\Delta B}_{xR}}{2}
 * \f]
 * are the arithmetic averages of the limited slopes of \c Bx computed
 * at the left and right boundaries.
 *
 * \param [in,out] emf  pointer to EMF structure
 * \param [in]     V0   4D array of staggered fields
 * \param [in]     grid pointer to array of Grid structures
 *********************************************************************** */
{
  int    i, j, k, nghost;
  int    dimx[3] = {1, 0, 0};
  int    dimy[3] = {0, 1, 0};
  int    dimz[3] = {0, 0, 1};
  double fE, esym;
  double tB, tE, w, dt, dtdy;
  static double **BxL0, **BxL, ***dBxL_lim, *dBxL, ***eyL, ***ezL;
  static double **BxR0, **BxR, ***dBxR_lim, *dBxR, ***eyR, ***ezR;
  static double ***etmp, ***dBtmp;

  #if    (SB_SYMMETRIZE_EY == NO) && (SB_SYMMETRIZE_EZ == NO) \
      && (SB_FORCE_EMF_PERIODS == NO)
   return;
  #endif

  if (ezL == NULL){
    eyL = ARRAY_3D(NX3_TOT, NX2_TOT, 1, double);
    eyR = ARRAY_3D(NX3_TOT, NX2_TOT, 1, double);

    ezL = ARRAY_3D(NX3_TOT, NX2_TOT, 1, double);
    ezR = ARRAY_3D(NX3_TOT, NX2_TOT, 1, double);

    BxL = ARRAY_2D(NX3_TOT, NX2_TOT, double);
    BxR = ARRAY_2D(NX3_TOT, NX2_TOT, double);

    BxL0 = ARRAY_2D(NX3_TOT, NX2_TOT, double);
    BxR0 = ARRAY_2D(NX3_TOT, NX2_TOT, double);

    dBxL = ARRAY_1D(NMAX_POINT, double);
    dBxR = ARRAY_1D(NMAX_POINT, double);
    dBxL_lim = ARRAY_3D(NX3_TOT, NX2_TOT, 1, double);
    dBxR_lim = ARRAY_3D(NX3_TOT, NX2_TOT, 1, double);

    etmp  = ARRAY_3D(NX3_TOT, NX2_TOT, 1, double);  /* temporary storage */
    dBtmp = ARRAY_3D(NX3_TOT, NX2_TOT, 1, double);  /* temporary storage */
  }

  w  = sb_vy;           /* shear velocity */
  dt = g_dt;            /* default increment of the shift operator A(B) */
  tB = g_time;          /* time level of the magnetic field V0 */
  tE = g_time + g_dt;   /* time level of the shift operator A(E) */

/* --------------------------------------------------------------
    Store the initial Bx on the left and on the right
   -------------------------------------------------------------- */

  if (g_intStage == 1){
    if (grid[IDIR].lbound != 0){
      KTOT_LOOP(k) JTOT_LOOP(j) BxL[k][j] = BxL0[k][j] = V0[BX1s][k][j][IBEG - 1];
    }
    if (grid[IDIR].rbound != 0){
      KTOT_LOOP(k) JTOT_LOOP(j) BxR[k][j] = BxR0[k][j] = V0[BX1s][k][j][IEND];
    }
  }

  #ifdef CTU
   if (grid[IDIR].lbound != 0){
     KTOT_LOOP(k) JTOT_LOOP(j) BxL[k][j] = BxL0[k][j] = V0[BX1s][k][j][IBEG - 1];
   }
   if (grid[IDIR].rbound != 0){
     KTOT_LOOP(k) JTOT_LOOP(j) BxR[k][j] = BxR0[k][j] = V0[BX1s][k][j][IEND];
   }
  #elif TIME_STEPPING == RK2
   if (g_intStage == 2) {
     tB = g_time + 0.5*g_dt;
     dt = 0.5*g_dt;
     if (grid[IDIR].lbound != 0) KTOT_LOOP(k) JTOT_LOOP(j) {
       BxL[k][j] = 0.5*(BxL0[k][j] + V0[BX1s][k][j][IBEG - 1]);
     }
     if (grid[IDIR].rbound != 0) KTOT_LOOP(k) JTOT_LOOP(j) {
       BxR[k][j] = 0.5*(BxR0[k][j] + V0[BX1s][k][j][IEND]);
     }
   }
  #elif TIME_STEPPING == RK3
   if (g_intStage == 2) {
     tB = g_time + 0.25*g_dt;
     tE = g_time + 0.5*g_dt;
     dt = 0.25*g_dt;
     if (grid[IDIR].lbound != 0) KTOT_LOOP(k) JTOT_LOOP(j) {
       BxL[k][j] = 0.75*BxL0[k][j] + 0.25*V0[BX1s][k][j][IBEG - 1];
     }
     if (grid[IDIR].rbound != 0) KTOT_LOOP(k) JTOT_LOOP(j) {
       BxR[k][j] = 0.75*BxR0[k][j] + 0.25*V0[BX1s][k][j][IEND];
     }
   }else if (g_intStage == 3) {
     tB = g_time + g_dt/3.0;
     tE = g_time + g_dt;
     dt = g_dt/1.5;
     if (grid[IDIR].lbound != 0) KTOT_LOOP(k) JTOT_LOOP(j) {
       BxL[k][j] = (BxL0[k][j] + 2.0*V0[BX1s][k][j][IBEG - 1])/3.0;
     }
     if (grid[IDIR].rbound != 0) KTOT_LOOP(k) JTOT_LOOP(j) {
       BxR[k][j] = (BxR0[k][j] + 2.0*V0[BX1s][k][j][IEND])/3.0;
     }
   }
  #endif

  dtdy   = dt/grid[JDIR].dx[JBEG];
  nghost = grid[IDIR].nghost;

/* -- compute Bx slopes on the left side -- */

  if (grid[IDIR].lbound != 0){

  /* -- Store Ey, Ez, Bx on the left and compute slopes -- */

    KTOT_LOOP(k) JTOT_LOOP(j){
      D_EXPAND(                                         ,
               ezL[k][j][0] = emf->ez[k][j][IBEG - 1];   ,
               eyL[k][j][0] = emf->ey[k][j][IBEG - 1];)
    }

    for (k = KBEG; k <= KEND; k++){
      for (j = 1; j <= JEND + nghost; j++) dBxL[j] = BxL[k][j] - BxL[k][j-1];
      for (j = JBEG - 1; j <= JEND + 1; j++){
        dBxL_lim[k][j][0] = VAN_LEER(dBxL[j+1], dBxL[j]);
      }
    }
  }

/* -- compute Bx slopes on the right side -- */

  if (grid[IDIR].rbound != 0){

  /* -- Store Ey, Ez, Bx on the right and compute slopes -- */

    KTOT_LOOP(k) JTOT_LOOP(j){
      D_EXPAND(                                      ,
               ezR[k][j][0] = emf->ez[k][j][IEND];    ,
               eyR[k][j][0] = emf->ey[k][j][IEND]; )
    }

    for (k = KBEG; k <= KEND; k++){
      for (j = 1; j <= JEND + nghost; j++) dBxR[j] = BxR[k][j] - BxR[k][j-1];
      for (j = JBEG - 1; j <= JEND + 1; j++){
        dBxR_lim[k][j][0] = VAN_LEER(dBxR[j+1], dBxR[j]);
      }
    }
  }

/* ---------------------------------------------------------------------
    Exchange data between processors
   --------------------------------------------------------------------- */

  #ifdef PARALLEL
   D_EXPAND(                                                                    ,
            ExchangeX (ezL[0][0], ezR[0][0], NX3_TOT*NX2_TOT, grid);
            ExchangeX (dBxL_lim[0][0], dBxR_lim[0][0], NX3_TOT*NX2_TOT, grid);   ,
            ExchangeX (eyL[0][0], eyR[0][0], NX3_TOT*NX2_TOT, grid); )
  #endif

/* ----------------------------------------------------------------
    Symmetrize Ey and Ez on the left.
    We copy one of the two arrays before doing the interpolation,
    since its original value is lost after b.c. have been assigned.
   ---------------------------------------------------------------- */

  if (grid[IDIR].lbound != 0){
    RBox box;

    box.ib = 0; box.ie = 0;
    box.jb = 0; box.je = NX2_TOT-1;
    box.kb = 0; box.ke = NX3_TOT-1;
    #if SB_SYMMETRIZE_EY == YES

    /* -- Interpolate eyR --> eyLR -- */

     BOX_LOOP((&box), k, j, i) etmp[k][j][i] = eyR[k][j][i];
     SB_SetBoundaryVar(etmp, &box, X1_BEG, tE, grid);
     for (k = KBEG - 1; k <= KEND; k++){
     for (j = JBEG; j <= JEND; j++) {
       emf->ey[k][j][IBEG - 1] = swL*eyL[k][j][0] + swR*etmp[k][j][0];
     }}
    #endif

/* --------------------------------------------------------------------- */
/*! \note
    Symmetrization of Ez can only be done using weight coefficients of
    1/2 (and not swL, swR), since the limited slopes (dBxL_lim and
    dBxR_lim) do not satisfy the property that their sum is zero (as
    would be expected for a periodic function). */
/* --------------------------------------------------------------------- */

    #if SB_SYMMETRIZE_EZ == YES

    /* -- Interpolate dBxR_lim, ezR --> dBxLR_lim, ezLR -- */

     BOX_LOOP((&box), k, j, i) {
       dBtmp[k][j][i] = dBxR_lim[k][j][i];
       etmp[k][j][i]  = ezR[k][j][i];
     }
     SB_SetBoundaryVar(dBtmp, &box, X1_BEG, tB, grid);
     SB_SetBoundaryVar( etmp, &box, X1_BEG, tE, grid);
     for (k = KBEG; k <= KEND; k++){
       for (j = JBEG - 1; j <= JEND + 1; j++){
         dBxL[j] = 0.5*(dBxL_lim[k][j][0] + dBtmp[k][j][0]);
       }
       for (j = JBEG - 1; j <= JEND; j++){
         fE = w*(BxL[k][j] + 0.5*dBxL[j]*(1.0 - w*dtdy));
         emf->ez[k][j][IBEG-1] = 0.5*(ezL[k][j][0] + etmp[k][j][0] + fE);
       }
     }
    #endif
  }

/* ------------------------------------------------------
    Symmetrize Ey and Ez on the right
   ------------------------------------------------------ */

  if (grid[IDIR].rbound != 0){
    RBox box;

    box.ib = 0; box.ie = 0;
    box.jb = 0; box.je = NX2_TOT-1;
    box.kb = 0; box.ke = NX3_TOT-1;

    #if SB_SYMMETRIZE_EY == YES
     SB_SetBoundaryVar(eyL, &box, X1_END, tE, grid);
     for (k = KBEG - 1; k <= KEND; k++){
     for (j = JBEG; j <= JEND; j++) {
       emf->ey[k][j][IEND] = swL*eyL[k][j][0] + swR*eyR[k][j][0];
     }}
    #endif

    #if SB_SYMMETRIZE_EZ == YES
     SB_SetBoundaryVar(dBxL_lim, &box, X1_END, tB, grid);
     SB_SetBoundaryVar(     ezL, &box, X1_END, tE, grid);

     for (k = KBEG; k <= KEND; k++){
       for (j = JBEG - 1; j <= JEND + 1; j++){
         dBxR[j] = 0.5*(dBxL_lim[k][j][0] + dBxR_lim[k][j][0]);
       }
       for (j = JBEG - 1; j <= JEND; j++){
         fE = w*(BxR[k][j + 1] - 0.5*dBxR[j + 1]*(1.0 - w*dtdy));
         emf->ez[k][j][IEND] = 0.5*(ezR[k][j][0] + ezL[k][j][0] - fE);
       }
     }
    #endif
  }

/* --------------------------------------------------
    Force periodicity
   -------------------------------------------------- */

  #if DIMENSIONS == 3 && SB_FORCE_EMF_PERIODS == YES

  /* -- Ex at Z faces: force periodicity -- */

   #ifdef PARALLEL
    MPI_Barrier (MPI_COMM_WORLD);
    AL_Exchange_dim (emf->ex[0][0], dimz, SZ);
    MPI_Barrier (MPI_COMM_WORLD);
   #else
    for (j = JBEG - 1; j <= JEND; j++){
    for (i = IBEG    ; i <= IEND; i++){
      esym = 0.5*(emf->ex[KBEG - 1][j][i] + emf->ex[KEND][j][i]);
      emf->ex[KBEG - 1][j][i] = esym;
      emf->ex[KEND    ][j][i] = esym;
    }}
   #endif

  /* -- Ex at Y faces: force periodicity -- */

   for (k = KBEG - 1; k <= KEND; k++){
   for (i = IBEG    ; i <= IEND; i++){
     esym = 0.5*(emf->ex[k][JBEG - 1][i] + emf->ex[k][JEND][i]);
     emf->ex[k][JBEG - 1][i] = esym;
     emf->ex[k][JEND    ][i] = esym;
   }}

  /* -- Ey at Z faces: force periodicity -- */

   #ifdef PARALLEL
    MPI_Barrier (MPI_COMM_WORLD);
    AL_Exchange_dim (emf->ey[0][0], dimz, SZ);
    MPI_Barrier (MPI_COMM_WORLD);
   #else
    for (j = JBEG    ; j <= JEND; j++){
    for (i = IBEG - 1; i <= IEND; i++){
      esym = 0.5*(emf->ey[KBEG - 1][j][i] + emf->ey[KEND][j][i]);
      emf->ey[KBEG - 1][j][i] = esym;
      emf->ey[KEND    ][j][i] = esym;
    }}
   #endif

  /* -- Ez at Y faces: force periodicity -- */

   for (k = KBEG    ; k <= KEND; k++){
   for (i = IBEG - 1; i <= IEND; i++){
     esym = 0.5*(emf->ez[k][JBEG - 1][i] + emf->ez[k][JEND][i]);
     emf->ez[k][JBEG - 1][i] = esym;
     emf->ez[k][JEND    ][i] = esym;
   }}

  #endif
}
#endif

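/* ------------------------------------------------------------------------
   Illustrative sketch (not part of the module): the corrective EMF term
   fE = w*F(+/-) used in the Ez symmetrization above, transcribed as a
   hypothetical standalone function. The slope term gives second-order
   accuracy in the advection direction; the donor index differs on the
   two sides because the boundary shift moves in opposite directions
   (forward/backward shift operators in the doc comment).
   ------------------------------------------------------------------------ */
#if 0
/* fE = w*(Bx[j]   + 0.5*dBx[j]  *(1 - w*dt/dy))   (left boundary)
   fE = w*(Bx[j+1] - 0.5*dBx[j+1]*(1 - w*dt/dy))   (right boundary)   */
double corrective_emf (double *Bx, double *dBx, int j,
                       double w, double dtdy, int left_side)
{
  if (left_side) return w*(Bx[j]   + 0.5*dBx[j]  *(1.0 - w*dtdy));
  else           return w*(Bx[j+1] - 0.5*dBx[j+1]*(1.0 - w*dtdy));
}
#endif
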
#ifdef PARALLEL
/* ********************************************************************* */
void ExchangeX (double *bufL, double *bufR, int nel, Grid *grid)
/*!
 * Send bufL, owned by the processor at X1_BEG, to the processor at
 * X1_END; send bufR, owned by the processor at X1_END, to the
 * processor at X1_BEG.
 *
 *********************************************************************** */
{
  static int dest = -1;
  int stag = 1, rtag = 1;
  MPI_Comm cartcomm;
  MPI_Status istat;
  MPI_Request req;
  static int nprocs[3], periods[3], coords[3];

  AL_Get_cart_comm(SZ, &cartcomm);
  MPI_Barrier (MPI_COMM_WORLD);

/* --------------------------------------
    Get the rank of the processor lying
    on the opposite side of the x-domain
   -------------------------------------- */

  if (dest == -1){
    if (grid[IDIR].lbound != 0){
      MPI_Cart_get(cartcomm, 3, nprocs, periods, coords);
      coords[0] += nprocs[0] - 1;
      MPI_Cart_rank (cartcomm, coords, &dest);
    }

    if (grid[IDIR].rbound != 0){
      MPI_Cart_get(cartcomm, 3, nprocs, periods, coords);
      coords[0] += - nprocs[0] + 1;
      MPI_Cart_rank (cartcomm, coords, &dest);
    }
  }

  if (grid[IDIR].lbound != 0){
    if (prank != dest){
      MPI_Sendrecv (bufL, nel, MPI_DOUBLE, dest, stag,
                    bufR, nel, MPI_DOUBLE, dest, rtag,
                    MPI_COMM_WORLD, &istat);
    }
  }

  if (grid[IDIR].rbound != 0){
    if (prank != dest){
      MPI_Sendrecv (bufR, nel, MPI_DOUBLE, dest, stag,
                    bufL, nel, MPI_DOUBLE, dest, rtag,
                    MPI_COMM_WORLD, &istat);
    }
  }

  MPI_Barrier (MPI_COMM_WORLD);
}

#endif
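
/* ------------------------------------------------------------------------
   Illustrative sketch (not part of the module): a minimal, self-contained
   program showing the pairing technique used by ExchangeX(). Each rank at
   one extreme of the x-decomposition computes, via MPI_Cart_rank(), the
   rank at the opposite extreme and exchanges a buffer with it through a
   single MPI_Sendrecv(). A 1D Cartesian communicator is assumed for
   brevity; the module uses the 3D communicator returned by
   AL_Get_cart_comm().
   ------------------------------------------------------------------------ */
#if 0
#include <mpi.h>
#include <stdio.h>

int main (int argc, char **argv)
{
  int rank, dest, coords[1], nprocs[1], periods[1];
  double sendbuf, recvbuf;
  MPI_Comm cart;
  MPI_Status status;

  MPI_Init (&argc, &argv);
  MPI_Comm_size (MPI_COMM_WORLD, &nprocs[0]);
  periods[0] = 0;
  MPI_Cart_create (MPI_COMM_WORLD, 1, nprocs, periods, 0, &cart);
  MPI_Comm_rank (cart, &rank);
  MPI_Cart_get (cart, 1, nprocs, periods, coords);

  if (coords[0] == 0 || coords[0] == nprocs[0] - 1){

    /* -- jump to the opposite extreme of the x-decomposition -- */

    coords[0] = (coords[0] == 0 ? nprocs[0] - 1 : 0);
    MPI_Cart_rank (cart, coords, &dest);

    sendbuf = (double)rank;
    if (rank != dest){   /* a single rank may own both sides */
      MPI_Sendrecv (&sendbuf, 1, MPI_DOUBLE, dest, 1,
                    &recvbuf, 1, MPI_DOUBLE, dest, 1,
                    cart, &status);
      printf ("rank %d exchanged with rank %d\n", rank, dest);
    }
  }

  MPI_Finalize ();
  return 0;
}
#endif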