sb_tools.c
1 /* ///////////////////////////////////////////////////////////////////// */
2 /*!
3  \file
4  \brief Miscellaneous functions for implementing the shearing-box
5  boundary condition.
6 
7  The shearing-box tool file contains various functions for the
8  implementation of the shearing-box boundary conditions in serial or
9  parallel mode.
10  The SB_SetBoundaryVar() function applies shearing-box boundary conditions
11  to a 3D array U[k][j][i] at an X1_BEG or X1_END boundary.
12  The array U[k][j][i] is defined on the RBox *box with grid indices
13  (box->ib) <= i <= (box->ie), (box->jb) <= j <= (box->je)
14  (box->kb) <= k <= (box->ke), and assumes that periodic boundary
15  conditions have already been set.
16  Indices in the y-direction are not necessary, since the
17  entire y-range is assumed.
18 
19  In parallel mode, we use the SB_ShiftBoundaryVar() function to perform
20  the integer shift of cells across the processors, while the
21  interpolation function SB_ShearingInterp() handles the fractional
22  part only.
23  In serial mode, or when there is only one processor along y, the
24  interpolation function does the whole job (integer + fractional).
25 
26  \authors A. Mignone (mignone@ph.unito.it)\n
27  G. Muscianisi (g.musicanisi@cineca.it)
28  \date Jan 31, 2014
29 */
30 /* ///////////////////////////////////////////////////////////////////// */
31 #include "pluto.h"
32 
33 static double sb_Ly;
34 
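/* --------------------------------------------------------------------
    Illustrative sketch (not part of the original PLUTO source): the
    total azimuthal displacement sb_vy*t is split into an integer
    number of cells Delta_j and a fractional remainder eps, as done
    below in SB_ShiftBoundaryVar() and SB_ShearingInterp().
    The helper name and its standalone form are hypothetical.
   -------------------------------------------------------------------- */

static void SB_SplitShiftSketch (double t, double dy,
                                 int *Delta_j, double *eps)
{
  double scrh = fmod(sb_vy*t, sb_Ly)/dy;     /* shift in units of dy    */

  *Delta_j = (int)scrh;                      /* integer part, handled
                                                by SB_ShiftBoundaryVar  */
  *eps = fabs(scrh) - floor(fabs(scrh));     /* fractional part, handled
                                                by SB_ShearingInterp    */
}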
35 /* ********************************************************************* */
36 void SB_SetBoundaryVar(double ***U, RBox *box, int side, double t, Grid *grid)
37 /*!
38  * Fill ghost zones using shearing-box conditions.
39  *
40  * \param [out] U pointer to a 3D array (centered or staggered)
41  * \param [in] box pointer to RBox structure defining the domain
42  * sub-portion over which shearing-box conditions
43  * have to be applied
44  * \param [in] side the side of the X1 boundary (X1_BEG or X1_END)
45  * \param [in] t the simulation time
46  * \param [in] grid pointer to an array of Grid structures
47  *
48  * \return This function has no return value.
49  *********************************************************************** */
50 {
51  int i, j, k, ngh_y;
52  int nghL, nghR;
53  int coords[3], dst1, dst2;
54  long int count, buf_size;
55  static double *qL, *qR;
56 
57  if (side != X1_BEG && side != X1_END){
58  print1 ("! SB_SetBoundaryVar: wrong boundary\n");
59  QUIT_PLUTO(1);
60  }
61 
62  if (qL == NULL){
63  qL = ARRAY_1D(NMAX_POINT, double);
64  qR = ARRAY_1D(NMAX_POINT, double);
65  }
66 
66 
67  sb_Ly = g_domEnd[JDIR] - g_domBeg[JDIR];  /* shearing-box length in y */
68 
69 /* -- get number of ghost zones to the left and to the right --*/
70 
71  nghL = JBEG - box->jb;
72  nghR = box->je - JEND;
73 
74 /* -- shift data values across parallel domains -- */
75 
76  #ifdef PARALLEL
77  if (grid[JDIR].nproc > 1) SB_ShiftBoundaryVar(U, box, side, t, grid);
78  #endif
79 
80 /* -- exchange values between processors to fill ghost zones -- */
81 
82  SB_FillBoundaryGhost(U, box, nghL, nghR, grid);
83 
84 /* ---- perform 1D interpolation in the x
85  boundary zones along the y direction ---- */
86 
87  if (side == X1_BEG){
88  for (k = box->kb; k <= box->ke; k++){
89  for (i = box->ib; i <= box->ie; i++){
90  JTOT_LOOP(j) qR[j] = U[k][j][i];
91  SB_ShearingInterp (qL, qR, t, X1_BEG, grid);
92  JDOM_LOOP(j) U[k][j][i] = qL[j];
93  }
94  }
95  }else if (side == X1_END){
96  for (k = box->kb; k <= box->ke; k++){
97  for (i = box->ib; i <= box->ie; i++){
98  JTOT_LOOP(j) qL[j] = U[k][j][i];
99  SB_ShearingInterp (qL, qR, t, X1_END, grid);
100  JDOM_LOOP(j) U[k][j][i] = qR[j];
101  }
102  }
103  }
104 
105 /* -- exchange values between processors to fill ghost zones -- */
106 
107  SB_FillBoundaryGhost (U, box, nghL, nghR, grid);
108  return;
109 }
110 
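/* --------------------------------------------------------------------
    Hypothetical usage sketch (not part of the original PLUTO source):
    fill the X1_BEG ghost zones of a cell-centered array q3D with
    shearing-box conditions. The function name and the index ranges
    below are illustrative; the actual boxes are prepared by the
    boundary-condition driver.
   -------------------------------------------------------------------- */

static void SB_SetBoundaryVarSketch (double ***q3D, double t, Grid *grid)
{
  RBox box;

  box.ib = 0;  box.ie = IBEG - 1;     /* x1 ghost zones, left side      */
  box.jb = 0;  box.je = NX2_TOT - 1;  /* full y range, ghosts included  */
  box.kb = 0;  box.ke = NX3_TOT - 1;  /* full z range, ghosts included  */

  SB_SetBoundaryVar (q3D, &box, X1_BEG, t, grid);
}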
111 #ifdef PARALLEL
112 /* ********************************************************************* */
113 void SB_ShiftBoundaryVar(double ***q, RBox *box, int side, double t, Grid *grid)
114 /*!
115  * Split the 3D array q[k][j][i] into two buffers and send them
116  * to the processors with ranks dst1 and dst2.
117  * The box structure contains the original grid index ranges on
118  * top of which q is defined.
119  * At the same time, receive buffers from the processors with ranks
120  * src1 and src2.
121  *
122  * \param [in,out] q 3D array
123  * \param [in] box the rectangular box giving the index range
124  * \param [in] side the side of the X1 boundary
125  * \param [in] t simulation time
126  * \param [in] grid pointer to an array of Grid structures
127  *
128  * \return This function has no return value.
129  ************************************************************************* */
130 {
131  int i, j, k, ngh_x, ngh_y;
132  int nx_buf1, ny_buf1, nz_buf1;
133  int nx_buf2, ny_buf2, nz_buf2;
134  long int count, buf1_size, buf2_size;
135 
136  int coords[3];
137  int dst1, dst2, src1, src2;
138  int Delta_j, shift_coordy_mod, shift_coordy_div;
139 
140  double scrh;
141  static double *snd_buf, *rcv_buf;
142 
143  RBox buf1, buf2;
144  RBox *pbuf1, *pbuf2;
145 
146  static MPI_Comm cartcomm;
147  MPI_Status status;
148 
149 /* -------------------------------------------------------------------- */
150 /*! We allocate static memory areas for send/receive buffers and
151  employ just one send buffer and one receive buffer, each with
152  size equal to the full extent of the boundary side.
153  The two data chunks coming from q[k][j][i] are stored at
154  different positions in the send/recv buffers.
155 
156  In 3D staggered MHD we augment the buffer size by one point
157  in the z-direction for BZ.
158  There is no need to do the same for BY, since the periodic condition
159  will be applied later on. */
160 /* -------------------------------------------------------------------- */
161 
162  ngh_x = grid[IDIR].nghost;
163  ngh_y = grid[JDIR].nghost;
164  if (snd_buf == NULL){
165  int nz = NX3_TOT;
166  #if defined STAGGERED_MHD && DIMENSIONS == 3
167  nz += 1;
168  #endif
169  snd_buf = ARRAY_1D(nz*NX2_TOT*ngh_x, double);
170  rcv_buf = ARRAY_1D(nz*NX2_TOT*ngh_x, double);
171 
172  AL_Get_cart_comm(SZ, &cartcomm);
173  }
174 
175 /* -- compute buffer index offsets in x/z -- */
176 
177  buf1.ib = buf2.ib = box->ib;
178  buf1.ie = buf2.ie = box->ie;
179  buf1.kb = buf2.kb = box->kb;
180  buf1.ke = buf2.ke = box->ke;
181 
182 /* --------------------------------------------------------------------- */
183 /*! Depending on the boundary side, we set buffer offsets in the
184  y-direction and find the ranks of the processors to/from which we
185  must send/receive data.
186  The local processor sends data to processors dst1 and dst2
187  and receives data from processors src1 and src2.
188  \note The parallel decomposition must result in equally sized
189  grids in the y-direction.
190  \verbatim
191    |________________|       |_____[dst1]_____||____[dst2]______|
192     <----><-------->         <---->            <-------->
193      buf1    buf2             buf1               buf2
194  \endverbatim
195 */
196 /* --------------------------------------------------------------------- */
197 
198  scrh = (fmod(sb_vy*t, sb_Ly))/grid[JDIR].dx[JBEG];
199  Delta_j = (int)scrh;
200 
201  shift_coordy_mod = Delta_j%NX2;
202  shift_coordy_div = Delta_j/NX2;
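/* -- illustrative numbers (not from the source): with NX2 = 64 and a
      total shift of Delta_j = 150 cells, shift_coordy_mod = 150%64 = 22
      and shift_coordy_div = 150/64 = 2, i.e. the shear displaces the
      boundary data by two full processor domains plus 22 cells in y -- */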
203 
204  if (side == X1_BEG){
205 
206  ny_buf1 = NX2 - (shift_coordy_mod) + ngh_y;
207  ny_buf2 = ngh_y + (shift_coordy_mod);
208 
209  buf1.jb = 0; buf1.je = ny_buf1 - 1;
210  buf2.jb = 0; buf2.je = ny_buf2 - 1;
211 
212  for (i = 0; i < DIMENSIONS; i++) coords[i] = grid[i].rank_coord;
213  coords[JDIR] += shift_coordy_div;
214  MPI_Cart_rank(cartcomm, coords, &dst1);
215 
216  coords[JDIR] += 1;
217  MPI_Cart_rank(cartcomm, coords, &dst2);
218 
219  for (i = 0; i < DIMENSIONS; i++) coords[i] = grid[i].rank_coord;
220  coords[JDIR] -= shift_coordy_div;
221  MPI_Cart_rank(cartcomm, coords, &src1);
222 
223  coords[JDIR] -= 1;
224  MPI_Cart_rank(cartcomm, coords, &src2);
225 
226  }else if (side == X1_END){
227 
228  ny_buf1 = shift_coordy_mod + ngh_y;
229  ny_buf2 = ngh_y + (NX2 - shift_coordy_mod);
230 
231  buf1.jb = 0; buf1.je = ny_buf1 - 1;
232  buf2.jb = 0; buf2.je = ny_buf2 - 1;
233 
234  for (i = 0; i < DIMENSIONS; i++) coords[i] = grid[i].rank_coord;
235  coords[JDIR] -= shift_coordy_div;
236  MPI_Cart_rank(cartcomm, coords, &dst2);
237 
238  coords[JDIR] -= 1;
239  MPI_Cart_rank(cartcomm, coords, &dst1);
240 
241  for (i = 0; i < DIMENSIONS; i++) coords[i] = grid[i].rank_coord;
242  coords[JDIR] += shift_coordy_div;
243  MPI_Cart_rank(cartcomm, coords, &src2);
244 
245  coords[JDIR] += 1;
246  MPI_Cart_rank(cartcomm, coords, &src1);
247 
248  }else{
249  print1 ("! SB_ShiftBoundaryVar: wrong boundary\n");
250  QUIT_PLUTO(1);
251  }
252 
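/* -- continuing the illustrative numbers above for side == X1_BEG:
      with NX2 = 64, ngh_y = 3 and shift_coordy_mod = 22 one gets
      ny_buf1 = 64 - 22 + 3 = 45 and ny_buf2 = 3 + 22 = 25, so the two
      buffers together span NX2 + 2*ngh_y = 70 rows of boundary data -- */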
253 /* -- compute buffer sizes in the three directions -- */
254 
255  nx_buf1 = buf1.ie - buf1.ib + 1;
256  nx_buf2 = buf2.ie - buf2.ib + 1;
257 
258  ny_buf1 = buf1.je - buf1.jb + 1;
259  ny_buf2 = buf2.je - buf2.jb + 1;
260 
261  nz_buf1 = buf1.ke - buf1.kb + 1;
262  nz_buf2 = buf2.ke - buf2.kb + 1;
263 
264 /* -- total buffer size -- */
265 
266  buf1_size = nz_buf1*ny_buf1*nx_buf1;
267  buf2_size = nz_buf2*ny_buf2*nx_buf2;
268 
269 /* -- fill send buffers with values -- */
270 
271  pbuf1 = &buf1; /* pointers to RBox are used  */
272  pbuf2 = &buf2; /* inside the BOX_LOOP macros */
273 
274  count = 0;
275  BOX_LOOP(pbuf1, k, j, i) snd_buf[count++] = q[k][JBEG+j][i];
276  BOX_LOOP(pbuf2, k, j, i) snd_buf[count++] = q[k][JEND-ny_buf2+1+j][i];
277 
278 /* -- Send/Receive buffers -- */
279 
280  MPI_Sendrecv(snd_buf, buf1_size, MPI_DOUBLE, dst1, 1,
281  rcv_buf, buf1_size, MPI_DOUBLE, src1, 1,
282  cartcomm, &status);
283 
284  MPI_Sendrecv(snd_buf + buf1_size, buf2_size, MPI_DOUBLE, dst2, 2,
285  rcv_buf + buf1_size, buf2_size, MPI_DOUBLE, src2, 2,
286  cartcomm, &status);
287 
288 /* -- store received buffers in the correct locations -- */
289 
290  count = 0;
291  BOX_LOOP(pbuf1, k, j, i) q[k][j+ny_buf2][i] = rcv_buf[count++];
292  BOX_LOOP(pbuf2, k, j, i) q[k][j][i] = rcv_buf[count++];
293 }
294 #endif /* PARALLEL */
295 
296 /* ********************************************************************* */
297 void SB_FillBoundaryGhost(double ***U, RBox *box,
298  int nghL, int nghR, Grid *grid)
299 /*!
300  * Fill ghost zones in the Y direction in the X1_BEG and
301  * X1_END boundary regions.
302  *
303  * \param [in,out] U a 3D data array
304  * \param [in,out] box the RBox structure containing the grid indices in
305  * the x and z directions. Indices in the y-direction
306  * are reset here for convenience.
307  * \param [in] nghL number of ghost zones on the left
308  * \param [in] nghR number of ghost zones on the right
309  * \param [in] grid pointer to an array of Grid structures
310  * \note nghL and nghR can be different for staggered fields like BY.
311  *
312  * \return This function has no return value.
313  *********************************************************************** */
314 {
315  int i, j, k;
316  int jb0, je0;
317  #ifdef PARALLEL
318  int coords[3];
319  long int count, buf_size;
320  static double *snd_buf1, *snd_buf2, *rcv_buf1, *rcv_buf2;
321  static int dst1, dst2;
322  static MPI_Comm cartcomm;
323  MPI_Status status;
324  #endif
325 
326 /* -------------------------------------------------------
327  save a copy of the original grid indices in the y-dir
328  ------------------------------------------------------- */
329 
330  jb0 = box->jb;
331  je0 = box->je;
332 
333 /* ------------------------------------------------------------------
334  impose periodic b.c. if there's only 1 processor along y
335  ------------------------------------------------------------------ */
336 
337  if (grid[JDIR].nproc == 1){
338  box->jb = JBEG - nghL;
339  box->je = JBEG - 1;
340  BOX_LOOP(box, k, j, i) U[k][j][i] = U[k][j + NX2][i];
341 
342  box->jb = JEND + 1;
343  box->je = JEND + nghR;
344  BOX_LOOP(box, k, j, i) U[k][j][i] = U[k][j - NX2][i];
345 
346  box->jb = jb0; box->je = je0;
347  return;
348  }
349 
350 #ifdef PARALLEL
351 
352 /* ----------------------------------------------------------------
353  - allocate memory for maximum buffer size;
354  - get destination ranks of the processors lying above and
355  below the current processor.
356  This is done only once at the beginning since parallel
357  decomposition is not going to change during the computation.
358  ---------------------------------------------------------------- */
359 
360  if (snd_buf1 == NULL){
361 
362  i = grid[IDIR].nghost;
363  j = grid[JDIR].nghost;
364  k = NX3_TOT;
365  #ifdef STAGGERED_MHD
366  j++;
367  k++;
368  #endif
369  snd_buf1 = ARRAY_1D(i*j*k, double);
370  snd_buf2 = ARRAY_1D(i*j*k, double);
371  rcv_buf1 = ARRAY_1D(i*j*k, double);
372  rcv_buf2 = ARRAY_1D(i*j*k, double);
373 
374  AL_Get_cart_comm(SZ, &cartcomm);
375 
376  for (i = 0; i < DIMENSIONS; i++) coords[i] = grid[i].rank_coord;
377  coords[JDIR] -= 1;
378  MPI_Cart_rank(cartcomm, coords, &dst1);
379 
380  coords[JDIR] += 2;
381  MPI_Cart_rank(cartcomm, coords, &dst2);
382 
383  }
384 
385 /* -----------------------------------------------------------------
386  Send/receive buffers.
387  Buffer snd_buf1 is sent to rank [dst1] while snd_buf2 is sent
388  to rank [dst2] as follows:
389 
390 
391    |_____[dst1]_____|     |________________|     |_____[dst2]_____|
392                            <--->      <--->
393                     <----- buf1        buf2 ---->
394  ----------------------------------------------------------------- */
395 
396 /* -- send buffer at JBEG -- */
397 
398  count = 0;
399  box->jb = JBEG;
400  box->je = JBEG + nghR - 1;
401  buf_size = (box->ke - box->kb + 1)
402  *(box->je - box->jb + 1)
403  *(box->ie - box->ib + 1);
404  BOX_LOOP(box, k, j, i) snd_buf1[count++] = U[k][j][i];
405 
406  MPI_Sendrecv(snd_buf1, buf_size, MPI_DOUBLE, dst1, 1,
407  rcv_buf2, buf_size, MPI_DOUBLE, dst2, 1,
408  cartcomm, &status);
409 
410 /* -- send buffer at JEND -- */
411 
412  count = 0;
413  box->jb = JEND - nghL + 1;
414  box->je = JEND;
415  buf_size = (box->ke - box->kb + 1)
416  *(box->je - box->jb + 1)
417  *(box->ie - box->ib + 1);
418  BOX_LOOP(box, k, j, i) snd_buf2[count++] = U[k][j][i];
419 
420  MPI_Sendrecv(snd_buf2, buf_size, MPI_DOUBLE, dst2, 2,
421  rcv_buf1, buf_size, MPI_DOUBLE, dst1, 2,
422  cartcomm, &status);
423 
424 /* -- place buffers in the correct position -- */
425 
426  count = 0;
427  box->jb = JEND + 1;
428  box->je = JEND + nghR;
429  BOX_LOOP(box, k, j, i) U[k][j][i] = rcv_buf2[count++];
430 
431  count = 0;
432  box->jb = JBEG - nghL;
433  box->je = JBEG - 1;
434  BOX_LOOP(box, k, j, i) U[k][j][i] = rcv_buf1[count++];
435 
436 /* -- restore original grid index in the y-dir -- */
437 
438  box->jb = jb0; box->je = je0;
439 
440 #endif /* PARALLEL */
441 }
442 
443 /* ********************************************************************* */
444 void SB_ShearingInterp (double *qL, double *qR, double t, int side,
445  Grid *grid)
446 /*!
447  * Shift the 1D arrays qL or qR by an amount \f$ wt =
448  * \Delta y\Delta J + \epsilon \f$ (if there's only one processor in y)
449  * or just by \f$ \epsilon \f$ (otherwise) by advecting the initial
450  * profile of qR (when side == X1_BEG) or qL (when side == X1_END).
451  *
452  * \param [in,out] qL 1D array containing the un-shifted values on the
453  * left boundary
454  * \param [in,out] qR 1D array containing the un-shifted values on the
455  * right boundary
456  * \param [in] t simulation time
457  * \param [in] side boundary side
458  * \param [in] grid pointer to an array of Grid structures
459  *
460  * \return This function has no return value.
461  *********************************************************************** */
462 {
463  int j, Delta_j, nghost;
464  int jS, jN, jp, jm;
465  int isgn;
466  double Delta_L, Delta_y, dyfrac;
467  double scrh;
468  double *q_from, *q_to;
469  static double *dq, *dql, *flux;
470 
471  if (dq == NULL){
472  dq = ARRAY_1D(NMAX_POINT, double);
473  dql = ARRAY_1D(NMAX_POINT, double);
474  flux = ARRAY_1D(NMAX_POINT, double);
475  }
476 
477  Delta_y = grid[JDIR].dx[JBEG];
478  nghost = grid[JDIR].nghost;
479 
480  if (side == X1_BEG){
481  q_from = qR;
482  q_to = qL;
483  isgn = -1;
484  }else if (side == X1_END){
485  q_from = qL;
486  q_to = qR;
487  isgn = 1;
488  }
489 
490  Delta_L = fmod(isgn*sb_vy*t, sb_Ly);
491  scrh = Delta_L/Delta_y;
492 
493 /* ---- shift index and fractional remainder inside cell ---- */
494 
495  Delta_j = (int)scrh;
496  dyfrac = fabs(scrh) - floor(fabs(scrh));
497 
498 /* -- in parallel mode, the shift has already been done at this point -- */
499 
500  #ifdef PARALLEL
501  if (grid[JDIR].nproc > 1) Delta_j = 0;
502  #endif
503 
504 /* -- compute limited slopes & interpolate -- */
505 
506  for (j = 1; j <= JEND + nghost; j++) {
507  dq[j] = q_from[j] - q_from[j - 1];
508  }
509 
510  #if SB_ORDER == 1 /* -- 1st order interpolation -- */
511 
512  for (j = 0; j <= JEND + nghost; j++) dql[j] = 0.0;
513 
514  if (side == X1_BEG)
515  for (j = JBEG - 1; j <= JEND; j++) flux[j] = q_from[j];
516  else if (side == X1_END)
517  for (j = JBEG - 1; j <= JEND; j++) flux[j] = q_from[j + 1];
518 
519  #elif SB_ORDER == 2 /* -------------------------------------------
520  2nd order interpolation uses the standard
521  conservative MUSCL-Hancock scheme
522  ------------------------------------------ */
523 /* vanleer_lim (dq + 1, dq, dql, JBEG - 1, JEND + 1, grid); */
524  for (j = JBEG - 1; j <= JEND + 1; j++) dql[j] = VAN_LEER(dq[j+1], dq[j]);
525 
526  if (side == X1_BEG){
527  for (j = JBEG - 1; j <= JEND; j++) {
528  flux[j] = q_from[j] + 0.5*dql[j]*(1.0 - dyfrac);
529  }
530  }else if (side == X1_END){
531  for (j = JBEG - 1; j <= JEND; j++) {
532  flux[j] = q_from[j + 1] - 0.5*dql[j + 1]*(1.0 - dyfrac);
533  }
534  }
535 
536  #elif SB_ORDER == 3 /* --------------------------------------
537  3rd order interpolation uses the
538  traditional PPM method.
539  -------------------------------------- */
540  static double *qp, *qm;
541  double ap, am, P0, P1, P2;
542 
543  if (qp == NULL){
544  qp = ARRAY_1D(NMAX_POINT, double);
545  qm = ARRAY_1D(NMAX_POINT, double);
546  if ( (JBEG - 3) < 0) {
547  printf ("! SB_ShearingInterp: the value of SB_ORDER (%d)\n",
548  SB_ORDER);
549  printf ("! is not consistent with underlying algorithm\n");
550  QUIT_PLUTO(1);
551  }
552  }
553 
554 /* mc_lim (dq + 1, dq, dql, JBEG - 2, JEND + 2, grid); */
555  for (j = JBEG - 2; j <= JEND + 2; j++) dql[j] = MC(dq[j+1], dq[j]);
556  for (j = JBEG - 1; j <= JEND + 1; j++){
557  qp[j] = 0.5*(q_from[j] + q_from[j+1]) - (dql[j+1] - dql[j])/6.0;
558  qm[j] = 0.5*(q_from[j] + q_from[j-1]) - (dql[j] - dql[j-1])/6.0;
559 
560  ap = qp[j] - q_from[j];
561  am = qm[j] - q_from[j];
562 
563  if (ap*am >= 0.0) {
564  ap = am = 0.0;
565  }else{
566  if (fabs(ap) >= 2.0*fabs(am)) ap = -2.0*am;
567  if (fabs(am) >= 2.0*fabs(ap)) am = -2.0*ap;
568  }
569  qp[j] = q_from[j] + ap;
570  qm[j] = q_from[j] + am;
571  }
572 
573  if (side == X1_BEG){
574  for (j = JBEG - 1; j <= JEND; j++){
575  P0 = 1.5*q_from[j] - 0.25*(qp[j] + qm[j]);
576  P1 = qp[j] - qm[j];
577  P2 = 3.0*(qp[j] - 2.0*q_from[j] + qm[j]);
578  flux[j] = P0 + 0.5*P1*(1.0 - dyfrac) +
579  P2*(0.25 - 0.5*dyfrac + dyfrac*dyfrac/3.0);
580  }
581  }else if (side == X1_END) {
582  for (j = JBEG - 1; j <= JEND; j++){
583  P0 = 1.5*q_from[j + 1] - 0.25*(qp[j + 1] + qm[j + 1]);
584  P1 = qp[j + 1] - qm[j + 1];
585  P2 = 3.0*(qp[j + 1] - 2.0*q_from[j + 1] + qm[j + 1]);
586  flux[j] = P0 - 0.5*P1*(1.0 - dyfrac) +
587  P2*(0.25 - 0.5*dyfrac + dyfrac*dyfrac/3.0);
588  }
589  }
590 
591  #else
592 
593  print ("! SB_ORDER should lie between 1 and 3\n");
594  QUIT_PLUTO(1);
595 
596  #endif
597 
598 /* -------------------------------------------------------------
599  Assign volumetric sliding averages.
600  The match point falls on the opposite side
601  between jS and jN = jS + 1.
602 
603  At t = 0:
604 
605  - point j on the left matches jN on the right;
606  - point j on the right matches jS on the left;
607 
608  In any case,
609 
610  (j + Delta_j) --> jN if side is X1_BEG
611  (j + Delta_j) --> jS if side is X1_END
612 
613  and the advection operator can be defined in terms
614  of jup = (j + Delta_j):
615 
616  q_to[j] = q_from[jup] -+ dyfrac*(flux[jup] - flux[jup-1]);
617 
618  with - (+) for the left (right) boundary; dyfrac is always positive.
619  ------------------------------------------------------------- */
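/* -- illustrative numbers (not from the source): for side == X1_BEG with
      dyfrac = 0.25 and SB_ORDER == 1 (so that flux[j] = q_from[j]), the
      update below reduces to q_to[j] = 0.75*q_from[jp] + 0.25*q_from[jm],
      i.e. a linear interpolation between the two matching upstream cells -- */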
620 
621  if (side == X1_END) dyfrac *= -1.0;
622 
623  if (grid[JDIR].nproc == 1){
624  for (j = JBEG; j <= JEND; j++){
625  jp = SB_JSHIFT(j + Delta_j);
626  jm = SB_JSHIFT(jp - 1);
627  q_to[j] = q_from[jp] - dyfrac*(flux[jp] - flux[jm]);
628  }
629  }else{
630  for (j = JBEG; j <= JEND; j++){
631  jp = j;
632  jm = j-1;
633  q_to[j] = q_from[jp] - dyfrac*(flux[jp] - flux[jm]);
634  }
635  }
636 }
637 /* ********************************************************************* */
638 int SB_JSHIFT (int jc)
639 /*!
640  * Make sure jc always falls between JBEG and JEND.
641  *
642  *********************************************************************** */
643 {
644  int jj;
645 
646  jj = jc;
647  if (jj > JEND) jj -= (int)NX2;
648  if (jj < JBEG) jj += (int)NX2;
649 
650  if (jj < JBEG || jj > JEND){
651  print (" ! j index out of range in JSHIFT %d, %d\n", jc, jj);
652  QUIT_PLUTO(1);
653  }
654 
655  return(jj);
656 }
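/* --------------------------------------------------------------------
    Illustrative check (hypothetical values, not from the source): with
    JBEG = 3, JEND = 66 and NX2 = 64, SB_JSHIFT(70) returns 6 and
    SB_JSHIFT(1) returns 65, i.e. out-of-range j indices are wrapped
    periodically back into [JBEG, JEND].
   -------------------------------------------------------------------- */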
657 