PLUTO
initialize.c
1 /* ///////////////////////////////////////////////////////////////////// */
2 /*!
3  \file
4  \brief Initialize PLUTO.
5 
6  Initialize() performs a number of initialization tasks before
7  starting the main computation loop.\n
8  More precisely, it completes the following sequence of steps:
9 
10  - parse command line options
11  - runtime initialization (i.e. parsing pluto.ini)
12  - parallel domain decomposition
13  - grid generation
14  - memory allocation
15  - initial conditions
16  - set output attributes
17 
18  The function GetDecompMode() sets the parallel domain decomposition
19  mode, which can be one of:
20 
21  - AL_AUTO_DECOMP (the default);
22  - AL_USER_DECOMP if the -dec n1 [n2] [n3] command-line
23    argument has been given;
24    in this case only, procs[] contains the number
25    of processors in the three directions;
26  - AL_MPI_DECOMP [todo]
27 
28  \author A. Mignone (mignone@ph.unito.it)
29  \date Aug 24, 2015
30 */
31 /* ///////////////////////////////////////////////////////////////////// */
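/* Illustrative example (assuming the standard PLUTO executable name and
   launch syntax; not taken verbatim from this file): an explicit 2 x 2
   decomposition on four MPI ranks,

      mpirun -np 4 ./pluto -dec 2 2

   makes GetDecompMode() return AL_USER_DECOMP with procs[] = {2, 2};
   omitting -dec selects AL_AUTO_DECOMP and leaves the decomposition to
   the AL library.  */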
32 #include "pluto.h"
33 
34 static int GetDecompMode (Cmd_Line *cmd_line, int procs[]);
35 
36 /* ********************************************************************* */
37 void Initialize(int argc, char *argv[], Data *data,
38  Runtime *runtime, Grid *grid, Cmd_Line *cmd_line)
39 /*!
40  * Initialize computational grid, domain decomposition and memory
41  * allocation. Also, set initial conditions and output attributes.
42  *
43  * \param [in] argc the number of command-line arguments passed to
44  * the code
45  * \param [in] argv the command-line arguments as an array of strings
46  * \param [in,out] data a pointer to the main PLUTO data structure
47  * \param [in,out] runtime a pointer to the Runtime structure
48  * \param [in] grid pointer to an array of Grid structures
49  * \param [in] cmd_line pointer to the Cmd_Line structure
50  *
51  * \return This function has no return value.
52  *********************************************************************** */
53 {
54  int nprocs, decomp_mode;
55  int i, j, k, idim, nv;
56  int nx, ny, nz, nghost, status;
57  int gsize[DIMENSIONS], lsize[DIMENSIONS];
58  int beg[DIMENSIONS], end[DIMENSIONS];
59  int gbeg[DIMENSIONS], gend[DIMENSIONS];
60  int lbeg[DIMENSIONS], lend[DIMENSIONS];
61  int is_gbeg[DIMENSIONS], is_gend[DIMENSIONS];
62  int ghosts[DIMENSIONS];
63  int periods[DIMENSIONS];
64  int pardim[DIMENSIONS], stagdim[DIMENSIONS];
65  int procs[DIMENSIONS];
66  char ini_file[128];
67  double scrh, dxmin[3], dxming[3];
68  Output *output;
69  #ifdef PARALLEL
70  MPI_Datatype rgb_type;
71  MPI_Datatype Float_Vect_type;
72  #endif
73 
74 /* -- set default input file name -- */
75 
76  sprintf (ini_file,"pluto.ini");
77 
78 /* -- parse command line options -- */
79 
80  ParseCmdLineArgs (argc, argv, ini_file, cmd_line);
81 
82  #ifdef PARALLEL
83 
84 /* -- get number of processors -- */
85 
86  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
87 
88 /* -- read initialization file -- */
89 
90  if (prank == 0) RuntimeSetup (runtime, cmd_line, ini_file);
91  MPI_Bcast (runtime, sizeof (struct RUNTIME) , MPI_BYTE, 0, MPI_COMM_WORLD);
92 
93 /* -- get number of ghost zones and set periodic boundaries -- */
94 
95  nghost = GetNghost();
96  MPI_Allreduce (&nghost, &idim, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
97  nghost = idim;
98 
99  for (idim = 0; idim < DIMENSIONS; idim++) {
100  gsize[idim] = runtime->npoint[idim];
101  ghosts[idim] = nghost;
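/* directions whose left boundary is PERIODIC or SHEARING are flagged
   as periodic for the parallel domain decomposition */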
102  periods[idim] = (runtime->left_bound[idim] == PERIODIC ? 1:0)
103  || (runtime->left_bound[idim] == SHEARING ? 1:0);
104  pardim[idim] = cmd_line->parallel_dim[idim];
105  }
106 
107 /* -- find parallel decomposition mode and number of processors -- */
108 
109  decomp_mode = GetDecompMode(cmd_line, procs);
110 
111 /* ---- double distributed array descriptor ---- */
112 
113 /* SetDistributedArray (SZ, type, gsize, periods, stagdim);
114  return args: beg, end, lsize, lbeg, lend, gbeg, gend, is_gbeg, is_gend
115 */
116 
117  AL_Sz_init (MPI_COMM_WORLD, &SZ);
118  AL_Set_type (MPI_DOUBLE, 1, SZ);
119  AL_Set_dimensions (DIMENSIONS, SZ);
120  AL_Set_global_dim (gsize, SZ);
121  AL_Set_ghosts (ghosts, SZ);
122  AL_Set_periodic_dim (periods, SZ);
123  AL_Set_parallel_dim (pardim, SZ);
124 
125  AL_Decompose (SZ, procs, decomp_mode);
126  AL_Get_local_dim (SZ, lsize);
127  AL_Get_bounds (SZ, beg, end, ghosts, AL_C_INDEXES);
128  AL_Get_lbounds (SZ, lbeg, lend, ghosts, AL_C_INDEXES);
129  AL_Get_gbounds (SZ, gbeg, gend, ghosts, AL_C_INDEXES);
130  AL_Is_boundary (SZ, is_gbeg, is_gend);
131 
132 /* ---- float distributed array descriptor ---- */
133 
134  AL_Sz_init (MPI_COMM_WORLD, &SZ_float);
135  AL_Set_type (MPI_FLOAT, 1, SZ_float);
136  AL_Set_dimensions (DIMENSIONS, SZ_float);
137  AL_Set_global_dim (gsize, SZ_float);
138  AL_Set_ghosts (ghosts, SZ_float);
139  AL_Set_periodic_dim (periods, SZ_float);
140  AL_Set_parallel_dim (pardim, SZ_float);
141 
142  AL_Decompose (SZ_float, procs, decomp_mode);
143  AL_Get_local_dim (SZ_float, lsize);
144  AL_Get_bounds (SZ_float, beg, end, ghosts, AL_C_INDEXES);
145  AL_Get_lbounds (SZ_float, lbeg, lend, ghosts, AL_C_INDEXES);
146  AL_Get_gbounds (SZ_float, gbeg, gend, ghosts, AL_C_INDEXES);
147  AL_Is_boundary (SZ_float, is_gbeg, is_gend);
148 
149 /* ---- char distributed array descriptor ---- */
150 
151  AL_Sz_init (MPI_COMM_WORLD, &SZ_char);
152  AL_Set_type (MPI_CHAR, 1, SZ_char);
153  AL_Set_dimensions (DIMENSIONS, SZ_char);
154  AL_Set_global_dim (gsize, SZ_char);
155  AL_Set_ghosts (ghosts, SZ_char);
156  AL_Set_periodic_dim (periods, SZ_char);
157  AL_Set_parallel_dim (pardim, SZ_char);
158 
159  AL_Decompose (SZ_char, procs, decomp_mode);
160  AL_Get_local_dim (SZ_char, lsize);
161  AL_Get_bounds (SZ_char, beg, end, ghosts, AL_C_INDEXES);
162  AL_Get_lbounds (SZ_char, lbeg, lend, ghosts, AL_C_INDEXES);
163  AL_Get_gbounds (SZ_char, gbeg, gend, ghosts, AL_C_INDEXES);
164  AL_Is_boundary (SZ_char, is_gbeg, is_gend);
165 
166 /* ---- Float_Vec distributed array descriptor ---- */
167 
168  MPI_Type_contiguous (3, MPI_FLOAT, &Float_Vect_type);
169  MPI_Type_commit (&Float_Vect_type);
170 
171  AL_Sz_init (MPI_COMM_WORLD, &SZ_Float_Vect);
172  AL_Set_type (MPI_FLOAT, 3, SZ_Float_Vect);
173  AL_Set_dimensions (DIMENSIONS, SZ_Float_Vect);
174  AL_Set_global_dim (gsize, SZ_Float_Vect);
175  AL_Set_ghosts (ghosts, SZ_Float_Vect);
176  AL_Set_periodic_dim (periods, SZ_Float_Vect);
177  AL_Set_parallel_dim (pardim, SZ_Float_Vect);
178 
179  AL_Decompose (SZ_Float_Vect, procs, decomp_mode);
180  AL_Get_local_dim (SZ_Float_Vect, lsize);
181  AL_Get_bounds (SZ_Float_Vect, beg, end, ghosts, AL_C_INDEXES);
182  AL_Get_lbounds (SZ_Float_Vect, lbeg, lend, ghosts, AL_C_INDEXES);
183  AL_Get_gbounds (SZ_Float_Vect, gbeg, gend, ghosts, AL_C_INDEXES);
184  AL_Is_boundary (SZ_Float_Vect, is_gbeg, is_gend);
185 
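/* the loop below copies the decomposition info into the Grid structure:
   beg/end are the global indices spanned by the local array, lbeg/lend
   are the corresponding local indices, gbeg/gend refer to the global
   array, and lbound/rbound retain the physical boundary type only on
   processors that touch the global domain boundary (is_gbeg/is_gend = 1) */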
186  for (idim = 0; idim < DIMENSIONS; idim++) {
187  grid[idim].nghost = nghost;
188  grid[idim].np_tot = lsize[idim] + 2*ghosts[idim];
189  grid[idim].np_int = lsize[idim];
190  grid[idim].np_tot_glob = runtime->npoint[idim] + 2*ghosts[idim];
191  grid[idim].np_int_glob = runtime->npoint[idim];
192  grid[idim].beg = beg[idim];
193  grid[idim].end = end[idim];
194  grid[idim].gbeg = gbeg[idim];
195  grid[idim].gend = gend[idim];
196  grid[idim].lbeg = lbeg[idim];
197  grid[idim].lend = lend[idim];
198  grid[idim].lbound = runtime->left_bound[idim]*is_gbeg[idim];
199  grid[idim].rbound = runtime->right_bound[idim]*is_gend[idim];
200  grid[idim].nproc = procs[idim];
201  }
202 
203 /* -- Find total number of processors & decomposition mode -- */
204 
205 /* AL_Get_size(SZ, &nprocs); Replaced by MPI_Comm_size above */
206 
207  #ifdef STAGGERED_MHD
208 
209 /* ---- x-staggered array descriptor (double) ---- */
210 
211  #if DIMENSIONS >= 1
212  D_EXPAND(stagdim[IDIR] = AL_TRUE; ,
213  stagdim[JDIR] = AL_FALSE; ,
214  stagdim[KDIR] = AL_FALSE;)
215 
216  DIM_LOOP(idim) gsize[idim] = runtime->npoint[idim];
217  gsize[IDIR] += 1;
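/* face-centered (staggered) fields carry one extra interface point along
   the staggered direction, hence the global size is increased by one there */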
218 
219  #ifdef SHEARINGBOX
220  periods[IDIR] = 0;
221  #endif
222 
223  AL_Sz_init (MPI_COMM_WORLD, &SZ_stagx);
224  AL_Set_type (MPI_DOUBLE, 1, SZ_stagx);
225  AL_Set_dimensions (DIMENSIONS, SZ_stagx);
226  AL_Set_global_dim (gsize, SZ_stagx);
227  AL_Set_ghosts (ghosts, SZ_stagx);
228  AL_Set_staggered_dim(stagdim, SZ_stagx);
229  AL_Set_periodic_dim (periods, SZ_stagx);
230  AL_Set_parallel_dim (pardim, SZ_stagx);
231 
232  AL_Decompose (SZ_stagx, procs, decomp_mode);
233  AL_Get_local_dim (SZ_stagx, lsize);
234  AL_Get_bounds (SZ_stagx, beg, end, ghosts, AL_C_INDEXES);
235  AL_Get_lbounds (SZ_stagx, lbeg, lend, ghosts, AL_C_INDEXES);
236  AL_Get_gbounds (SZ_stagx, gbeg, gend, ghosts, AL_C_INDEXES);
237  AL_Is_boundary (SZ_stagx, is_gbeg, is_gend);
238 
239  #ifdef SHEARINGBOX
240  periods[IDIR] = 1;
241  #endif
242  #endif
243 
244  #if DIMENSIONS >= 2
245  D_EXPAND(stagdim[IDIR] = AL_FALSE; ,
246  stagdim[JDIR] = AL_TRUE; ,
247  stagdim[KDIR] = AL_FALSE;)
248 
249  DIM_LOOP(idim) gsize[idim] = runtime->npoint[idim];
250  gsize[JDIR] += 1;
251 
252  AL_Sz_init (MPI_COMM_WORLD, &SZ_stagy);
253  AL_Set_type (MPI_DOUBLE, 1, SZ_stagy);
254  AL_Set_dimensions (DIMENSIONS, SZ_stagy);
255  AL_Set_global_dim (gsize, SZ_stagy);
256  AL_Set_ghosts (ghosts, SZ_stagy);
257  AL_Set_staggered_dim(stagdim, SZ_stagy);
258  AL_Set_periodic_dim (periods, SZ_stagy);
259  AL_Set_parallel_dim (pardim, SZ_stagy);
260 
261  AL_Decompose (SZ_stagy, procs, decomp_mode);
262  AL_Get_local_dim (SZ_stagy, lsize);
263  AL_Get_bounds (SZ_stagy, beg, end, ghosts, AL_C_INDEXES);
264  AL_Get_lbounds (SZ_stagy, lbeg, lend, ghosts, AL_C_INDEXES);
265  AL_Get_gbounds (SZ_stagy, gbeg, gend, ghosts, AL_C_INDEXES);
266  AL_Is_boundary (SZ_stagy, is_gbeg, is_gend);
267  #endif
268 
269  #if DIMENSIONS == 3
270  D_EXPAND(stagdim[IDIR] = AL_FALSE; ,
271  stagdim[JDIR] = AL_FALSE; ,
272  stagdim[KDIR] = AL_TRUE;)
273 
274  DIM_LOOP(idim) gsize[idim] = runtime->npoint[idim];
275  gsize[KDIR] += 1;
276 
277  AL_Sz_init (MPI_COMM_WORLD, &SZ_stagz);
278  AL_Set_type (MPI_DOUBLE, 1, SZ_stagz);
279  AL_Set_dimensions (DIMENSIONS, SZ_stagz);
280  AL_Set_global_dim (gsize, SZ_stagz);
281  AL_Set_ghosts (ghosts, SZ_stagz);
282  AL_Set_staggered_dim(stagdim, SZ_stagz);
283  AL_Set_periodic_dim (periods, SZ_stagz);
284  AL_Set_parallel_dim (pardim, SZ_stagz);
285 
286  AL_Decompose (SZ_stagz, procs, decomp_mode);
287  AL_Get_local_dim (SZ_stagz, lsize);
288  AL_Get_bounds (SZ_stagz, beg, end, ghosts, AL_C_INDEXES);
289  AL_Get_lbounds (SZ_stagz, lbeg, lend, ghosts, AL_C_INDEXES);
290  AL_Get_gbounds (SZ_stagz, gbeg, gend, ghosts, AL_C_INDEXES);
291  AL_Is_boundary (SZ_stagz, is_gbeg, is_gend);
292  #endif
293 
294  #endif /* STAGGERED_MHD */
295 
296  /* -- find processor coordinates in a Cartesian topology -- */
297 
298  {
299  int coords[3] = {0,0,0};
300  int rank;
301  MPI_Comm cartcomm;
302  AL_Get_cart_comm(SZ, &cartcomm);
303  MPI_Cart_get(cartcomm, DIMENSIONS, procs, periods, coords);
304  MPI_Cart_rank(cartcomm, coords, &rank);
305  if (rank != prank) {
306  printf ("! Initialize: rank and prank are different\n");
307  QUIT_PLUTO(1);
308  }
309  for (idim = 0; idim < DIMENSIONS; idim++) {
310  grid[idim].rank_coord = coords[idim];
311  }
312  }
313 
314  #else /* if NOT PARALLEL */
315 
316 /* -----------------------------------------------------
317  Serial Initialization
318  ----------------------------------------------------- */
319 
320  RuntimeSetup (runtime, cmd_line, ini_file);
321  nghost = GetNghost();
322 
323  for (idim = 0; idim < DIMENSIONS; idim++) {
324  grid[idim].nghost = nghost;
325  grid[idim].np_int = grid[idim].np_int_glob = runtime->npoint[idim];
326  grid[idim].np_tot = grid[idim].np_tot_glob = runtime->npoint[idim] + 2*nghost;
327  grid[idim].beg = grid[idim].gbeg = grid[idim].lbeg = nghost;
328  grid[idim].end = grid[idim].gend = grid[idim].lend
329  = (grid[idim].lbeg - 1) + grid[idim].np_int;
330  grid[idim].lbound = runtime->left_bound[idim];
331  grid[idim].rbound = runtime->right_bound[idim];
332  grid[idim].nproc = 1;
333  }
334  nprocs = 1;
335 
336  #endif
337 
338  RuntimeSet (runtime);
339 
340 /* ----------------------------------------------------
341  Set output directory and log file
342  ---------------------------------------------------- */
343 
344  SetLogFile (runtime->output_dir, cmd_line);
345  ShowConfig (argc, argv, ini_file);
346 
347 /* ---------------------------------------------------
348  Grid Generation
349  --------------------------------------------------- */
350 
351  print1 ("\n> Generating grid...\n\n");
352  SetGrid (runtime, grid);
353  Where (-1, grid); /* -- store grid inside the "Where"
354  function for subsequent calls -- */
355  if (cmd_line->makegrid == YES) {
356  print1 ("\n> Done < \n");
357  QUIT_PLUTO(0);
358  }
359 
360 /* ------------------------------------------------
361  Initialize global variables
362  ------------------------------------------------ */
363 
364  g_dt = runtime->first_dt;
365  g_time = 0.0;
366  g_maxMach = 0.0;
367  g_maxRiemannIter = 0;
368  g_usedMemory = 0;
369 
370  IBEG = grid[IDIR].lbeg; IEND = grid[IDIR].lend;
371  JBEG = grid[JDIR].lbeg; JEND = grid[JDIR].lend;
372  KBEG = grid[KDIR].lbeg; KEND = grid[KDIR].lend;
373 
374  NX1 = grid[IDIR].np_int;
375  NX2 = grid[JDIR].np_int;
376  NX3 = grid[KDIR].np_int;
377 
378  NX1_TOT = grid[IDIR].np_tot;
379  NX2_TOT = grid[JDIR].np_tot;
380  NX3_TOT = grid[KDIR].np_tot;
381 
382 /* ---------------------------------------
383  Get the maximum number of points
384  among all directions
385  --------------------------------------- */
386 
387  NMAX_POINT = MAX(NX1, NX2);
388  NMAX_POINT = MAX(NMAX_POINT, NX3);
389 
390 /* ---------------------------------------------
391  Define the RBox structures.
392  --------------------------------------------- */
393 
394  SetRBox();
395 
396 /* --------------------------------------------------------------------
397  Find the minimum physical cell length for each direction
398  -------------------------------------------------------------------- */
399 
400  for (idim = 0; idim < DIMENSIONS; idim++) dxmin[idim] = 1.e30;
401 
402  for (i = IBEG; i <= IEND; i++) {
403  for (j = JBEG; j <= JEND; j++) {
404  for (k = KBEG; k <= KEND; k++) {
405 
406  scrh = Length_1(i, j, k, grid);
407  dxmin[IDIR] = MIN (dxmin[IDIR], scrh);
408 
409  scrh = Length_2(i, j, k, grid);
410  dxmin[JDIR] = MIN (dxmin[JDIR], scrh);
411 
412  scrh = Length_3(i, j, k, grid);
413  dxmin[KDIR] = MIN (dxmin[KDIR], scrh);
414 
415  }}}
416 
417  #ifdef PARALLEL
418  MPI_Allreduce (dxmin, dxming, 3, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
419  dxmin[IDIR] = dxming[IDIR];
420  dxmin[JDIR] = dxming[JDIR];
421  dxmin[KDIR] = dxming[KDIR];
422  #endif
423 
424  grid[IDIR].dl_min = dxmin[IDIR];
425  grid[JDIR].dl_min = dxmin[JDIR];
426  grid[KDIR].dl_min = dxmin[KDIR];
427 
428 /* --------------------------------------------------------------
429  Define geometrical coefficients for linear interpolation
430  -------------------------------------------------------------- */
431 
432  PLM_CoefficientsSet (grid); /* -- these may be needed by
433  shock flattening algorithms */
434  #if RECONSTRUCTION == PARABOLIC
435  PPM_CoefficientsSet (grid);
436  #endif
437 
438 /* --------------------------------------------------------------
439  Copy user defined parameters into global array g_inputParam
440  -------------------------------------------------------------- */
441 
442  for (nv = 0; nv < USER_DEF_PARAMETERS; nv++) g_inputParam[nv] = runtime->aux[nv];
443 
444 /* ------------------------------------------------------------
445  Allocate memory for 3D data arrays
446  ------------------------------------------------------------ */
447 
448  print1 ("\n> Memory allocation\n");
449  data->Vc = ARRAY_4D(NVAR, NX3_TOT, NX2_TOT, NX1_TOT, double);
450  data->Uc = ARRAY_4D(NX3_TOT, NX2_TOT, NX1_TOT, NVAR, double);
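/* note the different index ordering: Vc is addressed as Vc[nv][k][j][i],
   while Uc is addressed as Uc[k][j][i][nv] */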
451 
452  #ifdef STAGGERED_MHD
453  data->Vs = ARRAY_1D(DIMENSIONS, double ***);
454  D_EXPAND(
455  data->Vs[BX1s] = ArrayBox( 0, NX3_TOT-1, 0, NX2_TOT-1,-1, NX1_TOT-1); ,
456  data->Vs[BX2s] = ArrayBox( 0, NX3_TOT-1,-1, NX2_TOT-1, 0, NX1_TOT-1); ,
457  data->Vs[BX3s] = ArrayBox(-1, NX3_TOT-1, 0, NX2_TOT-1, 0, NX1_TOT-1);)
458  #endif
459 
460  #if UPDATE_VECTOR_POTENTIAL == YES
461  D_EXPAND( ,
462  data->Ax3 = ARRAY_3D(NX3_TOT, NX2_TOT, NX1_TOT, double); ,
463  data->Ax1 = ARRAY_3D(NX3_TOT, NX2_TOT, NX1_TOT, double);
464  data->Ax2 = ARRAY_3D(NX3_TOT, NX2_TOT, NX1_TOT, double);
465  )
466  #endif
467 
468  #if RESISTIVITY != NO
469  data->J = ARRAY_4D(3,NX3_TOT, NX2_TOT, NX1_TOT, double);
470  #endif
471 
472  data->flag = ARRAY_3D(NX3_TOT, NX2_TOT, NX1_TOT, unsigned char);
473 
474 /* ------------------------------------------------------------
475  Initialize tables needed for EOS
476  ------------------------------------------------------------ */
477 
478  #if EOS == PVTE_LAW && NIONS == 0
479  #if TV_ENERGY_TABLE == YES
480  MakeInternalEnergyTable();
481  #else
482  MakeEV_TemperatureTable();
483  #endif
484  MakePV_TemperatureTable();
485  #endif
486 
487 /* ------------------------------------------------------------
488  Assign initial conditions
489  ------------------------------------------------------------ */
490 
491  Startup (data, grid);
492 
493 /* ------------------------------------------------------------
494  Set output attributes (names, images, number of outputs...)
495  ------------------------------------------------------------ */
496 
497  SetOutput (data, runtime);
498 
499 /* -----------------------------------
500  print normalization units
501  ----------------------------------- */
502 
503  ShowUnits();
504 
505  print1 ("> Number of processors: %d\n",nprocs);
506  D_EXPAND(print1 ("> Typical proc size: %d",grid[IDIR].np_int); ,
507  print1 (" X %d", grid[JDIR].np_int); ,
508  print1 (" X %d", grid[KDIR].np_int);)
509  print1 ("\n");
510  #ifdef PARALLEL
511  print1 ("> Parallel Directions: ");
512  D_EXPAND(if (pardim[IDIR]) print1 (" X1"); ,
513  if (pardim[JDIR]) print1 ("/X2"); ,
514  if (pardim[KDIR]) print1 ("/X3");)
515  print1 ("\n");
516  #endif
517  if (cmd_line->show_dec) ShowDomainDecomposition (nprocs, grid);
518 }
519 
520 #ifdef PARALLEL
521 /* ********************************************************************* */
522 int GetDecompMode (Cmd_Line *cmd_line, int procs[])
523 /*!
524  * Returns the parallel domain decomposition mode.
525  *
526  * \param [in] cmd_line pointer to the Cmd_Line structure
527  * \param [out] procs an array of integers giving the number
528  * of processors in each direction only if
529  * the -dec command line option has been given
530  *
531  * \return The decomposition mode:
532  *
533  * - AL_AUTO_DECOMP (the default);
534  * - AL_USER_DECOMP if the -dec n1 [n2] [n3] command-line
535  *   argument has been given;
536  *   in this case only, procs[] contains the number
537  *   of processors in the three directions;
538  * - AL_MPI_DECOMP [todo]
539  * \todo AL_MPI_DECOMP mode
540  *********************************************************************** */
541 {
542  int nprocs;
543  long int npx = 1, npy = 1, npz = 1;
544 
545  D_EXPAND(npx = cmd_line->nproc[IDIR]; ,
546  npy = cmd_line->nproc[JDIR]; ,
547  npz = cmd_line->nproc[KDIR];)
548 
549 /* ------------------------------------------------
550  if -dec has not been given, the decomposition
551  mode defaults to AL_AUTO_DECOMP
552  ------------------------------------------------ */
553 
554  if (npx == -1 || npy == -1 || npz == -1){
555  return AL_AUTO_DECOMP;
556  }
557 
558 /* -----------------------------------------------
559  enter user decomposition mode if the number of
560  processors along all directions has been given
561  ------------------------------------------------ */
562 
563  if (npx > 0 && npy > 0 && npz > 0){
564  procs[IDIR] = npx;
565  procs[JDIR] = npy;
566  procs[KDIR] = npz;
567 
568  /* -- check if decomposition is correct -- */
569 
570  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
571  if (procs[IDIR]*procs[JDIR]*procs[KDIR] != nprocs){
572  printf ("! The specified parallel decomposition (%d, %d, %d) is not\n",
573  procs[IDIR],procs[JDIR],procs[KDIR]);
574  printf ("! consistent with the number of processors (%d).\n",nprocs);
575  QUIT_PLUTO(1);
576  }
577  return AL_USER_DECOMP;
578  }
579 
580  printf ("! GetDecompMode: invalid decomposition mode");
581  QUIT_PLUTO(1);
582 }
583 #endif
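/* ------------------------------------------------------------------------
    Usage sketch (illustrative only; names other than Initialize() and the
    structure types are assumptions): Initialize() is meant to be called
    once from the code's driver, after the MPI layer has been started and
    before the main integration loop. A minimal call sequence could look
    like

       int main (int argc, char *argv[])
       {
         Data     data;
         Runtime  runtime;
         Grid     grid[3];
         Cmd_Line cmd_line;

       #ifdef PARALLEL
         MPI_Init (&argc, &argv);    -- MPI start-up shown schematically --
       #endif
         Initialize (argc, argv, &data, &runtime, grid, &cmd_line);

         ... main computation loop and finalization ...
       }
   ------------------------------------------------------------------------ */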
Total number of points in the local domain (boundaries excluded).
Definition: structs.h:102