PLUTO
main.c
1 /* ///////////////////////////////////////////////////////////////////// */
2 /*!
3  \file
4  \brief PLUTO main function.
5 
6  The file main.c contains the PLUTO main function and several other
7  top-level routines.
8  main() provides basic code initialization, handles the principal
9  integration loop and calls the output driver write_data.c.
10  Other useful functions contained in this file are Integrate(), which does
11  the actual integration, and NextTimeStep(), responsible for computing the
12  next time step based on the information available at the last time
13  level.
14 
15  We use two slightly different integration loops depending on whether
16  asynchronous I/O has to be performed (macro USE_ASYNC_IO).
17  If the macro USE_ASYNC_IO is not defined, the standard integration
18  loop consists of the following steps (see the sketch after this header):
19 
20  - Check for last step & adjust dt if necessary
21  - Dump log information: n, t(n), dt(n), MAX_MACH(n-1), etc.
22  - Check output/analysis: t(n) < tout < t(n)+dt(n)
23  - write to disk/call analysis using {U(n), t(n), dt(n)}
24  - Advance solution using dt(n): U(n) --> U(n+1)
25  - Increment t(n+1) = t(n) + dt(n)
26  - [MPI] Show dominant time step (n)
27  - [MPI] Get next time step dt(n+1)
28  - [MPI] reduction operations (n)
29  - Increment n --> n+1
30 
31  Otherwise, using asynchronous I/O:
32 
33  - Check for last step & adjust dt
34  - check for output/analysis: t(n) < tout < t(n+1)
35  - Write data/call analysis using {U(n), t(n), dt(n)}
36  - [MPI] Show dominant time step (n-1)
37  - [MPI] reduction operations (n-1)
38  - [AIO]: finish writing
39  - Dump log information: n, t(n), dt(n), MAX_MACH(n-1), etc.
40  - Advance solution using dt(n), U(n) --> U(n+1)
41  - Increment t(n+1) = t(n) + dt(n)
42  - [MPI] Get next time step dt(n)
43  - Increment n --> n+1
44 
45  \author A. Mignone (mignone@ph.unito.it)
46  \date Aug 16, 2012
47 */
48 /* ///////////////////////////////////////////////////////////////////// */
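The loop structure outlined above can be condensed into a small stand-alone program. The following is a simplified sketch only: ToyAdvance() and ToyNextDt() are made-up placeholders for PLUTO's Integrate()/AdvanceStep() and NextTimeStep(), and the MPI reductions, output/analysis checks and asynchronous I/O branches are omitted.

#include <stdio.h>

/* Made-up stand-ins for the real integration machinery (illustration only). */
static void   ToyAdvance (double *u, double dt) { *u += dt*(-(*u)); }  /* du/dt = -u  */
static double ToyNextDt  (double dt_old)        { return dt_old; }     /* constant dt */

int main (void)
{
  double u = 1.0, t = 0.0, dt = 1.e-2, tstop = 1.0;
  long int n = 0;
  int last_step = 0;

  while (!last_step){
    if (t + dt >= tstop*(1.0 - 1.e-8)){   /* -- check for last step, adjust dt     -- */
      dt = tstop - t;
      last_step = 1;
    }
    if (n%100 == 0) printf ("step:%ld ; t = %10.4e ; dt = %10.4e\n", n, t, dt);
                                          /* -- (write data / call analysis here)  -- */
    ToyAdvance (&u, dt);                  /* -- advance solution, U(n) --> U(n+1)  -- */
    t += dt;                              /* -- increment t(n+1) = t(n) + dt(n)    -- */
    dt = ToyNextDt (dt);                  /* -- get next time step dt(n+1)         -- */
    n++;
  }
  printf ("> Done, t = %g, u = %g\n", t, u);
  return 0;
}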
49 #include "pluto.h"
50 #include "globals.h"
51 
52 #ifndef SHOW_TIME_STEPS
53  #define SHOW_TIME_STEPS NO /* -- show time steps due to advection,
54  diffusion and cooling */
55 #endif
56 
57 static double NextTimeStep (Time_Step *, Runtime *, Grid *);
58 static char *TotalExecutionTime (double);
59 static int Integrate (Data *, Riemann_Solver *, Time_Step *, Grid *);
60 static void CheckForOutput (Data *, Runtime *, Grid *);
61 static void CheckForAnalysis (Data *, Runtime *, Grid *);
62 
63 /* ********************************************************************* */
64 int main (int argc, char *argv[])
65 /*!
66  * Start PLUTO, initialize functions, define data structures and
67  * handle the main integration loop.
68  *
69  * \param [in] argc Argument count.
70  * \param [in] argv Array of pointers to the argument strings.
71  * \return This function returns 0 on normal exit.
72  *
73  *********************************************************************** */
74 {
75  int nv, idim, err;
76  char first_step=1, last_step = 0;
77  double scrh;
78  Data data;
79  time_t tbeg, tend;
80  Riemann_Solver *Solver;
81  Grid grd[3];
82  Time_Step Dts;
83  Cmd_Line cmd_line;
84  Runtime ini;
85  Output *output;
86  double *dbl_pnt;
87  int *int_pnt;
88 
89  #ifdef PARALLEL
90  AL_Init (&argc, &argv);
91  MPI_Comm_rank (MPI_COMM_WORLD, &prank);
92  #endif
93 
94  Initialize (argc, argv, &data, &ini, grd, &cmd_line);
95 
96  print1 ("> Basic data type:\n");
97  print1 (" sizeof (char) = %d\n", sizeof(char));
98  print1 (" sizeof (uchar) = %d\n", sizeof(unsigned char));
99  print1 (" sizeof (short) = %d\n", sizeof(short));
100  print1 (" sizeof (ushort) = %d\n", sizeof(unsigned short));
101  print1 (" sizeof (int) = %d\n", sizeof(int));
102  print1 (" sizeof (*int) = %d\n", sizeof(int_pnt));
103  print1 (" sizeof (float) = %d\n", sizeof(float));
104  print1 (" sizeof (double) = %d\n", sizeof(double));
105  print1 (" sizeof (*double) = %d\n", sizeof(dbl_pnt));
106 
107 /*
108  print1 ("\n> Structure data type:\n");
109  print1 (" sizeof (CMD_LINE) = %d\n", sizeof(Cmd_Line));
110  print1 (" sizeof (DATA) = %d\n", sizeof(Data));
111  print1 (" sizeof (STATE_1D) = %d\n", sizeof(State_1D));
112  print1 (" sizeof (GRID) = %d\n", sizeof(Grid));
113  print1 (" sizeof (TIME_STEP) = %d\n", sizeof(Time_Step));
114  print1 (" sizeof (OUTPUT) = %d\n", sizeof(Output));
115  print1 (" sizeof (RUNTIME) = %d\n", sizeof(Runtime));
116  print1 (" sizeof (RESTART) = %d\n", sizeof(Restart));
117  print1 (" sizeof (RGB) = %d\n", sizeof(RGB));
118  print1 (" sizeof (IMAGE) = %d\n", sizeof(Image));
119  print1 (" sizeof (FLOAT_VECT) = %d\n", sizeof(Float_Vect));
120  print1 (" sizeof (INDEX) = %d\n", sizeof(Index));
121  print1 (" sizeof (RBOX) = %d\n", sizeof(RBox));
122 */
123 
124 /* -- initialize members of Time_Step structure -- */
125 
126  Dts.cmax = ARRAY_1D(NMAX_POINT, double);
127  Dts.inv_dta = 0.0;
128  Dts.inv_dtp = 0.5e-38;
129  Dts.dt_cool = 1.e38;
130  Dts.cfl = ini.cfl;
131  Dts.cfl_par = ini.cfl_par;
132  Dts.rmax_par = ini.rmax_par;
133  Dts.Nsts = Dts.Nrkc = 0;
134 
135  Solver = SetSolver (ini.solv_type);
136 
137  time (&tbeg);
138  g_stepNumber = 0;
139 
140 /* --------------------------------------------------------
141  Check if restart is necessary.
142  If not, write initial condition to disk.
143  ------------------------------------------------------- */
144 
145  if (cmd_line.restart == YES) {
146  RestartFromFile (&ini, cmd_line.nrestart, DBL_OUTPUT, grd);
147  }else if (cmd_line.h5restart == YES){
148  RestartFromFile (&ini, cmd_line.nrestart, DBL_H5_OUTPUT, grd);
149  }else if (cmd_line.write){
150  CheckForOutput (&data, &ini, grd);
151  CheckForAnalysis (&data, &ini, grd);
152  #ifdef USE_ASYNC_IO
153  Async_EndWriteData (&ini);
154  #endif
155  }
156 
157  print1 ("> Starting computation... \n\n");
158 
159 /* =====================================================================
160  M A I N L O O P S T A R T S H E R E
161  ===================================================================== */
162 
163 #ifndef USE_ASYNC_IO /* -- Standard loop, don't use Asynchronous I/O -- */
164 
165  while (!last_step){
166 
167  /* ------------------------------------------------------
168  Check if this is the last integration step:
169  - final tstop has been reached: adjust time step
170  - or max number of steps has been reached
171  ------------------------------------------------------ */
172 
173  if ((g_time + g_dt) >= ini.tstop*(1.0 - 1.e-8)) {
174  g_dt = (ini.tstop - g_time);
175  last_step = 1;
176  }
177  if (g_stepNumber == cmd_line.maxsteps && cmd_line.maxsteps > 0) {
178  last_step = 1;
179  }
180 
181  /* ------------------------------------------------------
182  Dump log information
183  ------------------------------------------------------ */
184 
185  if (g_stepNumber%ini.log_freq == 0) {
186  print1 ("step:%d ; t = %10.4e ; dt = %10.4e ; %d %% ; [%f, %d",
187  g_stepNumber, g_time, g_dt, (int)(100.0*g_time/ini.tstop),
188  g_maxMach, g_maxRiemannIter);
189 /* if (g_maxRootIter > 0) print1 (", root it. # = %d",g_maxRootIter); */
190  #if (PARABOLIC_FLUX & SUPER_TIME_STEPPING)
191  print1 (", Nsts = %d",Dts.Nsts);
192  #endif
193  #if (PARABOLIC_FLUX & RK_CHEBYSHEV)
194  print1 (", Nrkc = %d",Dts.Nrkc);
195  #endif
196  print1 ("]\n");
197  }
198 
199  /* ------------------------------------------------------
200  check if it's time to write or perform analysis
201  ------------------------------------------------------ */
202 
203  if (!first_step && !last_step && cmd_line.write) {
204  CheckForOutput (&data, &ini, grd);
205  CheckForAnalysis(&data, &ini, grd);
206  }
207 
208  /* ------------------------------------------------------
209  Advance solution array by a single time step
210  g_dt = dt(n)
211  ------------------------------------------------------ */
212 
213  if (cmd_line.jet != -1) SetJetDomain (&data, cmd_line.jet, ini.log_freq, grd);
214  err = Integrate (&data, Solver, &Dts, grd);
215  if (cmd_line.jet != -1) UnsetJetDomain (&data, cmd_line.jet, grd);
216 
217  /* ------------------------------------------------------
218  Integration didn't go through. Step must
219  be redone from previously saved solution.
220  ------------------------------------------------------ */
221 /*
222  if (err != 0){
223  print1 ("! Step failed. Re-trying\n");
224  zones with problems must be tagged with MINMOD_FLAG and HLL_FLAG
225  time step should be halved
226  GET_SOL(&data);
227  }
228 */
229 
230  /* ------------------------------------------------------
231  Increment time, t(n+1) = t(n) + dt(n)
232  ------------------------------------------------------ */
233 
234  g_time += g_dt;
235 
236  /* ------------------------------------------------------
237  Show the time step ratios between the actual g_dt
238  and the advection, diffusion and cooling time scales.
239  ------------------------------------------------------ */
240 
241  #if SHOW_TIME_STEPS == YES
242  if (g_stepNumber%ini.log_freq == 0) {
243  double cg, dta, dtp, dtc;
244  dta = 1.0/Dts.inv_dta;
245  dtp = 0.5/Dts.inv_dtp;
246  dtc = Dts.dt_cool;
247  #ifdef PARALLEL
248  MPI_Allreduce (&dta, &cg, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
249  dta = cg;
250 
251  MPI_Allreduce (&dtp, &cg, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
252  dtp = cg;
253 
254  MPI_Allreduce (&dtc, &cg, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
255  dtc = cg;
256  #endif
257  /*
258  print1 ("[dt/dt(adv) = %10.4e, dt/dt(par) = %10.4e, dt/dt(cool) = %10.4e]\n",
259  g_dt/dta, g_dt/dtp, g_dt/dtc);
260  */
261  print1 (" dt(adv) = cfl x %10.4e;\n",dta);
262  print1 (" dt(par) = cfl x %10.4e;\n",dtp);
263  print1 (" dt(cool) = %10.4e;\n",dtc);
264  }
265  #endif
266 
267  /* ------------------------------------------------------
268  Get next time step dt(n+1).
269  Do it every two steps if cooling or dimensional
270  splitting are used.
271  ------------------------------------------------------ */
272 
273  #if (COOLING == NO) && ((DIMENSIONS == 1) || (DIMENSIONAL_SPLITTING == NO))
274  g_dt = NextTimeStep(&Dts, &ini, grd);
275  #else
276  if (g_stepNumber%2 == 1) g_dt = NextTimeStep(&Dts, &ini, grd);
277  #endif
278 
279  /* ------------------------------------------------------
280  Global MPI reduction operations
281  ------------------------------------------------------ */
282 
283  #ifdef PARALLEL
284  MPI_Allreduce (&g_maxMach, &scrh, 1,
285  MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
286  g_maxMach = scrh;
287 
288  MPI_Allreduce (&g_maxRiemannIter, &nv, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
289  g_maxRiemannIter = nv;
290  #endif
291 
292  g_stepNumber++;
293 
294  first_step = 0;
295  }
296 
297 #else /* Use Asynchronous I/O */
298 
299  while (!last_step){
300 
301  /* ------------------------------------------------------
302  Check if this is the last integration step:
303  - final tstop has been reached: adjust time step
304  - or max number of steps has been reached
305  ------------------------------------------------------ */
306 
307  if ((g_time + g_dt) >= ini.tstop*(1.0 - 1.e-8)) {
308  g_dt = (ini.tstop - g_time);
309  last_step = 1;
310  }
311  if (g_stepNumber == cmd_line.maxsteps && cmd_line.maxsteps > 0) {
312  last_step = 1;
313  }
314 
315  /* ------------------------------------------------------
316  check if it's time to write or perform analysis
317  ------------------------------------------------------ */
318 
319  if (!first_step && !last_step && cmd_line.write) {
320  CheckForOutput (&data, &ini, grd);
321  CheckForAnalysis(&data, &ini, grd);
322  }
323 
324  /* ------------------------------------------------------
325  Show the time step ratios between the actual g_dt
326  and the advection, diffusion and cooling time scales.
327  ------------------------------------------------------ */
328 
329  #if SHOW_TIME_STEPS == YES
330  if (!first_step && g_stepNumber%ini.log_freq == 0) {
331  double cg, dta, dtp, dtc;
332  dta = 1.0/Dts.inv_dta;
333  dtp = 0.5/Dts.inv_dtp;
334  dtc = Dts.dt_cool;
335  #ifdef PARALLEL
336  MPI_Allreduce (&dta, &cg, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
337  dta = cg;
338 
339  MPI_Allreduce (&dtp, &cg, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
340  dtp = cg;
341 
342  MPI_Allreduce (&dtc, &cg, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
343  dtc = cg;
344  #endif
345  print1 ("\t[dt/dta = %10.4e, dt/dtp = %10.4e, dt/dtc = %10.4e \n",
346  g_dt/dta, g_dt/dtp, g_dt/dtc);
347  }
348  #endif
349 
350  /* ------------------------------------------------------
351  Global MPI reduction operations
352  ------------------------------------------------------ */
353 
354  #ifdef PARALLEL
355  MPI_Allreduce (&g_maxMach, &scrh, 1,
356  MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
357  g_maxMach = scrh;
358 
359  MPI_Allreduce (&g_maxRiemannIter, &nv, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
360  g_maxRiemannIter = nv;
361  #endif
362 
363  /* ------------------------------------------------------
364  Finish writing using Async I/O
365  ------------------------------------------------------ */
366 
367  #ifdef USE_ASYNC_IO
368  Async_EndWriteData (&ini);
369  #endif
370 
371  /* ------------------------------------------------------
372  Dump log information
373  ------------------------------------------------------ */
374 
375  if (g_stepNumber%ini.log_freq == 0) {
376  print1 ("step:%d ; t = %10.4e ; dt = %10.4e ; %d %% ; [%f, %d",
377  g_stepNumber, g_time, g_dt, (int)(100.0*g_time/ini.tstop),
378  g_maxMach, g_maxRiemannIter);
379  #if (PARABOLIC_FLUX & SUPER_TIME_STEPPING)
380  print1 (", Nsts = %d",Dts.Nsts);
381  #endif
382  #if (PARABOLIC_FLUX & RK_CHEBYSHEV)
383  print1 (", Nrkc = %d",Dts.Nrkc);
384  #endif
385  print1 ("]\n");
386  }
387 
388  /* ------------------------------------------------------
389  Advance solution array by a single time step
390  g_dt = dt(n)
391  ------------------------------------------------------ */
392 
393  if (cmd_line.jet != -1) SetJetDomain (&data, cmd_line.jet, ini.log_freq, grd);
394  err = Integrate (&data, Solver, &Dts, grd);
395  if (cmd_line.jet != -1) UnsetJetDomain (&data, cmd_line.jet, grd);
396 
397  /* ------------------------------------------------------
398  Integration didn't go through. Step must
399  be redone from previously saved solution.
400  ------------------------------------------------------ */
401 /*
402  if (err != 0){
403  print1 ("! Step failed. Re-trying\n");
404  zones with problems must be tagged with MINMOD_FLAG and HLL_FLAG
405  time step should be halved
406  GET_SOL(&data);
407  }
408 */
409  /* ------------------------------------------------------
410  Increment time, t(n+1) = t(n) + dt(n)
411  ------------------------------------------------------ */
412 
413  g_time += g_dt;
414 
415  /* ------------------------------------------------------
416  Get next time step dt(n+1)
417  ------------------------------------------------------ */
418 
419  g_dt = NextTimeStep(&Dts, &ini, grd);
420  g_stepNumber++;
421  first_step = 0;
422  }
423 #endif /* USE_ASYNC_IO */
424 
425 /* =====================================================================
426  M A I N L O O P E N D S H E R E
427  ===================================================================== */
428 
429  if (cmd_line.write){
430  CheckForOutput (&data, &ini, grd);
431  CheckForAnalysis (&data, &ini, grd);
432  #ifdef USE_ASYNC_IO
433  Async_EndWriteData (&ini);
434  #endif
435  }
436 
437  #ifdef PARALLEL
438  MPI_Barrier (MPI_COMM_WORLD);
439  print1 ("\n> Total allocated memory %6.2f Mb (proc #%d)\n",
440  (float)g_usedMemory/1.e6,prank);
441  MPI_Barrier (MPI_COMM_WORLD);
442  #else
443  print1 ("\n> Total allocated memory %6.2f Mb\n",(float)g_usedMemory/1.e6);
444  #endif
445 
446  time(&tend);
447  g_dt = difftime(tend, tbeg);
448  print1("> Elapsed time %s\n", TotalExecutionTime(g_dt));
449  print1("> Average time/step %10.2e (sec) \n",
450  difftime(tend,tbeg)/(double)g_stepNumber);
451  print1("> Local time %s",asctime(localtime(&tend)));
452  print1("> Done\n");
453 
454  FreeArray4D ((void *) data.Vc);
455  #ifdef PARALLEL
456  MPI_Barrier (MPI_COMM_WORLD);
457  AL_Finalize ();
458  #endif
459 
460  return (0);
461 }
462 #undef SHOW_TIME_STEPS
463 /* ******************************************************************** */
464 int Integrate (Data *d, Riemann_Solver *Solver, Time_Step *Dts, Grid *grid)
465 /*!
466  * Advance equations by a single time-step.
467 
468  * \param d pointer to PLUTO Data structure;
469  * \param Solver pointer to a Riemann solver function;
470  * \param Dts pointer to Time_Step structure;
471  * \param grid pointer to grid structure.
472  *
473  * \return An integer giving success (0) or failure (nonzero); failure handling is still under development.
474  *
475  ********************************************************************** */
476 {
477  int idim, err = 0;
478  int i,j,k;
479 
480  g_maxMach = 0.0;
481  g_maxRiemannIter = 0;
482  g_maxRootIter = 0;
483 
484 /* -------------------------------------------------------
485  Initialize max propagation speed in Dedner's approach
486  ------------------------------------------------------- */
487 
488  #ifdef GLM_MHD /* -- initialize glm_ch -- */
489  GLM_Init (d, Dts, grid);
490  GLM_Source (d->Vc, 0.5*g_dt, grid);
491  #endif
492 
493  /* ---------------------------------------------
494  perform Strang Splitting on directions
495  (if necessary) and sources
496  --------------------------------------------- */
497 
498  TOT_LOOP(k,j,i) d->flag[k][j][i] = 0;
499 
500  #ifdef FARGO
501  FARGO_ComputeVelocity(d, grid);
502  #endif
503  if ((g_stepNumber%2) == 0){
504  g_operatorStep = HYPERBOLIC_STEP;
505  #if DIMENSIONAL_SPLITTING == YES
506  for (g_dir = 0; g_dir < DIMENSIONS; g_dir++){
507  if (AdvanceStep (d, Solver, Dts, grid) != 0) return (1);
508  }
509  #else
510  if (AdvanceStep (d, Solver, Dts, grid) != 0) return(1);
511  #endif
512  g_operatorStep = PARABOLIC_STEP;
513  SplitSource (d, g_dt, Dts, grid);
514  }else{
515  g_operatorStep = PARABOLIC_STEP;
516  SplitSource (d, g_dt, Dts, grid);
517  g_operatorStep = HYPERBOLIC_STEP;
518  #if DIMENSIONAL_SPLITTING == YES
519  for (g_dir = DIMENSIONS - 1; g_dir >= 0; g_dir--){
520  if (AdvanceStep(d, Solver, Dts, grid) != 0) return (1);
521  }
522  #else
523  if (AdvanceStep (d, Solver, Dts, grid) != 0) return(1);
524  #endif
525  }
526 
527  #ifdef GLM_MHD /* -- GLM source for dt/2 -- */
528  GLM_Source (d->Vc, 0.5*g_dt, grid);
529  #endif
530 
531  return (0); /* -- ok, step achieved -- */
532 }
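A note on the even/odd branch in Integrate(): the order of the hyperbolic update (AdvanceStep) and of SplitSource() is swapped on alternate steps, so that consecutive steps apply the operators in mirrored order. Schematically, writing H for the hyperbolic step and S for the split sources over one dt (operators acting right to left), this is

  U(n+1) = S H U(n)      on even steps,
  U(n+2) = H S U(n+1)    on odd steps,

so the pair composes as H S S H, the symmetric, Strang-like alternation used by operator-split schemes (a schematic reading of the branch above).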
533 
534 /* ********************************************************************* */
535 char *TotalExecutionTime (double dt)
536 /*!
537  *
538  * Convert a floating-point variable (dt, in seconds) into a string
539  * displaying days:hours:minutes:seconds.
540  *
541  *********************************************************************** */
542 {
543  static char c[128];
544  int days, hours, mins, secs;
545 
546  days = (int) (dt/86400.0);
547  hours = (int) ((dt - 86400.0*days)/3600.0);
548  mins = (int) ((dt - 86400.0*days - 3600.0*hours)/60.);
549  secs = (int) (dt - 86400.0*days - 3600.0*hours - 60.0*mins);
550 
551  sprintf (c, " %dd:%dh:%dm:%ds", days,hours, mins, secs);
552  return (c);
553 }
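As a worked example of the conversion above, TotalExecutionTime(93784.0) splits 93784 seconds into 1 day (86400 s), 2 hours (7200 s), 3 minutes (180 s) and 4 seconds, and returns the string " 1d:2h:3m:4s".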
554 
555 /* ********************************************************************* */
556 double NextTimeStep (Time_Step *Dts, Runtime *ini, Grid *grid)
557 /*!
558  * Compute and return the time step for the next time level
559  * using the information from the previous integration
560  * (Dts->inv_dta and Dts->inv_dtp).
561  *
562  * \param [in] Dts pointer to the Time_Step structure
563  * \param [in] ini pointer to the Runtime structure
564  * \param [in] grid pointer to array of Grid structures
565  *
566  * \return The time step for the next time level.
567  *********************************************************************** */
568 {
569  int idim;
570  double dt_adv, dt_par, dtnext;
571  double dxmin;
572  double xloc, xglob;
573 
574 /* ---------------------------------------------------
575  1. Take the maximum of inv_dt across all processors
576  --------------------------------------------------- */
577 
578  #ifdef PARALLEL
579  xloc = Dts->inv_dta;
580  MPI_Allreduce (&xloc, &xglob, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
581  Dts->inv_dta = xglob;
582  #if (PARABOLIC_FLUX != NO)
583  xloc = Dts->inv_dtp;
584  MPI_Allreduce (&xloc, &xglob, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
585  Dts->inv_dtp = xglob;
586  #endif
587  #if COOLING != NO
588  xloc = Dts->dt_cool;
589  MPI_Allreduce (&xloc, &xglob, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
590  Dts->dt_cool = xglob;
591  #endif
592  #endif
593 
594 /* ----------------------------------
595  2. Compute time step
596  ---------------------------------- */
597 
598  #if (PARABOLIC_FLUX & EXPLICIT)
599  dt_adv = 1.0/(Dts->inv_dta + 2.0*Dts->inv_dtp);
600  #else
601  dt_adv = 1.0/Dts->inv_dta;
602  #endif
603  dt_adv *= ini->cfl;
604  dtnext = dt_adv;
605 
606 /* -------------------------------------------------------
607  3. Maximum propagation speed for the local processor.
608  Global glm_ch will be computed later in GLM_Init.
609  ------------------------------------------------------- */
610 
611  #ifdef GLM_MHD
612  dxmin = grid[IDIR].dl_min;
613  for (idim = 1; idim < DIMENSIONS; idim++){ /* Min cell length */
614  dxmin = MIN(dxmin, grid[idim].dl_min);
615  }
616  glm_ch = ini->cfl*dxmin/dtnext;
617  #endif
618 
619 /* ---------------------------------------------------------
620  4. With STS, the ratio between advection (full) and
621  parabolic time steps should not exceed ini->rmax_par.
622  --------------------------------------------------------- */
623 
624  #if (PARABOLIC_FLUX & SUPER_TIME_STEPPING) || (PARABOLIC_FLUX & RK_CHEBYSHEV)
625  dt_par = ini->cfl_par/(2.0*Dts->inv_dtp);
626  dtnext *= MIN(1.0, ini->rmax_par/(dt_adv/dt_par));
627  #endif
628 
629 /* ----------------------------------
630  5. Compute Cooling time step
631  ---------------------------------- */
632 
633  #if COOLING != NO
634  dtnext = MIN(dtnext, Dts->dt_cool);
635  #endif
636 
637 /* --------------------------------------------------------------
638  6. Allow time step to vary at most by a factor
639  ini->cfl_max_var.
640  Quit if dt gets too small, issue a warning if first_dt has
641  been overestimated.
642  -------------------------------------------------------------- */
643 
644  dtnext = MIN(dtnext, ini->cfl_max_var*g_dt);
645 
646  if (dtnext < ini->first_dt*1.e-9){
647  print1 ("! NextTimeStep(): dt is too small (%12.6e). Cannot continue.\n", dtnext);
648  QUIT_PLUTO(1);
649  }
650 
651  if (g_stepNumber <= 1 && (ini->first_dt > dtnext/ini->cfl)){
652  print1 ("! NextTimeStep(): initial dt exceeds stability limit\n");
653  }
654 
655 /* --------------------------------------------
656  7. Reset time step coefficients
657  -------------------------------------------- */
658 
659  DIM_LOOP(idim) Dts->cmax[idim] = 0.0;
660  Dts->inv_dta = 0.0;
661  Dts->inv_dtp = 0.5e-38;
662  Dts->dt_cool = 1.e38;
663 
664  return(dtnext);
665 }
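A worked illustration of NextTimeStep() with made-up numbers: take ini->cfl = 0.4, Dts->inv_dta = 2.0, no explicit parabolic terms and no cooling. Step 2 gives dt_adv = 0.4*(1.0/2.0) = 0.2, so dtnext = 0.2; if the current g_dt is 0.15 and ini->cfl_max_var = 1.1, step 6 caps the result at MIN(0.2, 1.1*0.15) = 0.165, which is the value returned.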
666 
667 /* ********************************************************************* */
668 void CheckForOutput (Data *d, Runtime *ini, Grid *grid)
669 /*!
670  * Check if file output has to be performed.
671  *
672  *********************************************************************** */
673 {
674  static int first_call = 1;
675  int n, check_dt, check_dn, check_dclock;
676  int restart_update, last_step;
677  double t, tnext;
678  Output *output;
679  static time_t clock_beg[MAX_OUTPUT_TYPES], clock_end;
680  static double tbeg[MAX_OUTPUT_TYPES], tend;
681  double dclock;
682 
683  restart_update = 0;
684  t = g_time;
685  tnext = t + g_dt;
686 
687  last_step = (fabs(t-ini->tstop) < 1.e-12 ? 1:0);
688 
689 /* -- on first execution initialize
690  current beginning time for all output types -- */
691 
692  if (first_call){
693  #ifdef PARALLEL
694  if (prank == 0){
695  double tstart;
696  tstart = MPI_Wtime();
697  for (n = 0; n < MAX_OUTPUT_TYPES; n++) tbeg[n] = tstart;
698  }
699  MPI_Bcast(tbeg, MAX_OUTPUT_TYPES, MPI_DOUBLE, 0, MPI_COMM_WORLD);
700  #else
701  for (n = 0; n < MAX_OUTPUT_TYPES; n++) time(clock_beg + n);
702  #endif
703  }
704 
705 /* -- get current time -- */
706 
707  #ifdef PARALLEL
708  if (prank == 0) tend = MPI_Wtime();
709  MPI_Bcast(&tend, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
710  #else
711  time(&clock_end);
712  #endif
713 
714 /* -------------------------------------------------------
715  start main loop on outputs
716  ------------------------------------------------------- */
717 
718  for (n = 0; n < MAX_OUTPUT_TYPES; n++){
719  output = ini->output + n;
720  check_dt = check_dn = check_dclock = 0;
721 
722  /* -- check time interval in code units (dt) -- */
723 
724  if (output->dt > 0.0){
725  check_dt = (int) (tnext/output->dt) - (int)(t/output->dt);
726  check_dt = check_dt || g_stepNumber == 0 || last_step;
727  }
728 
729  /* -- check time interval in number of steps (dn) -- */
730 
731  if (output->dn > 0){
732  check_dn = (g_stepNumber%output->dn) == 0;
733  check_dn = check_dn || g_stepNumber == 0 || last_step;
734  }
735 
736  /* -- check time interval in clock time (dclock) -- */
737 
738  if (output->dclock > 0.0){
739  #ifdef PARALLEL
740  dclock = tend - tbeg[n];
741  #else
742  dclock = difftime(clock_end, clock_beg[n]);
743  #endif
744  if (dclock >= output->dclock) {
745  check_dclock = 1;
746  #ifdef PARALLEL
747  tbeg[n] = tend;
748  #else
749  time(clock_beg + n);
750  #endif
751  }else{
752  check_dclock = 0;
753  }
754  check_dclock = check_dclock || g_stepNumber == 0 || last_step;
755  }
756 
757  /* -- if any of the previous is true dump data to disk -- */
758 
759  if (check_dt || check_dn || check_dclock) {
760 
761  #ifdef USE_ASYNC_IO
762  if (!strcmp(output->mode,"single_file_async")){
763  Async_BegWriteData (d, output, grid);
764  }else{
765  WriteData(d, output, grid);
766  }
767  #else
768  WriteData(d, output, grid);
769  #endif
770 
771  /* ----------------------------------------------------------
772  save the file number of the dbl and dbl.h5 output format
773  for writing restart.out once we exit the loop.
774  ---------------------------------------------------------- */
775 
776  if ((output->type == DBL_OUTPUT) ||
777  (output->type == DBL_H5_OUTPUT)) restart_update = 1;
778  }
779  }
780 
781 /* -------------------------------------------------------
782  Dump restart information if required
783 
784  Note that if both dbl and dbl.h5 formats are used,
785  bookkeeping is done using dbl format.
786  ------------------------------------------------------- */
787 
788  if (restart_update) RestartDump (ini);
789 
790  first_call = 0;
791 }
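To see how the check_dt test in CheckForOutput() catches an output time falling inside the current step (illustrative numbers): with output->dt = 0.1, g_time = 0.297 and g_dt = 0.01, tnext = 0.307 and (int)(tnext/output->dt) - (int)(t/output->dt) = 3 - 2 = 1, so the output time 0.3 lies in (t, t+dt] and the file is written; with g_time = 0.28 instead, the difference is 2 - 2 = 0 and this criterion does not trigger.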
792 
793 /* ******************************************************************** */
794 void CheckForAnalysis (Data *d, Runtime *ini, Grid *grid)
795 /*
796  *
797  * PURPOSE
798  *
799  * Check if Analysis needs to be called
800  *
801  ********************************************************************** */
802 {
803  int check_dt, check_dn;
804  double t, tnext;
805 
806  t = g_time;
807  tnext = t + g_dt;
808  check_dt = (int) (tnext/ini->anl_dt) - (int)(t/ini->anl_dt);
809  check_dt = check_dt || g_stepNumber == 0 || fabs(t - ini->tstop) < 1.e-9;
810  check_dt = check_dt && (ini->anl_dt > 0.0);
811 
812  check_dn = (g_stepNumber%ini->anl_dn) == 0;
813  check_dn = check_dn && (ini->anl_dn > 0);
814 
815  if (check_dt || check_dn) Analysis (d, grid);
816 }