   52   #ifndef SHOW_TIME_STEPS
   53     #define SHOW_TIME_STEPS  NO

   64   int main (int argc, char *argv[])
 
   76   char   first_step=1, last_step = 0;
 
   91    MPI_Comm_rank (MPI_COMM_WORLD, &prank);
 
   94   Initialize (argc, argv, &data, &ini, grd, &cmd_line);
 
   96   print1 ("> Basic data type:\n");
   97   print1 ("  sizeof (char)     = %d\n", sizeof(char));
   98   print1 ("  sizeof (uchar)    = %d\n", sizeof(unsigned char));
   99   print1 ("  sizeof (short)    = %d\n", sizeof(short));
  100   print1 ("  sizeof (ushort)   = %d\n", sizeof(unsigned short));
  101   print1 ("  sizeof (int)      = %d\n", sizeof(int));
  102   print1 ("  sizeof (*int)     = %d\n", sizeof(int_pnt));
  103   print1 ("  sizeof (float)    = %d\n", sizeof(float));
  104   print1 ("  sizeof (double)   = %d\n", sizeof(double));
  105   print1 ("  sizeof (*double)  = %d\n", sizeof(dbl_pnt));
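
This block reports the size of each basic data type on the current machine, which is useful when binary data files are moved between architectures. A minimal standalone sketch of the same idea (the cast to int is an assumption here, since sizeof yields a size_t):

#include <stdio.h>

int main (void)
{
/* -- report basic data-type sizes, as PLUTO does at startup -- */
  printf ("> Basic data type:\n");
  printf ("  sizeof (char)   = %d\n", (int)sizeof(char));
  printf ("  sizeof (int)    = %d\n", (int)sizeof(int));
  printf ("  sizeof (float)  = %d\n", (int)sizeof(float));
  printf ("  sizeof (double) = %d\n", (int)sizeof(double));
  return 0;
}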
 
  149   } else if (cmd_line.write){
 
  153      Async_EndWriteData (&ini);
 
  157   print1 ("> Starting computation... \n\n");
 
  186       print1 ("step:%d ; t = %10.4e ; dt = %10.4e ; %d %% ; [%f, %d",
 
  190       #if (PARABOLIC_FLUX & SUPER_TIME_STEPPING) 
  193       #if (PARABOLIC_FLUX & RK_CHEBYSHEV) 
  203     if (!first_step && !last_step && cmd_line.write) {
 
  214     err = Integrate (&data, Solver, &Dts, grd);
 
  241     #if SHOW_TIME_STEPS == YES 
  243        double cg, dta, dtp, dtc;
 
  248         MPI_Allreduce (&dta, &cg, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
 
  251         MPI_Allreduce (&dtp, &cg, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
 
  254         MPI_Allreduce (&dtc, &cg, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
 
  261        print1 ("  dt(adv)  = cfl x %10.4e;\n",dta);
  262        print1 ("  dt(par)  = cfl x %10.4e;\n",dtp);
  263        print1 ("  dt(cool) =       %10.4e;\n",dtc);
 
  273     #if (COOLING == NO) && ((DIMENSIONS == 1) || (DIMENSIONAL_SPLITTING == NO)) 
  285                     MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
 
  288      MPI_Allreduce (&g_maxRiemannIter, &nv, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
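
These diagnostics reduce per-process quantities to global ones before printing: the most restrictive (smallest) advective, parabolic and cooling time steps are obtained with MPI_MIN reductions, while the maximum Mach number and Riemann-solver iteration count use MPI_MAX. A minimal sketch of the reduction/printing pattern, assuming a hypothetical local value dta_loc and printing from rank 0 only:

#include <stdio.h>
#include <mpi.h>

int main (int argc, char *argv[])
{
  int    rank;
  double dta_loc, dta_glob;   /* hypothetical local / global advective dt */

  MPI_Init (&argc, &argv);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);

  dta_loc = 1.0/(rank + 1.0);   /* pretend each rank has a different local dt */

/* -- the global time step is the smallest one over all processes -- */
  MPI_Allreduce (&dta_loc, &dta_glob, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);

  if (rank == 0) printf ("  dt(adv) = cfl x %10.4e\n", dta_glob);

  MPI_Finalize ();
  return 0;
}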
 
  319     if (!first_step && !last_step && cmd_line.write) {
 
  329     #if SHOW_TIME_STEPS == YES 
  331        double cg, dta, dtp, dtc;
 
  336         MPI_Allreduce (&dta, &cg, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
 
  339         MPI_Allreduce (&dtp, &cg, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
 
  342         MPI_Allreduce (&dtc, &cg, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
 
  345        print1 ("\t[dt/dta = %10.4e, dt/dtp = %10.4e, dt/dtc = %10.4e \n",
 
  356                     MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
 
  359      MPI_Allreduce (&g_maxRiemannIter, &nv, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
 
  368      Async_EndWriteData (&ini);
 
  376       print1 ("step:%d ; t = %10.4e ; dt = %10.4e ; %d %% ; [%f, %d",
 
  379       #if (PARABOLIC_FLUX & SUPER_TIME_STEPPING) 
  382       #if (PARABOLIC_FLUX & RK_CHEBYSHEV) 
  394     err = Integrate (&data, Solver, &Dts, grd);
 
  433      Async_EndWriteData (&ini);
 
  438    MPI_Barrier (MPI_COMM_WORLD);
 
  439    print1 ("\n> Total allocated memory  %6.2f Mb (proc #%d)\n",
 
  441    MPI_Barrier (MPI_COMM_WORLD);
 
  447   g_dt = difftime(tend, tbeg);
 
  449   print1("> Average time/step       %10.2e  (sec)  \n",
 
  451   print1("> Local time                %s", asctime(localtime(&tend)));
 
  456    MPI_Barrier (MPI_COMM_WORLD);
 
  462 #undef SHOW_TIME_STEPS 
  505     #if DIMENSIONAL_SPLITTING == YES 
  507        if (AdvanceStep (d, Solver, Dts, grid) != 0) return (1);
 
  510      if (AdvanceStep (d, Solver, Dts, grid) != 0) return (1);
 
  518     #if DIMENSIONAL_SPLITTING == YES 
  520        if (AdvanceStep (d, Solver, Dts, grid) != 0) return (1);
 
  523      if (AdvanceStep (d, Solver, Dts, grid) != 0) return (1);
 
  544   int days, hours, mins, secs;
 
  546   days  = (int) (dt/86400.0);
 
  547   hours = (int) ((dt - 86400.0*days)/3600.0);
 
  548   mins  = (int) ((dt - 86400.0*days - 3600.0*hours)/60.);
 
  549   secs  = (int) (dt - 86400.0*days - 3600.0*hours - 60.0*mins);
 
  551   sprintf (c, " %dd:%dh:%dm:%ds", days, hours, mins, secs);
 
  570   double dt_adv, dt_par, dtnext;
 
  580    MPI_Allreduce (&xloc, &xglob, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
 
  582    #if (PARABOLIC_FLUX != NO) 
  584     MPI_Allreduce (&xloc, &xglob, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
 
  589     MPI_Allreduce (&xloc, &xglob, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
 
  598   #if (PARABOLIC_FLUX & EXPLICIT) 
  614      dxmin = MIN(dxmin, grid[idim].dl_min);
 
  624   #if (PARABOLIC_FLUX & SUPER_TIME_STEPPING) || (PARABOLIC_FLUX & RK_CHEBYSHEV) 
  646   if (dtnext < ini->first_dt*1.e-9){
 
  647     print1 ("! NextTimeStep(): dt is too small (%12.6e). Cannot continue.\n", dtnext);
 
  652     print1 ("! NextTimeStep(): initial dt exceeds stability limit\n");
 
  674   static int first_call = 1;
 
  675   int  n, check_dt, check_dn, check_dclock;
 
  676   int  restart_update, last_step;
 
  687   last_step = (fabs(t - ini->tstop) < 1.e-12 ? 1:0);
 
  696        tstart = MPI_Wtime();
 
  708    if (prank == 0) tend = MPI_Wtime();
 
  709    MPI_Bcast(&tend, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
 
  720     check_dt = check_dn = check_dclock = 0; 
 
  724     if (output->dt > 0.0){
  725       check_dt = (int) (tnext/output->dt) - (int)(t/output->dt);
 
  738     if (output->dclock > 0.0){
 
  740        dclock = tend - tbeg[n];
 
  742        dclock = difftime(clock_end, clock_beg[n]);
 
  744       if (dclock >= output->dclock) {
 
  754       check_dclock = check_dclock || g_stepNumber == 0 || last_step;
 
  759     if (check_dt || check_dn || check_dclock) { 
 
  762        if (!strcmp(output->mode,"single_file_async")){
 
  763          Async_BegWriteData (d, output, grid);
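
CheckForOutput() decides whether each output type is due by three independent tests: a crossing of the output time interval output->dt (detected by comparing the integer part of t/dt before and after the step), a step-counter interval output->dn, and a wall-clock interval output->dclock; output is also forced on the first and last step. A compact sketch of the time-interval test, with hypothetical names:

#include <stdio.h>

/* Return 1 if the interval [t, tnext] crosses a multiple of out_dt. */
int CheckTimeCrossing (double t, double tnext, double out_dt)
{
  if (out_dt <= 0.0) return 0;
  return (int)(tnext/out_dt) - (int)(t/out_dt);
}

int main (void)
{
  double out_dt = 0.1;

/* t = 0.95 -> 1.02 crosses t = 1.0: (int)10.2 - (int)9.5 = 1  */
  printf ("crossing? %d\n", CheckTimeCrossing (0.95, 1.02, out_dt));

/* t = 0.91 -> 0.97 crosses nothing: (int)9.7 - (int)9.1 = 0   */
  printf ("crossing? %d\n", CheckTimeCrossing (0.91, 0.97, out_dt));
  return 0;
}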
 
  803   int check_dt, check_dn;
 
  808   check_dt = (int) (tnext/ini->anl_dt) - (int)(t/ini->anl_dt);
 
  810   check_dt = check_dt && (ini->anl_dt > 0.0);
 
  813   check_dn = check_dn && (ini->anl_dn > 0);
 
  815   if (check_dt || check_dn) Analysis (d, grid);
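
CheckForAnalysis() applies the same crossing test sketched above to the analysis intervals anl_dt (in time) and anl_dn (in steps) and calls Analysis() when either fires. For example, with anl_dt = 0.1, advancing from t = 0.95 to tnext = 1.02 gives (int)(10.2) - (int)(9.5) = 1, so Analysis() is invoked; a step-based trigger such as g_stepNumber % anl_dn == 0 (the modulo form is an assumption here) covers the anl_dn case.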
 
static char * TotalExecutionTime(double)
 
int main(int argc, char *argv[])
 
void FARGO_ComputeVelocity(const Data *, Grid *)
 
int g_maxRootIter
Maximum number of iterations for root finder. 
 
Riemann_Solver * SetSolver(const char *solver)
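
SetSolver() maps the solver name read from the input file onto a Riemann_Solver function pointer. A minimal sketch of this kind of string-to-function dispatch (the solver names and the stand-in function type are illustrative, not the actual PLUTO list):

#include <stdio.h>
#include <string.h>

typedef void SolverFn (void);          /* stand-in for Riemann_Solver */

static void TwoShock (void) { printf ("two_shock solver\n"); }
static void Roe      (void) { printf ("roe solver\n");       }

/* Return the solver whose name matches the input string. */
SolverFn *SetSolverSketch (const char *name)
{
  if (!strcmp(name, "two_shock")) return TwoShock;
  if (!strcmp(name, "roe"))       return Roe;
  printf ("! SetSolver: unknown solver '%s'\n", name);
  return NULL;
}

int main (void)
{
  SolverFn *Solver = SetSolverSketch ("roe");
  if (Solver != NULL) Solver();
  return 0;
}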
 
void WriteData(const Data *, Output *, Grid *)
 
Output output[MAX_OUTPUT_TYPES]
 
void print1(const char *fmt,...)
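
print1() behaves like printf() but, in a parallel run, produces output from a single process only. A plausible sketch of such a rank-0 wrapper (not the actual PLUTO implementation), using the standard variadic machinery:

#include <stdio.h>
#include <stdarg.h>
#include <mpi.h>

/* Printf-like output restricted to the process with rank 0. */
void print1_sketch (const char *fmt, ...)
{
  int rank;
  va_list ap;

  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  if (rank != 0) return;

  va_start (ap, fmt);
  vprintf (fmt, ap);
  va_end (ap);
}

int main (int argc, char *argv[])
{
  MPI_Init (&argc, &argv);
  print1_sketch ("> Hello from rank 0 only (sizeof(double) = %d)\n",
                 (int)sizeof(double));
  MPI_Finalize ();
  return 0;
}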
 
void Riemann_Solver(const State_1D *, int, int, double *, Grid *)
 
static void CheckForOutput(Data *, Runtime *, Grid *)
 
int AL_Init(int *argc, char ***argv)
 
double **** Vc
The main four-index data array used for cell-centered primitive variables. 
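
Vc is indexed as Vc[nv][k][j][i]: the variable index comes first, followed by the three spatial indices in z, y, x order. A self-contained sketch of that indexing with a fixed-size stand-in array (NVAR, RHO and the extents below are illustrative values, not PLUTO's):

#include <stdio.h>

#define NVAR 5      /* hypothetical number of variables */
#define RHO  0      /* hypothetical density index       */
#define NX3  4
#define NX2  4
#define NX1  8

int main (void)
{
  static double Vc[NVAR][NX3][NX2][NX1];   /* stand-in for the dynamic 4D array */
  int k, j, i;

/* -- variable index first, then z, y, x cell indices -- */
  for (k = 0; k < NX3; k++){
  for (j = 0; j < NX2; j++){
  for (i = 0; i < NX1; i++){
    Vc[RHO][k][j][i] = 1.0;
  }}}

  printf ("Vc[RHO][0][0][0] = %f\n", Vc[RHO][0][0][0]);
  return 0;
}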
 
int g_maxRiemannIter
Maximum number of iterations for iterative Riemann Solver. 
 
double g_dt
The current integration time step. 
 
int Nrkc
Maximum number of substeps used in RKC. 
 
double * cmax
Maximum signal velocity for hyperbolic eqns. 
 
Collects global variable definitions. 
 
unsigned char *** flag
Pointer to a 3D array setting useful integration flags that are retrieved during integration. 
 
int log_freq
The log frequency (log) 
 
double inv_dtp
Inverse of diffusion (parabolic) time step. 
 
double anl_dt
Time step increment for Analysis() (analysis (double)). 
 
double rmax_par
(STS) max ratio between current time step and parabolic time step 
 
double dt
time increment between outputs - one per output 
 
double inv_dta
Inverse of advection (hyperbolic) time step. 
 
double g_maxMach
The maximum Mach number computed during integration. 
 
double dt_cool
Cooling time step. 
 
long int g_stepNumber
Gives the current integration step number. 
 
#define TOT_LOOP(k, j, i)
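
TOT_LOOP(k,j,i) spans the total computational domain with a triple nested loop. A plausible expansion, assuming the total extents are named NX3_TOT, NX2_TOT and NX1_TOT (the actual macro body in the PLUTO headers may differ):

/* Illustrative definition only. */
#define TOT_LOOP(k,j,i)  for ((k) = 0; (k) < NX3_TOT; (k)++) \
                         for ((j) = 0; (j) < NX2_TOT; (j)++) \
                         for ((i) = 0; (i) < NX1_TOT; (i)++)

so that, for instance, TOT_LOOP(k,j,i) d->flag[k][j][i] = 0; would reset the integration flags everywhere.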
 
void UnsetJetDomain(const Data *d, int dir, Grid *grid)
 
int g_dir
Specifies the current sweep or direction of integration. 
 
double cfl_par
Courant number for diffusion (STS only). 
 
void Analysis(const Data *, Grid *)
 
char solv_type[64]
The Riemann solver (Solver) 
 
double dl_min
minimum cell length. 
 
double tstop
The final integration time (tstop) 
 
double cfl
Courant number for advection. 
 
#define ARRAY_1D(nx, type)                
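
ARRAY_1D(nx, type) allocates a one-dimensional array of nx elements of the given type. A hedged sketch of how such a macro can be written and used (PLUTO's own macro presumably wraps its memory-accounting allocator, which is not reproduced here):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative definition only: allocate nx elements of 'type'. */
#define ARRAY_1D(nx, type)  (type *) malloc ((nx)*sizeof(type))

int main (void)
{
  int     i, nx = 8;
  double *a = ARRAY_1D(nx, double);

  for (i = 0; i < nx; i++) a[i] = (double)i;
  printf ("a[%d] = %f\n", nx-1, a[nx-1]);

  free (a);
  return 0;
}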
 
long int NMAX_POINT
Maximum number of points among the three directions, boundaries excluded. 
 
int g_operatorStep
Gives the current operator step. 
 
double g_time
The current integration time. 
 
void SplitSource(const Data *, double, Time_Step *, Grid *)
 
char mode[32]
single or multiple files - one per output 
 
void SetJetDomain(const Data *d, int dir, int log_freq, Grid *grid)
 
int type
output format (DBL, FLT, ...) - one per output 
 
int Nsts
Maximum number of substeps used in STS. 
 
void Initialize(int argc, char *argv[], Data *data, Runtime *runtime, Grid *grid, Cmd_Line *cmd_line)
 
void FreeArray4D(void ****m)
 
static void CheckForAnalysis(Data *, Runtime *, Grid *)
 
double first_dt
The initial time step (first_dt) 
 
double cfl_par
(STS) parabolic cfl number 
 
void RestartFromFile(Runtime *, int, int, Grid *)
 
double glm_ch
The propagation speed of divergence error. 
 
double cfl_max_var
Maximum increment between consecutive time steps (CFL_max_var). 
 
void RestartDump(Runtime *)
 
#define QUIT_PLUTO(e_code)  
 
void GLM_Source(const Data_Arr Q, double dt, Grid *grid)
 
void GLM_Init(const Data *d, const Time_Step *Dts, Grid *grid)
 
long int g_usedMemory
Amount of used memory in bytes. 
 
int AdvanceStep(const Data *, Riemann_Solver *, Time_Step *, Grid *)
 
static int Integrate(Data *, Riemann_Solver *, Time_Step *, Grid *)
 
static double NextTimeStep(Time_Step *, Runtime *, Grid *)
 
double dclock
time increment in clock hours - one per output 
 
int dn
step increment between outputs - one per output 
 
double cfl
Hyperbolic cfl number (CFL)