hdf5_io.c
/* ///////////////////////////////////////////////////////////////////// */
/*!
  \file
  \brief HDF5 I/O main driver.

  This file provides I/O functionality for the HDF5 data format.
  The output suffix is ".h5".

  The WriteHDF5() function writes data in serial or parallel mode,
  in either single or double precision.

  The ReadHDF5() function reads double-precision data in serial or
  parallel mode.

  \note
  By setting "MPI_POSIX" to "YES", HDF5 uses an alternative parallel I/O
  driver, MPI POSIX, which combines MPI-2 and POSIX I/O:
  MPI coordinates the actions of several processes while POSIX calls
  perform the actual I/O to disk.
  This driver has no collective I/O mode and will almost certainly not
  work correctly for files accessed on distributed parallel systems
  when the file is located on a non-parallel filesystem.
  On some systems the MPI POSIX driver may perform better than the
  MPI-IO driver in independent I/O mode.\n
  For more information see
  http://www.hdfgroup.org/HDF5/PHDF5/parallelhdf5hints.pdf

  \authors C. Zanni (zanni@oato.inaf.it)\n
           A. Mignone (mignone@ph.unito.it)\n
           G. Muscianisi (g.muscianisi@cineca.it)

  \date    May 22, 2014
*/
/* ///////////////////////////////////////////////////////////////////// */
#include "pluto.h"
#define H5_USE_16_API
#include "hdf5.h"
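/* H5_USE_16_API, defined above before hdf5.h is included, maps H5Gcreate,
   H5Gopen, H5Dcreate, H5Dopen and H5Acreate to their HDF5 1.6-style
   signatures (the shorter argument lists used throughout this file), even
   when the code is built against HDF5 >= 1.8.                             */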

#ifndef PARALLEL
 #define MPI_POSIX YES
#else
 #define MPI_POSIX NO
#endif
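
/* -----------------------------------------------------------------------
    The MPI_POSIX switch selects, at compile time, which HDF5 file-access
    driver is used when PARALLEL is defined; both calls below are standard
    HDF5 API and appear verbatim in WriteHDF5() and ReadHDF5():

      file_access = H5Pcreate(H5P_FILE_ACCESS);
      #if MPI_POSIX == YES
       H5Pset_fapl_mpiposix(file_access, MPI_COMM_WORLD, 1);          (MPI POSIX)
      #else
       H5Pset_fapl_mpio(file_access, MPI_COMM_WORLD, MPI_INFO_NULL);  (MPI-IO)
      #endif

    In the serial case, setting MPI_POSIX to YES simply has the effect of
    compiling out the MPI-IO transfer property lists (plist_id_mpiio),
    which are not available in a serial HDF5 build.
   ----------------------------------------------------------------------- */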

/* ********************************************************************* */
void WriteHDF5 (Output *output, Grid *grid)
/*!
 * Write data to disk using hdf5 format in single or double
 * precision.
 *
 * \param [in] output   the output structure associated with HDF5 format
 * \param [in] grid     a pointer to an array of Grid structures
 *
 * \return This function has no return value.
 *********************************************************************** */
{
  hid_t dataspace, memspace, dataset;
  hid_t strspace, stratt, string_type;
  hid_t tspace, tattr;
  hid_t file_identifier, group, timestep;
  hid_t file_access = 0;
#if MPI_POSIX == NO
  hid_t plist_id_mpiio = 0; /* for collective MPI I/O */
#endif
  hid_t err;

  hsize_t dimstr;
  hsize_t dimens[DIMENSIONS];
  hsize_t start[DIMENSIONS];
  hsize_t stride[DIMENSIONS];
  hsize_t count[DIMENSIONS];

  time_t tbeg, tend;
  static float ****node_coords, ****cell_coords;
  char filename[512], filenamexmf[512], tstepname[32];
  char *coords = "/cell_coords/X /cell_coords/Y /cell_coords/Z ";
  char *cname[] = {"X", "Y", "Z"};
  char xmfext[8];
  int rank, nd, nv, ns, nc, ngh, ii, jj, kk;
  int n1p, n2p, n3p, nprec;
  Grid *wgrid[3];
  FILE *fxmf;

/* ----------------------------------------------------------------
    compute coordinates just once
   ---------------------------------------------------------------- */

  if (node_coords == NULL) {
    double x1, x2, x3;

    n1p = NX1 + (grid[IDIR].rbound != 0);
    n2p = NX2 + (grid[JDIR].rbound != 0);
    n3p = NX3 + (grid[KDIR].rbound != 0);

    node_coords = ARRAY_4D(3, n3p, n2p, n1p, float);
    cell_coords = ARRAY_4D(3, NX3, NX2, NX1, float);

    for (kk = 0; kk < n3p; kk++) { x3 = grid[KDIR].xl[KBEG+kk];
    for (jj = 0; jj < n2p; jj++) { x2 = grid[JDIR].xl[JBEG+jj];
    for (ii = 0; ii < n1p; ii++) { x1 = grid[IDIR].xl[IBEG+ii];

      node_coords[JDIR][kk][jj][ii] = 0.0;
      node_coords[KDIR][kk][jj][ii] = 0.0;

      #if GEOMETRY == CARTESIAN || GEOMETRY == CYLINDRICAL
       D_EXPAND(node_coords[IDIR][kk][jj][ii] = (float)x1;  ,
                node_coords[JDIR][kk][jj][ii] = (float)x2;  ,
                node_coords[KDIR][kk][jj][ii] = (float)x3;)
      #elif GEOMETRY == POLAR
       D_EXPAND(node_coords[IDIR][kk][jj][ii] = (float)(x1*cos(x2));  ,
                node_coords[JDIR][kk][jj][ii] = (float)(x1*sin(x2));  ,
                node_coords[KDIR][kk][jj][ii] = (float)(x3);)
      #elif GEOMETRY == SPHERICAL
       #if DIMENSIONS <= 2
        D_EXPAND(node_coords[IDIR][kk][jj][ii] = (float)(x1*sin(x2));  ,
                 node_coords[JDIR][kk][jj][ii] = (float)(x1*cos(x2));  ,
                 node_coords[KDIR][kk][jj][ii] = 0.0;)
       #else
        D_EXPAND(node_coords[IDIR][kk][jj][ii] = (float)(x1*sin(x2)*cos(x3));  ,
                 node_coords[JDIR][kk][jj][ii] = (float)(x1*sin(x2)*sin(x3));  ,
                 node_coords[KDIR][kk][jj][ii] = (float)(x1*cos(x2));)
       #endif
      #else
       print1 ("! HDF5_IO: Unknown geometry\n");
       QUIT_PLUTO(1);
      #endif
    }}}

    for (kk = 0; kk < NX3; kk++) { x3 = grid[KDIR].x[KBEG+kk];
    for (jj = 0; jj < NX2; jj++) { x2 = grid[JDIR].x[JBEG+jj];
    for (ii = 0; ii < NX1; ii++) { x1 = grid[IDIR].x[IBEG+ii];

      cell_coords[JDIR][kk][jj][ii] = 0.0;
      cell_coords[KDIR][kk][jj][ii] = 0.0;

      #if GEOMETRY == CARTESIAN || GEOMETRY == CYLINDRICAL
       D_EXPAND(cell_coords[IDIR][kk][jj][ii] = (float)x1;  ,
                cell_coords[JDIR][kk][jj][ii] = (float)x2;  ,
                cell_coords[KDIR][kk][jj][ii] = (float)x3;)
      #elif GEOMETRY == POLAR
       D_EXPAND(cell_coords[IDIR][kk][jj][ii] = (float)(x1*cos(x2));  ,
                cell_coords[JDIR][kk][jj][ii] = (float)(x1*sin(x2));  ,
                cell_coords[KDIR][kk][jj][ii] = (float)(x3);)
      #elif GEOMETRY == SPHERICAL
       #if DIMENSIONS <= 2
        D_EXPAND(cell_coords[IDIR][kk][jj][ii] = (float)(x1*sin(x2));  ,
                 cell_coords[JDIR][kk][jj][ii] = (float)(x1*cos(x2));  ,
                 cell_coords[KDIR][kk][jj][ii] = 0.0;)
       #else
        D_EXPAND(cell_coords[IDIR][kk][jj][ii] = (float)(x1*sin(x2)*cos(x3));  ,
                 cell_coords[JDIR][kk][jj][ii] = (float)(x1*sin(x2)*sin(x3));  ,
                 cell_coords[KDIR][kk][jj][ii] = (float)(x1*cos(x2));)
       #endif
      #else
       print1 ("! HDF5_IO: Unknown geometry\n");
       QUIT_PLUTO(1);
      #endif
    }}}

  }

/* --------------------------------------------------------------
    Since data is written in reverse order (Z-Y-X) it is
    convenient to define pointers to grid in reverse order
   -------------------------------------------------------------- */

  for (nd = 0; nd < DIMENSIONS; nd++) wgrid[nd] = grid + DIMENSIONS - nd - 1;
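/* In 3D, for instance, wgrid[0] points to the X3 grid, wgrid[1] to X2 and
   wgrid[2] to X1, so that the dimens[] arrays built from wgrid[] below are
   ordered (N3, N2, N1), i.e. slowest to fastest index, matching the memory
   layout of the [k][j][i] arrays being written.                            */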

  #ifdef PARALLEL
   MPI_Barrier (MPI_COMM_WORLD);
   if (prank == 0) time(&tbeg);
  #endif

  sprintf (filename, "%s/data.%04d.%s", output->dir, output->nfile, output->ext);

  rank   = DIMENSIONS;
  dimstr = 3;

  #ifdef PARALLEL
   file_access = H5Pcreate(H5P_FILE_ACCESS);
   #if MPI_POSIX == YES
    H5Pset_fapl_mpiposix(file_access, MPI_COMM_WORLD, 1);
   #else
    H5Pset_fapl_mpio(file_access, MPI_COMM_WORLD, MPI_INFO_NULL);
   #endif
   file_identifier = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, file_access);
   H5Pclose(file_access);
  #else
   file_identifier = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
  #endif

  sprintf (tstepname, "Timestep_%d", output->nfile);
  timestep = H5Gcreate(file_identifier, tstepname, 0);

  tspace = H5Screate(H5S_SCALAR);
  tattr  = H5Acreate(timestep, "Time", H5T_NATIVE_DOUBLE, tspace, H5P_DEFAULT);
  err    = H5Awrite(tattr, H5T_NATIVE_DOUBLE, &g_time);
  H5Aclose(tattr);
  H5Sclose(tspace);

  group = H5Gcreate(timestep, "vars", 0);  /* Create group "vars" (cell-centered vars) */

/* Define "coords" attribute of group "vars" */

  strspace    = H5Screate_simple(1, &dimstr, NULL);
  string_type = H5Tcopy(H5T_C_S1);
  H5Tset_size(string_type, strlen("/cell_coords/X "));
  H5Tset_strpad(string_type, H5T_STR_SPACEPAD);
  stratt = H5Acreate(group, "coords", string_type, strspace, H5P_DEFAULT);
  err    = H5Awrite(stratt, string_type, coords);
  H5Aclose(stratt);
  H5Sclose(strspace);

  for (nd = 0; nd < DIMENSIONS; nd++) dimens[nd] = wgrid[nd]->np_int_glob;

  dataspace = H5Screate_simple(rank, dimens, NULL);

  #ifdef PARALLEL
   for (nd = 0; nd < DIMENSIONS; nd++) {
     start[nd]  = wgrid[nd]->beg - wgrid[nd]->nghost;
     stride[nd] = 1;
     count[nd]  = wgrid[nd]->np_int;
   }

   err = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, stride, count, NULL);
  #endif

  for (nd = 0; nd < DIMENSIONS; nd++) dimens[nd] = wgrid[nd]->np_tot;

  memspace = H5Screate_simple(rank, dimens, NULL);

  for (nd = 0; nd < DIMENSIONS; nd++){
    start[nd]  = wgrid[nd]->nghost;
    stride[nd] = 1;
    count[nd]  = wgrid[nd]->np_int;
  }
  err = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start, stride, count, NULL);
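/* The two hyperslab selections define the usual parallel-HDF5 mapping:
   in parallel, the file dataspace selects this processor's patch inside
   the global (ghost-free) domain, while the memory dataspace (selected
   in serial as well) picks the np_int interior zones of the local array,
   skipping nghost ghost zones on each side.                              */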

/* ------------------------------------
        write cell-centered data
   ------------------------------------ */

  #if MPI_POSIX == NO
   plist_id_mpiio = H5Pcreate(H5P_DATASET_XFER);
   H5Pset_dxpl_mpio(plist_id_mpiio, H5FD_MPIO_COLLECTIVE);
  #endif
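/* With the MPI-IO driver (MPI_POSIX == NO) a collective dataset-transfer
   property list is created once and reused by every H5Dwrite below; with
   the MPI POSIX driver, or in serial, H5P_DEFAULT (independent transfers)
   is used instead.                                                        */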

  for (nv = 0; nv < output->nvar; nv++) {

  /* -- skip variable if excluded from output or if it is staggered -- */

    if (!output->dump_var[nv] || output->stag_var[nv] != -1) continue;

    if (output->type == DBL_H5_OUTPUT){
      dataset = H5Dcreate(group, output->var_name[nv], H5T_NATIVE_DOUBLE,
                          dataspace, H5P_DEFAULT);
      #if MPI_POSIX == NO
       err = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, dataspace,
                      plist_id_mpiio, output->V[nv][0][0]);
      #else
       err = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, dataspace,
                      H5P_DEFAULT, output->V[nv][0][0]);
      #endif
    }else if (output->type == FLT_H5_OUTPUT){
      void *Vpt;
      Vpt = (void *)(Convert_dbl2flt(output->V[nv], 1.0, 0))[0][0];

      dataset = H5Dcreate(group, output->var_name[nv], H5T_NATIVE_FLOAT,
                          dataspace, H5P_DEFAULT);

      #if MPI_POSIX == NO
       err = H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace,
                      dataspace, plist_id_mpiio, Vpt);
      #else
       err = H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace,
                      dataspace, H5P_DEFAULT, Vpt);
      #endif
    }
    H5Dclose(dataset);
  }

  #if MPI_POSIX == NO
   H5Pclose(plist_id_mpiio);
  #endif
  H5Sclose(memspace);
  H5Sclose(dataspace);
  H5Gclose(group);  /* Close group "vars" */

/* --------------------------------------------------------------------- */
/*! \note Writing of staggered magnetic fields:
     Here we follow the same convention used by ArrayLib, where internal
     points that lie on the grid boundary between processors are owned
     by the \b left processor.
     For this reason, only the leftmost processor writes one additional
     point in the direction of staggering while the other processors
     write the same amount of zones as for cell-centered data.
     Also, keep in mind that the data array Vs starts from -1
     (instead of 0) in the staggered direction.                           */
/* --------------------------------------------------------------------- */
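/* Concrete example of the convention above: take the field staggered along
   x1, with the x1 domain split between two processors, each owning N
   interior zones (2N+1 global faces).  The left processor (lbound != 0 in
   x1) writes N+1 face values, faces 0..N; the right one writes only N
   values, faces N+1..2N, since the shared face N belongs to its left
   neighbour.  This is what the start[]/count[] adjustments below do.      */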

#ifdef STAGGERED_MHD
  if (output->type == DBL_H5_OUTPUT){
    group = H5Gcreate(timestep, "stag_vars", 0);  /* Create group "stag_vars" (staggered vars) */

    #if MPI_POSIX == NO
     plist_id_mpiio = H5Pcreate(H5P_DATASET_XFER);
     H5Pset_dxpl_mpio(plist_id_mpiio, H5FD_MPIO_COLLECTIVE);
    #endif

    for (ns = 0; ns < DIMENSIONS; ns++) {

    /* -- skip variable if excluded from output or if it is cell-centered -- */

      if (!output->dump_var[NVAR+ns] || output->stag_var[NVAR+ns] == -1) continue;

      for (nd = 0; nd < DIMENSIONS; nd++) {
        dimens[nd] = wgrid[nd]->np_int_glob + (ns == (DIMENSIONS-1-nd));
      }
      dataspace = H5Screate_simple(rank, dimens, NULL);

      #ifdef PARALLEL
       for (nd = 0; nd < DIMENSIONS; nd++) {
         start[nd]  = wgrid[nd]->beg - wgrid[nd]->nghost;
         stride[nd] = 1;
         count[nd]  = wgrid[nd]->np_int;
         if (ns == DIMENSIONS-1-nd){
           if (grid[ns].lbound != 0) count[nd] += 1;
           else                      start[nd] += 1;
         }
       }
       err = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET,
                                 start, stride, count, NULL);
      #endif

      for (nd = 0; nd < DIMENSIONS; nd++){
        dimens[nd] = wgrid[nd]->np_tot + (ns == (DIMENSIONS-1-nd));
      }
      memspace = H5Screate_simple(rank, dimens, NULL);

      for (nd = 0; nd < DIMENSIONS; nd++){
        start[nd]  = wgrid[nd]->nghost;
        stride[nd] = 1;
        count[nd]  = wgrid[nd]->np_int;
        if (ns == (DIMENSIONS-1-nd) && grid[ns].lbound != 0) {
          start[nd] -= 1;
          count[nd] += 1;
        }
      }

      err = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start, stride, count, NULL);

      if (output->type == DBL_H5_OUTPUT){
        dataset = H5Dcreate(group, output->var_name[NVAR+ns], H5T_NATIVE_DOUBLE,
                            dataspace, H5P_DEFAULT);
        #if MPI_POSIX == NO
         err = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, dataspace,
                        plist_id_mpiio, output->V[NVAR+ns][0][0]);
        #else
         err = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, dataspace,
                        H5P_DEFAULT, output->V[NVAR+ns][0][0]);
        #endif
      }else if (output->type == FLT_H5_OUTPUT){
        void *Vpt;
        Vpt = (void *)(Convert_dbl2flt(output->V[NVAR+ns], 1.0, 0))[0][0];
        dataset = H5Dcreate(group, output->var_name[NVAR+ns], H5T_NATIVE_FLOAT,
                            dataspace, H5P_DEFAULT);

        #if MPI_POSIX == NO
         err = H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace,
                        dataspace, plist_id_mpiio, Vpt);
        #else
         err = H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace,
                        dataspace, H5P_DEFAULT, Vpt);
        #endif
      }

      H5Dclose(dataset);
      H5Sclose(memspace);
      H5Sclose(dataspace);
    }

    #if MPI_POSIX == NO
     H5Pclose(plist_id_mpiio);
    #endif
    H5Gclose(group);  /* Close group "stag_vars" */
  }
#endif /* STAGGERED_MHD */

  H5Gclose(timestep);

  group = H5Gcreate(file_identifier, "cell_coords", 0);  /* Create group "cell_coords" (centered mesh) */

  for (nd = 0; nd < DIMENSIONS; nd++) dimens[nd] = wgrid[nd]->np_int_glob;
  dataspace = H5Screate_simple(rank, dimens, NULL);

  #ifdef PARALLEL
   for (nd = 0; nd < DIMENSIONS; nd++) {
     start[nd]  = wgrid[nd]->beg - wgrid[nd]->nghost;
     stride[nd] = 1;
     count[nd]  = wgrid[nd]->np_int;
   }
   err = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET,
                             start, stride, count, NULL);
  #endif

  for (nd = 0; nd < DIMENSIONS; nd++) dimens[nd] = wgrid[nd]->np_int;
  memspace = H5Screate_simple(rank, dimens, NULL);

/* ------------------------------------
        write cell centered mesh
   ------------------------------------ */

  #if MPI_POSIX == NO
   plist_id_mpiio = H5Pcreate(H5P_DATASET_XFER);
   H5Pset_dxpl_mpio(plist_id_mpiio, H5FD_MPIO_COLLECTIVE);
  #endif

  for (nc = 0; nc < 3; nc++) {
    dataset = H5Dcreate(group, cname[nc], H5T_NATIVE_FLOAT, dataspace, H5P_DEFAULT);

    #if MPI_POSIX == NO
     err = H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace,
                    dataspace, plist_id_mpiio, cell_coords[nc][0][0]);
    #else
     err = H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace,
                    dataspace, H5P_DEFAULT, cell_coords[nc][0][0]);
    #endif

    H5Dclose(dataset);
  }

  #if MPI_POSIX == NO
   H5Pclose(plist_id_mpiio);
  #endif
  H5Sclose(memspace);
  H5Sclose(dataspace);
  H5Gclose(group);  /* Close group "cell_coords" */

  group = H5Gcreate(file_identifier, "node_coords", 0);  /* Create group "node_coords" (node mesh) */

  for (nd = 0; nd < DIMENSIONS; nd++) dimens[nd] = wgrid[nd]->np_int_glob + 1;

  dataspace = H5Screate_simple(rank, dimens, NULL);

  #ifdef PARALLEL
   for (nd = 0; nd < DIMENSIONS; nd++) {
     start[nd]  = wgrid[nd]->beg - wgrid[nd]->nghost;
     stride[nd] = 1;
     count[nd]  = wgrid[nd]->np_int;
     if (wgrid[nd]->rbound != 0) count[nd] += 1;
   }

   err = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET,
                             start, stride, count, NULL);
  #endif
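/* For the node mesh the global datasets have np_int_glob+1 points per
   direction.  Each processor writes the node lying on the left face of
   every interior zone it owns, so a node shared by two neighbouring
   processors is written by the right-hand one; only processors touching
   the right physical boundary (rbound != 0) add the final closing node.  */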

  for (nd = 0; nd < DIMENSIONS; nd++) {
    dimens[nd] = wgrid[nd]->np_int;
    if (wgrid[nd]->rbound != 0) dimens[nd] += 1;
  }
  memspace = H5Screate_simple(rank, dimens, NULL);

/* ------------------------------------
        write node centered mesh
   ------------------------------------ */

  #if MPI_POSIX == NO
   plist_id_mpiio = H5Pcreate(H5P_DATASET_XFER);
   H5Pset_dxpl_mpio(plist_id_mpiio, H5FD_MPIO_COLLECTIVE);
  #endif

  for (nc = 0; nc < 3; nc++) {
    dataset = H5Dcreate(group, cname[nc], H5T_NATIVE_FLOAT, dataspace, H5P_DEFAULT);

    #if MPI_POSIX == NO
     err = H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace,
                    dataspace, plist_id_mpiio, node_coords[nc][0][0]);
    #else
     err = H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace,
                    dataspace, H5P_DEFAULT, node_coords[nc][0][0]);
    #endif

    H5Dclose(dataset);
  }

  #if MPI_POSIX == NO
   H5Pclose(plist_id_mpiio);
  #endif
  H5Sclose(memspace);
  H5Sclose(dataspace);
  H5Gclose(group);  /* Close group "node_coords" */

  H5Fclose(file_identifier);

/* Create XDMF file to read HDF5 output (Visit or Paraview) */
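/* The .xmf wrapper written below is a small XML file describing a single
   structured ("SMesh") grid: its topology and geometry point to the
   /node_coords datasets of the HDF5 file just closed, while every
   cell-centered variable (plus the cell-center coordinates) is exposed as
   a Scalar attribute referencing /Timestep_<nfile>/vars and /cell_coords.
   Only the master processor writes it.                                    */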

  if (prank == 0) {

    if (output->type == DBL_H5_OUTPUT){
      sprintf(xmfext, "dbl.xmf");
      nprec = 8;
    } else if (output->type == FLT_H5_OUTPUT){
      sprintf(xmfext, "flt.xmf");
      nprec = 4;
    }

    sprintf (filenamexmf, "%s/data.%04d.%s", output->dir, output->nfile, xmfext);

    fxmf = fopen(filenamexmf, "w");
    fprintf(fxmf, "<?xml version=\"1.0\" ?>\n");
    fprintf(fxmf, "<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\" []>\n");
    fprintf(fxmf, "<Xdmf Version=\"2.0\">\n");
    fprintf(fxmf, " <Domain>\n");
    fprintf(fxmf, " <Grid Name=\"node_mesh\" GridType=\"Uniform\">\n");
    fprintf(fxmf, " <Time Value=\"%12.6e\"/>\n", g_time);
    #if DIMENSIONS == 2
     fprintf(fxmf, " <Topology TopologyType=\"2DSMesh\" NumberOfElements=\"%d %d\"/>\n",
             wgrid[0]->np_int_glob+1, wgrid[1]->np_int_glob+1);
     fprintf(fxmf, " <Geometry GeometryType=\"X_Y\">\n");
     for (nd = 0; nd < DIMENSIONS; nd++) {
       fprintf(fxmf, " <DataItem Dimensions=\"%d %d\" NumberType=\"Float\" Precision=\"4\" Format=\"HDF\">\n",
               wgrid[0]->np_int_glob+1, wgrid[1]->np_int_glob+1);
       fprintf(fxmf, " %s:/node_coords/%s\n", filename, cname[nd]);
       fprintf(fxmf, " </DataItem>\n");
     }
    #elif DIMENSIONS == 3
     fprintf(fxmf, " <Topology TopologyType=\"3DSMesh\" NumberOfElements=\"%d %d %d\"/>\n",
             wgrid[0]->np_int_glob+1, wgrid[1]->np_int_glob+1, wgrid[2]->np_int_glob+1);
     fprintf(fxmf, " <Geometry GeometryType=\"X_Y_Z\">\n");
     for (nd = 0; nd < DIMENSIONS; nd++) {
       fprintf(fxmf, " <DataItem Dimensions=\"%d %d %d\" NumberType=\"Float\" Precision=\"4\" Format=\"HDF\">\n",
               wgrid[0]->np_int_glob+1, wgrid[1]->np_int_glob+1, wgrid[2]->np_int_glob+1);
       fprintf(fxmf, " %s:/node_coords/%s\n", filename, cname[nd]);
       fprintf(fxmf, " </DataItem>\n");
     }
    #endif
    fprintf(fxmf, " </Geometry>\n");

    for (nv = 0; nv < output->nvar; nv++) { /* Write cell-centered variables */

      /* -- skip variable if excluded from output or if it is staggered -- */

      if (!output->dump_var[nv] || output->stag_var[nv] != -1) continue;

      fprintf(fxmf, " <Attribute Name=\"%s\" AttributeType=\"Scalar\" Center=\"Cell\">\n",
              output->var_name[nv]);
      #if DIMENSIONS == 2
       fprintf(fxmf, " <DataItem Dimensions=\"%d %d\" NumberType=\"Float\" Precision=\"%d\" Format=\"HDF\">\n",
               wgrid[0]->np_int_glob, wgrid[1]->np_int_glob, nprec);
      #elif DIMENSIONS == 3
       fprintf(fxmf, " <DataItem Dimensions=\"%d %d %d\" NumberType=\"Float\" Precision=\"%d\" Format=\"HDF\">\n",
               wgrid[0]->np_int_glob, wgrid[1]->np_int_glob, wgrid[2]->np_int_glob, nprec);
      #endif
      fprintf(fxmf, " %s:%s/vars/%s\n", filename, tstepname, output->var_name[nv]);
      fprintf(fxmf, " </DataItem>\n");
      fprintf(fxmf, " </Attribute>\n");
    }
    for (nd = 0; nd < DIMENSIONS; nd++) { /* Write cell-center coordinates (as cell-centered variables) */
      fprintf(fxmf, " <Attribute Name=\"%s\" AttributeType=\"Scalar\" Center=\"Cell\">\n",
              cname[nd]);
      #if DIMENSIONS == 2
       fprintf(fxmf, " <DataItem Dimensions=\"%d %d\" NumberType=\"Float\" Precision=\"4\" Format=\"HDF\">\n",
               wgrid[0]->np_int_glob, wgrid[1]->np_int_glob);
      #elif DIMENSIONS == 3
       fprintf(fxmf, " <DataItem Dimensions=\"%d %d %d\" NumberType=\"Float\" Precision=\"4\" Format=\"HDF\">\n",
               wgrid[0]->np_int_glob, wgrid[1]->np_int_glob, wgrid[2]->np_int_glob);
      #endif
      fprintf(fxmf, " %s:/cell_coords/%s\n", filename, cname[nd]);
      fprintf(fxmf, " </DataItem>\n");
      fprintf(fxmf, " </Attribute>\n");
    }
    fprintf(fxmf, " </Grid>\n");
    fprintf(fxmf, " </Domain>\n");
    fprintf(fxmf, "</Xdmf>\n");
    fclose(fxmf);
  } /* if (prank == 0) */

/* XDMF file */

  #ifdef PARALLEL
   MPI_Barrier (MPI_COMM_WORLD);
   if (prank == 0){
     time(&tend);
     print1 (" [%5.2f sec]", difftime(tend,tbeg));
   }
  #endif
}

/* ********************************************************************* */
void ReadHDF5 (Output *output, Grid *grid)
/*!
 * Read data from disk using hdf5 format (double precision).
 *
 * \param [in] output   the output structure associated with HDF5 format
 * \param [in] grid     a pointer to an array of Grid structures
 *
 * \return This function has no return value.
 *********************************************************************** */
{
  hid_t dataspace, memspace, dataset;
  hid_t file_identifier, group, timestep;
  hid_t file_access = 0;
#if MPI_POSIX == NO
  hid_t plist_id_mpiio = 0; /* for collective MPI I/O */
#endif
  hid_t err;

  hsize_t dimens[DIMENSIONS];
  hsize_t start[DIMENSIONS];
  hsize_t stride[DIMENSIONS];
  hsize_t count[DIMENSIONS];

  char filename[128], tstepname[32];
  int ierr, rank, nd, nv, ns;
  Grid *wgrid[3];

/* --------------------------------------------------------------
    Since data is read in reverse order (Z-Y-X) it is
    convenient to define pointers to grid in reverse order
   -------------------------------------------------------------- */

  for (nd = 0; nd < DIMENSIONS; nd++) wgrid[nd] = grid + DIMENSIONS - nd - 1;

  print1 ("> restarting from file #%d (dbl.h5)\n", output->nfile);
  sprintf (filename, "%s/data.%04d.dbl.h5", output->dir, output->nfile);

  rank = DIMENSIONS;

  #ifdef PARALLEL
   file_access = H5Pcreate (H5P_FILE_ACCESS);
   #if MPI_POSIX == YES
    H5Pset_fapl_mpiposix(file_access, MPI_COMM_WORLD, 1);
   #else
    H5Pset_fapl_mpio(file_access, MPI_COMM_WORLD, MPI_INFO_NULL);
   #endif
   file_identifier = H5Fopen(filename, H5F_ACC_RDONLY, file_access);
   H5Pclose(file_access);
  #else
   file_identifier = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT);
  #endif

  if (file_identifier < 0){
    print1 ("! HDF5_READ: file %s does not exist\n", filename);
    QUIT_PLUTO(1);
  }

  sprintf (tstepname, "Timestep_%d", output->nfile);
  timestep = H5Gopen(file_identifier, tstepname);
  group    = H5Gopen(timestep, "vars");
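/* The layout expected here mirrors what WriteHDF5() produces for .dbl.h5
   files: cell-centered variables under /Timestep_<nfile>/vars and, when
   STAGGERED_MHD is defined, face-centered fields under
   /Timestep_<nfile>/stag_vars.                                            */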

  #if MPI_POSIX == NO
   plist_id_mpiio = H5Pcreate(H5P_DATASET_XFER);
   H5Pset_dxpl_mpio(plist_id_mpiio, H5FD_MPIO_COLLECTIVE);
  #endif

  for (nv = 0; nv < output->nvar; nv++) {

  /* -- skip variable if excluded from output or if it is staggered -- */

    if (!output->dump_var[nv] || output->stag_var[nv] != -1) continue;

    dataset   = H5Dopen(group, output->var_name[nv]);
    dataspace = H5Dget_space(dataset);

    #ifdef PARALLEL
     for (nd = 0; nd < DIMENSIONS; nd++) {
       start[nd]  = wgrid[nd]->beg - wgrid[nd]->nghost;
       stride[nd] = 1;
       count[nd]  = wgrid[nd]->np_int;
     }
     err = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, stride, count, NULL);
    #endif

    for (nd = 0; nd < DIMENSIONS; nd++) dimens[nd] = wgrid[nd]->np_tot;

    memspace = H5Screate_simple(rank, dimens, NULL);

    for (nd = 0; nd < DIMENSIONS; nd++){
      start[nd]  = wgrid[nd]->nghost;
      stride[nd] = 1;
      count[nd]  = wgrid[nd]->np_int;
    }

    err = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start, stride, count, NULL);

    #if MPI_POSIX == NO
     err = H5Dread(dataset, H5T_NATIVE_DOUBLE, memspace, dataspace,
                   plist_id_mpiio, output->V[nv][0][0]);
    #else
     err = H5Dread(dataset, H5T_NATIVE_DOUBLE, memspace, dataspace,
                   H5P_DEFAULT, output->V[nv][0][0]);
    #endif

    H5Dclose(dataset);
    H5Sclose(memspace);
    H5Sclose(dataspace);
  }

  #if MPI_POSIX == NO
   H5Pclose(plist_id_mpiio);
  #endif
  H5Gclose(group);

  #ifdef STAGGERED_MHD
   group = H5Gopen(timestep, "stag_vars");

   #if MPI_POSIX == NO
    plist_id_mpiio = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(plist_id_mpiio, H5FD_MPIO_COLLECTIVE);
   #endif

   for (ns = 0; ns < DIMENSIONS; ns++) {
     dataset   = H5Dopen(group, output->var_name[NVAR+ns]);
     dataspace = H5Dget_space(dataset);

     #ifdef PARALLEL
      for (nd = 0; nd < DIMENSIONS; nd++) {
        start[nd]  = wgrid[nd]->beg - wgrid[nd]->nghost;
        stride[nd] = 1;
        count[nd]  = wgrid[nd]->np_int;
        if (ns == DIMENSIONS-1-nd){
          if (grid[ns].lbound != 0) count[nd] += 1;
          else                      start[nd] += 1;
        }
      }
      err = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET,
                                start, stride, count, NULL);
     #endif

     for (nd = 0; nd < DIMENSIONS; nd++)
       dimens[nd] = wgrid[nd]->np_tot + (ns == (DIMENSIONS-1-nd));

     memspace = H5Screate_simple(rank, dimens, NULL);

     for (nd = 0; nd < DIMENSIONS; nd++){
       start[nd]  = wgrid[nd]->nghost;
       stride[nd] = 1;
       count[nd]  = wgrid[nd]->np_int;
       if (ns == DIMENSIONS-1-nd && grid[ns].lbound != 0) {
         start[nd] -= 1;
         count[nd] += 1;
       }
     }

     err = H5Sselect_hyperslab(memspace, H5S_SELECT_SET,
                               start, stride, count, NULL);
     #if MPI_POSIX == NO
      err = H5Dread(dataset, H5T_NATIVE_DOUBLE, memspace,
                    dataspace, plist_id_mpiio, output->V[NVAR+ns][0][0]);
     #else
      err = H5Dread(dataset, H5T_NATIVE_DOUBLE, memspace,
                    dataspace, H5P_DEFAULT, output->V[NVAR+ns][0][0]);
     #endif
     H5Dclose(dataset);
     H5Sclose(memspace);
     H5Sclose(dataspace);
   }

   #if MPI_POSIX == NO
    H5Pclose(plist_id_mpiio);
   #endif
   H5Gclose(group);
  #endif

  H5Gclose(timestep);
  H5Fclose(file_identifier);
}
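
/* -----------------------------------------------------------------------
    Illustrative sketch (not part of PLUTO): a minimal, stand-alone serial
    program that reads back the "Time" attribute written by WriteHDF5(),
    using the same HDF5 1.6-style API.  The file name and timestep number
    are only examples.

      #define H5_USE_16_API
      #include <stdio.h>
      #include "hdf5.h"

      int main (void)
      {
        hid_t  file, tstep, attr;
        double t;

        file = H5Fopen ("data.0000.dbl.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
        if (file < 0) return 1;
        tstep = H5Gopen (file, "Timestep_0");
        attr  = H5Aopen_name (tstep, "Time");
        H5Aread (attr, H5T_NATIVE_DOUBLE, &t);
        printf ("Output time = %g\n", t);

        H5Aclose (attr); H5Gclose (tstep); H5Fclose (file);
        return 0;
      }
   ----------------------------------------------------------------------- */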