PLUTO
hdf5_io.vChombo3.1.c File Reference

HDF5 I/O main driver. More...

#include "pluto.h"
#include "hdf5.h"
Include dependency graph for hdf5_io.vChombo3.1.c:

Go to the source code of this file.

Macros

#define H5_USE_16_API
 
#define MPI_POSIX   YES
 

Functions

void WriteHDF5 (Output *output, Grid *grid)
 
void ReadHDF5 (Output *output, Grid *grid)
 

Detailed Description

HDF5 I/O main driver.

This file provides I/O functionality for the HDF5 data format. The output suffix is ".h5".

The WriteHDF5() function writes data in serial or parallel mode, in either single or double precision.

The ReadHDF5() function reads double-precision data in serial or parallel mode.
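
For orientation only, here is a minimal usage sketch (not part of this file); it assumes that output and grid point to an Output structure and a Grid array already filled by PLUTO's standard initialization (dir, nfile, ext, type, dump_var, ...), and the wrapper name is purely illustrative:

  #include "pluto.h"

  /* Hypothetical driver fragment, for illustration only: "output" and "grid"
     are assumed to come from PLUTO's standard setup.                         */
  void HDF5DumpSketch (Output *output, Grid *grid)
  {
    WriteHDF5 (output, grid);  /* dump data.NNNN.dbl.h5 (or .flt.h5) plus an .xmf wrapper */
    ReadHDF5  (output, grid);  /* on restart, read the .dbl.h5 file back in               */
  }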

Note
When "MPI_POSIX" is set to "YES", HDF5 uses an alternative parallel I/O driver called MPI POSIX, which is a "combination" of the MPI-2 and POSIX I/O drivers. It uses MPI to coordinate the actions of several processes and POSIX I/O calls to perform the actual disk I/O. There is no collective I/O mode with this driver, and it will almost certainly not work correctly for files accessed on distributed parallel systems when the file resides on a non-parallel filesystem. On some systems, the MPI POSIX driver may perform better than the MPI-IO driver in independent I/O mode.
For more information see http://www.hdfgroup.org/HDF5/PHDF5/parallelhdf5hints.pdf
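
The switch acts at compile time through the file-access property list; the following condensed sketch mirrors the pattern used in both WriteHDF5() and ReadHDF5() below (note that the MPI POSIX driver has been removed from more recent HDF5 releases):

  /* Condensed from the listings below: choose the parallel file driver.
     This branch is only compiled when PARALLEL is defined.               */
  hid_t file_access = H5Pcreate (H5P_FILE_ACCESS);
  #if MPI_POSIX == YES
  H5Pset_fapl_mpiposix (file_access, MPI_COMM_WORLD, 1);           /* MPI POSIX driver */
  #else
  H5Pset_fapl_mpio (file_access, MPI_COMM_WORLD, MPI_INFO_NULL);   /* MPI-IO driver    */
  #endif
  /* ... open or create the file with this property list, then H5Pclose(file_access) ... */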
Authors
C. Zanni (zanni@oato.inaf.it)
A. Mignone (mignone@ph.unito.it)
G. Muscianisi (g.muscianisi@cineca.it)
Date
May 22, 2014

Definition in file hdf5_io.vChombo3.1.c.

Macro Definition Documentation

#define H5_USE_16_API

Definition at line 35 of file hdf5_io.vChombo3.1.c.

#define MPI_POSIX   YES

Definition at line 39 of file hdf5_io.vChombo3.1.c.

Function Documentation

void ReadHDF5 (Output *output, Grid *grid)

Read data from disk using the HDF5 format (double precision).

Parameters
[in]  output   the output structure associated with HDF5 format
[in]  grid     a pointer to an array of Grid structures
Returns
This function has no return value.
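
The body below follows the usual parallel HDF5 read pattern: select a hyperslab of the file dataspace at the global offset of the local subdomain, select a matching hyperslab of the ghost-padded in-memory array, then call H5Dread(). A stripped-down sketch of that pattern (1.6-style API, as enabled by H5_USE_16_API; wgrid, group, output and nv are as in the listing, and "rho" is just an example dataset name):

  /* Fragment only: wgrid[], group, output and nv come from the listing below. */
  hsize_t start[DIMENSIONS], stride[DIMENSIONS], count[DIMENSIONS], dimens[DIMENSIONS];
  int     nd;

  hid_t dataset   = H5Dopen (group, "rho");              /* example dataset name */
  hid_t dataspace = H5Dget_space (dataset);

  for (nd = 0; nd < DIMENSIONS; nd++){
    start[nd]  = wgrid[nd]->beg - wgrid[nd]->nghost;     /* global offset of the local interior   */
    stride[nd] = 1;
    count[nd]  = wgrid[nd]->np_int;                      /* interior size of the local patch      */
    dimens[nd] = wgrid[nd]->np_tot;                      /* ghost-padded size of the local array  */
  }
  H5Sselect_hyperslab (dataspace, H5S_SELECT_SET, start, stride, count, NULL);

  hid_t memspace = H5Screate_simple (DIMENSIONS, dimens, NULL);
  for (nd = 0; nd < DIMENSIONS; nd++) start[nd] = wgrid[nd]->nghost;   /* skip ghost zones */
  H5Sselect_hyperslab (memspace, H5S_SELECT_SET, start, stride, count, NULL);

  H5Dread (dataset, H5T_NATIVE_DOUBLE, memspace, dataspace, H5P_DEFAULT, output->V[nv][0][0]);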

Definition at line 566 of file hdf5_io.vChombo3.1.c.

575 {
576  hid_t dataspace, memspace, dataset;
577  hid_t file_identifier, group;
578  hid_t file_access = 0;
579 #if MPI_POSIX == NO
580  hid_t plist_id_mpiio = 0; /* for collective MPI I/O */
581 #endif
582  hid_t err;
583 
584  hsize_t dimens[DIMENSIONS];
585  hsize_t start[DIMENSIONS];
586  hsize_t stride[DIMENSIONS];
587  hsize_t count[DIMENSIONS];
588 
589  char filename[128];
590  int ierr, rank, nd, nv, ns;
591  Grid *wgrid[3];
592 
593 /* --------------------------------------------------------------
594  Since data is read in reverse order (Z-Y-X) it is
595  convenient to define pointers to grid in reverse order
596  -------------------------------------------------------------- */
597 
598  for (nd = 0; nd < DIMENSIONS; nd++) wgrid[nd] = grid + DIMENSIONS - nd - 1;
599 
600  print1 ("> restarting from file #%d (dbl.h5)\n",output->nfile);
601  sprintf (filename, "%s/data.%04d.dbl.h5", output->dir, output->nfile);
602 
603  rank = DIMENSIONS;
604 
605  #ifdef PARALLEL
606  file_access = H5Pcreate (H5P_FILE_ACCESS);
607  #if MPI_POSIX == YES
608  H5Pset_fapl_mpiposix(file_access, MPI_COMM_WORLD, 1);
609  #else
610  H5Pset_fapl_mpio(file_access, MPI_COMM_WORLD, MPI_INFO_NULL);
611  #endif
612  #else
613  file_access = H5P_DEFAULT;
614  #endif
615 
616  file_identifier = H5Fopen(filename, H5F_ACC_RDONLY, file_access);
617  if (file_identifier < 0){
618  print1 ("! HDF5_READ: file %s does not exist\n", filename);
619  QUIT_PLUTO(1);
620  }
621 
622  ierr = H5Pclose(file_access);
623 
624  group = H5Gopen(file_identifier, "vars");
625 
626  for (nv = 0; nv < NVAR; nv++) {
627  if (!output->dump_var[nv]) continue;
628 
629  dataset = H5Dopen(group, output->var_name[nv]);
630  dataspace = H5Dget_space(dataset);
631 
632  #ifdef PARALLEL
633  for (nd = 0; nd < DIMENSIONS; nd++) {
634  start[nd] = wgrid[nd]->beg - wgrid[nd]->nghost;
635  stride[nd] = 1;
636  count[nd] = wgrid[nd]->np_int;
637  }
638  err = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, stride, count, NULL);
639  #endif
640 
641  for (nd = 0; nd < DIMENSIONS; nd++) dimens[nd] = wgrid[nd]->np_tot;
642 
643  memspace = H5Screate_simple(rank,dimens,NULL);
644 
645  for (nd = 0; nd < DIMENSIONS; nd++){
646  start[nd] = wgrid[nd]->nghost;
647  stride[nd] = 1;
648  count[nd] = wgrid[nd]->np_int;
649  }
650 
651  err = H5Sselect_hyperslab(memspace,H5S_SELECT_SET, start, stride, count, NULL);
652 
653 #if MPI_POSIX == NO
654  plist_id_mpiio = H5Pcreate(H5P_DATASET_XFER);
655  H5Pset_dxpl_mpio(plist_id_mpiio,H5FD_MPIO_COLLECTIVE);
656  err = H5Dread(dataset, H5T_NATIVE_DOUBLE, memspace, dataspace,
657  plist_id_mpiio, output->V[nv][0][0]);
658 #else
659  err = H5Dread(dataset, H5T_NATIVE_DOUBLE, memspace, dataspace,
660  H5P_DEFAULT, output->V[nv][0][0]);
661 #endif
662 
663  H5Dclose(dataset);
664 #if MPI_POSIX == NO
665  H5Pclose(plist_id_mpiio);
666 #endif
667  H5Sclose(memspace);
668  H5Sclose(dataspace);
669  }
670 
671  H5Gclose(group);
672 
673  #ifdef STAGGERED_MHD
674  group = H5Gopen(file_identifier, "stag_vars");
675 
676  for (ns = 0; ns < DIMENSIONS; ns++) {
677  dataset = H5Dopen(group, output->var_name[NVAR+ns]);
678  dataspace = H5Dget_space(dataset);
679 
680  #ifdef PARALLEL
681  for (nd = 0; nd < DIMENSIONS; nd++) {
682  start[nd] = wgrid[nd]->beg - wgrid[nd]->nghost;
683  stride[nd] = 1;
684  count[nd] = wgrid[nd]->np_int;
685  if (ns == DIMENSIONS-1-nd){
686  if (grid[ns].lbound != 0) count[nd] += 1;
687  else start[nd] += 1;
688  }
689  }
690  err = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET,
691  start, stride, count, NULL);
692  #endif
693 
694  for (nd = 0; nd < DIMENSIONS; nd++)
695  dimens[nd] = wgrid[nd]->np_tot + (ns == (DIMENSIONS-1-nd));
696 
697  memspace = H5Screate_simple(rank,dimens,NULL);
698 
699  for (nd = 0; nd < DIMENSIONS; nd++){
700  start[nd] = wgrid[nd]->nghost;
701  stride[nd] = 1;
702  count[nd] = wgrid[nd]->np_int;
703  if (ns == DIMENSIONS-1-nd && grid[ns].lbound != 0) {
704  start[nd] -= 1;
705  count[nd] += 1;
706  }
707  }
708 
709  err = H5Sselect_hyperslab(memspace,H5S_SELECT_SET,
710  start, stride, count, NULL);
711 #if MPI_POSIX == NO
712  plist_id_mpiio = H5Pcreate(H5P_DATASET_XFER);
713  H5Pset_dxpl_mpio(plist_id_mpiio,H5FD_MPIO_COLLECTIVE);
714  err = H5Dread(dataset, H5T_NATIVE_DOUBLE, memspace,
715  dataspace, plist_id_mpiio, output->V[NVAR+ns][0][0]);
716 #else
717  err = H5Dread(dataset, H5T_NATIVE_DOUBLE, memspace,
718  dataspace, H5P_DEFAULT, output->V[NVAR+ns][0][0]);
719 #endif
720  H5Dclose(dataset);
721 #if MPI_POSIX == NO
722  H5Pclose(plist_id_mpiio);
723 #endif
724  H5Sclose(memspace);
725  H5Sclose(dataspace);
726  }
727  H5Gclose(group);
728  #endif
729 
730  H5Fclose(file_identifier);
731 }

Here is the call graph for this function:

void WriteHDF5 (Output *output, Grid *grid)

Write data to disk using the HDF5 format in single or double precision.

Parameters
[in]  output   the output structure associated with HDF5 format
[in]  grid     a pointer to an array of Grid structures
Returns
This function has no return value.
Note
Writing of staggered magnetic fields: Here we follow the same convention used by ArrayLib, where internal points that lie on the grid boundary between processors are owned by the left processor. For this reason, only the leftmost processor writes one additional point in the direction of staggering, while the other processors write the same number of zones as for cell-centered data. Also, keep in mind that the data array Vs starts from -1 (instead of 0) in the staggered direction.
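
A minimal sketch of the hyperslab bookkeeping this convention implies in the staggered direction ns (variable names follow the STAGGERED_MHD block of the listing below):

  /* Fragment only: adjust the file-space selection for staggered field ns.
     The process touching the left physical boundary (lbound != 0) writes the
     extra interface value; every other process shifts its start by one.      */
  for (nd = 0; nd < DIMENSIONS; nd++) {
    start[nd]  = wgrid[nd]->beg - wgrid[nd]->nghost;
    stride[nd] = 1;
    count[nd]  = wgrid[nd]->np_int;
    if (ns == DIMENSIONS-1-nd) {                  /* nd runs in reversed (Z-Y-X) order */
      if (grid[ns].lbound != 0) count[nd] += 1;   /* leftmost proc: one extra face     */
      else                      start[nd] += 1;   /* interior proc: skip shared face   */
    }
  }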

Definition at line 45 of file hdf5_io.vChombo3.1.c.

55 {
56  hid_t dataspace, memspace, dataset;
57  hid_t strspace, stratt, string_type;
58  hid_t file_identifier, group;
59  hid_t file_access = 0;
60 #if MPI_POSIX == NO
61  hid_t plist_id_mpiio = 0; /* for collective MPI I/O */
62 #endif
63  hid_t err;
64 
65  hsize_t dimstr;
66  hsize_t dimens[DIMENSIONS];
67  hsize_t start[DIMENSIONS];
68  hsize_t stride[DIMENSIONS];
69  hsize_t count[DIMENSIONS];
70 
71  time_t tbeg, tend;
72  static float ****node_coords, ****cell_coords;
73  char filename[512], filenamexmf[512];
74  char *coords = "/cell_coords/X /cell_coords/Y /cell_coords/Z ";
75  char *cname[] = {"X", "Y", "Z"};
76  char xmfext[8];
77  int ierr, rank, nd, nv, ns, nc, ngh, ii, jj, kk;
78  int n1p, n2p, n3p, nprec;
79  Grid *wgrid[3];
80  FILE *fxmf;
81 
82 /* ----------------------------------------------------------------
83  compute coordinates just once
84  ---------------------------------------------------------------- */
85 
86  if (node_coords == NULL) {
87  double x1, x2, x3;
88 
89  n1p = NX1 + (grid[IDIR].rbound != 0);
90  n2p = NX2 + (grid[JDIR].rbound != 0);
91  n3p = NX3 + (grid[KDIR].rbound != 0);
92 
93  node_coords = ARRAY_4D(3, n3p, n2p, n1p, float);
94  cell_coords = ARRAY_4D(3, NX3, NX2, NX1, float);
95 
96  for (kk = 0; kk < n3p; kk++) { x3 = grid[KDIR].xl[KBEG+kk];
97  for (jj = 0; jj < n2p; jj++) { x2 = grid[JDIR].xl[JBEG+jj];
98  for (ii = 0; ii < n1p; ii++) { x1 = grid[IDIR].xl[IBEG+ii];
99 
100  node_coords[JDIR][kk][jj][ii] = 0.0;
101  node_coords[KDIR][kk][jj][ii] = 0.0;
102 
103  #if GEOMETRY == CARTESIAN || GEOMETRY == CYLINDRICAL
104  D_EXPAND(node_coords[IDIR][kk][jj][ii] = (float)x1; ,
105  node_coords[JDIR][kk][jj][ii] = (float)x2; ,
106  node_coords[KDIR][kk][jj][ii] = (float)x3;)
107  #elif GEOMETRY == POLAR
108  D_EXPAND(node_coords[IDIR][kk][jj][ii] = (float)(x1*cos(x2)); ,
109  node_coords[JDIR][kk][jj][ii] = (float)(x1*sin(x2)); ,
110  node_coords[KDIR][kk][jj][ii] = (float)(x3);)
111  #elif GEOMETRY == SPHERICAL
112  #if DIMENSIONS <= 2
113  D_EXPAND(node_coords[IDIR][kk][jj][ii] = (float)(x1*sin(x2)); ,
114  node_coords[JDIR][kk][jj][ii] = (float)(x1*cos(x2)); ,
115  node_coords[KDIR][kk][jj][ii] = 0.0;)
116  #else
117  D_EXPAND(node_coords[IDIR][kk][jj][ii] = (float)(x1*sin(x2)*cos(x3)); ,
118  node_coords[JDIR][kk][jj][ii] = (float)(x1*sin(x2)*sin(x3)); ,
119  node_coords[KDIR][kk][jj][ii] = (float)(x1*cos(x2));)
120  #endif
121  #else
122  print1 ("! HDF5_IO: Unknown geometry\n");
123  QUIT_PLUTO(1);
124  #endif
125  }}}
126 
127  for (kk = 0; kk < NX3; kk++) { x3 = grid[KDIR].x[KBEG+kk];
128  for (jj = 0; jj < NX2; jj++) { x2 = grid[JDIR].x[JBEG+jj];
129  for (ii = 0; ii < NX1; ii++) { x1 = grid[IDIR].x[IBEG+ii];
130 
131  cell_coords[JDIR][kk][jj][ii] = 0.0;
132  cell_coords[KDIR][kk][jj][ii] = 0.0;
133 
134  #if GEOMETRY == CARTESIAN || GEOMETRY == CYLINDRICAL
135  D_EXPAND(cell_coords[IDIR][kk][jj][ii] = (float)x1; ,
136  cell_coords[JDIR][kk][jj][ii] = (float)x2; ,
137  cell_coords[KDIR][kk][jj][ii] = (float)x3;)
138  #elif GEOMETRY == POLAR
139  D_EXPAND(cell_coords[IDIR][kk][jj][ii] = (float)(x1*cos(x2)); ,
140  cell_coords[JDIR][kk][jj][ii] = (float)(x1*sin(x2)); ,
141  cell_coords[KDIR][kk][jj][ii] = (float)(x3);)
142  #elif GEOMETRY == SPHERICAL
143  #if DIMENSIONS <= 2
144  D_EXPAND(cell_coords[IDIR][kk][jj][ii] = (float)(x1*sin(x2)); ,
145  cell_coords[JDIR][kk][jj][ii] = (float)(x1*cos(x2)); ,
146  cell_coords[KDIR][kk][jj][ii] = 0.0;)
147  #else
148  D_EXPAND(cell_coords[IDIR][kk][jj][ii] = (float)(x1*sin(x2)*cos(x3)); ,
149  cell_coords[JDIR][kk][jj][ii] = (float)(x1*sin(x2)*sin(x3)); ,
150  cell_coords[KDIR][kk][jj][ii] = (float)(x1*cos(x2));)
151  #endif
152  #else
153  print1 ("! HDF5_IO: Unknown geometry\n");
154  QUIT_PLUTO(1);
155  #endif
156  }}}
157 
158  }
159 
160 /* --------------------------------------------------------------
161  Since data is written in reverse order (Z-Y-X) it is
162  convenient to define pointers to grid in reverse order
163  -------------------------------------------------------------- */
164 
165  for (nd = 0; nd < DIMENSIONS; nd++) wgrid[nd] = grid + DIMENSIONS - nd - 1;
166 
167  #ifdef PARALLEL
168  MPI_Barrier (MPI_COMM_WORLD);
169  if (prank == 0)time(&tbeg);
170  #endif
171 
172  sprintf (filename, "%s/data.%04d.%s", output->dir,output->nfile, output->ext);
173 
174  rank = DIMENSIONS;
175  dimstr = 3;
176 
177  #ifdef PARALLEL
178  file_access = H5Pcreate(H5P_FILE_ACCESS);
179  #if MPI_POSIX == YES
180  H5Pset_fapl_mpiposix(file_access, MPI_COMM_WORLD, 1);
181  #else
182  H5Pset_fapl_mpio(file_access, MPI_COMM_WORLD, MPI_INFO_NULL);
183  #endif
184  #else
185  file_access = H5P_DEFAULT;
186  #endif
187 
188  file_identifier = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, file_access);
189 
190  ierr = H5Pclose(file_access);
191 
192  group = H5Gcreate(file_identifier, "vars", 0); /* Create group "vars" (cell-centered vars) */
193 
194 /* Define "coords" attribute of group "vars" */
195 
196  strspace = H5Screate_simple(1, &dimstr, NULL);
197  string_type = H5Tcopy(H5T_C_S1);
198  H5Tset_size(string_type, strlen("/cell_coords/X "));
199  H5Tset_strpad(string_type,H5T_STR_SPACEPAD);
200  stratt = H5Acreate(group,"coords",string_type,strspace, H5P_DEFAULT);
201  err = H5Awrite(stratt, string_type, coords);
202  H5Aclose(stratt);
203  H5Sclose(strspace);
204 
205  for (nd = 0; nd < DIMENSIONS; nd++) dimens[nd] = wgrid[nd]->np_int_glob;
206 
207  dataspace = H5Screate_simple(rank, dimens, NULL);
208 
209  #ifdef PARALLEL
210  for (nd = 0; nd < DIMENSIONS; nd++) {
211  start[nd] = wgrid[nd]->beg - wgrid[nd]->nghost;
212  stride[nd] = 1;
213  count[nd] = wgrid[nd]->np_int;
214  }
215 
216  err = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, stride, count, NULL);
217  #endif
218 
219  for (nd = 0; nd < DIMENSIONS; nd++) dimens[nd] = wgrid[nd]->np_tot;
220 
221  memspace = H5Screate_simple(rank,dimens,NULL);
222 
223  for (nd = 0; nd < DIMENSIONS; nd++){
224  start[nd] = wgrid[nd]->nghost;
225  stride[nd] = 1;
226  count[nd] = wgrid[nd]->np_int;
227  }
228  err = H5Sselect_hyperslab(memspace,H5S_SELECT_SET, start, stride, count, NULL);
229 
230 /* ------------------------------------
231  write cell-centered data
232  ------------------------------------ */
233 
234  for (nv = 0; nv < output->nvar; nv++) {
235 
236  /* -- skip variable if excluded from output or if it is staggered -- */
237 
238  if (!output->dump_var[nv] || output->stag_var[nv] != -1) continue;
239 
240  if (output->type == DBL_H5_OUTPUT){
241  dataset = H5Dcreate(group, output->var_name[nv], H5T_NATIVE_DOUBLE,
242  dataspace, H5P_DEFAULT);
243 #if MPI_POSIX == NO
244  plist_id_mpiio = H5Pcreate(H5P_DATASET_XFER);
245  H5Pset_dxpl_mpio(plist_id_mpiio,H5FD_MPIO_COLLECTIVE);
246  err = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, dataspace,
247  plist_id_mpiio, output->V[nv][0][0]);
248 #else
249  err = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, dataspace,
250  H5P_DEFAULT, output->V[nv][0][0]);
251 #endif
252  }else if (output->type == FLT_H5_OUTPUT){
253  void *Vpt;
254  Vpt = (void *)(Convert_dbl2flt(output->V[nv],1.0, 0))[0][0];
255 
256  dataset = H5Dcreate(group, output->var_name[nv], H5T_NATIVE_FLOAT,
257  dataspace, H5P_DEFAULT);
258 
259 #if MPI_POSIX == NO
260  plist_id_mpiio = H5Pcreate(H5P_DATASET_XFER);
261  H5Pset_dxpl_mpio(plist_id_mpiio,H5FD_MPIO_COLLECTIVE);
262  err = H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace,
263  dataspace, plist_id_mpiio, Vpt);
264 #else
265  err = H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace,
266  dataspace, H5P_DEFAULT, Vpt);
267 #endif
268  }
269  H5Dclose(dataset);
270 #if MPI_POSIX == NO
271  H5Pclose(plist_id_mpiio);
272 #endif
273  }
274  H5Sclose(memspace);
275  H5Sclose(dataspace);
276  H5Gclose(group); /* Close group "vars" */
277 
278  group = H5Gcreate(file_identifier, "cell_coords", 0); /* Create group "cell_coords" (centered mesh) */
279 
280  for (nd = 0; nd < DIMENSIONS; nd++) dimens[nd] = wgrid[nd]->np_int_glob;
281  dataspace = H5Screate_simple(rank, dimens, NULL);
282 
283  #ifdef PARALLEL
284  for (nd = 0; nd < DIMENSIONS; nd++) {
285  start[nd] = wgrid[nd]->beg - wgrid[nd]->nghost;
286  stride[nd] = 1;
287  count[nd] = wgrid[nd]->np_int;
288  }
289  err = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET,
290  start, stride, count, NULL);
291  #endif
292  for (nd = 0; nd < DIMENSIONS; nd++) dimens[nd] = wgrid[nd]->np_int;
293  memspace = H5Screate_simple(rank,dimens,NULL);
294 
295 /* ------------------------------------
296  write cell centered mesh
297  ------------------------------------ */
298 
299  for (nc = 0; nc < 3; nc++) {
300  dataset = H5Dcreate(group, cname[nc], H5T_NATIVE_FLOAT, dataspace, H5P_DEFAULT);
301 
302 #if MPI_POSIX == NO
303  plist_id_mpiio = H5Pcreate(H5P_DATASET_XFER);
304  H5Pset_dxpl_mpio(plist_id_mpiio,H5FD_MPIO_COLLECTIVE);
305  err = H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace,
306  dataspace, plist_id_mpiio, cell_coords[nc][0][0]);
307 #else
308  err = H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace,
309  dataspace, H5P_DEFAULT, cell_coords[nc][0][0]);
310 #endif
311 
312  H5Dclose(dataset);
313 #if MPI_POSIX == NO
314  H5Pclose(plist_id_mpiio);
315 #endif
316  }
317  H5Sclose(memspace);
318  H5Sclose(dataspace);
319  H5Gclose(group); /* Close group "cell_coords" */
320 
321  group = H5Gcreate(file_identifier, "node_coords", 0); /* Create group "node_coords" (node mesh) */
322 
323  for (nd = 0; nd < DIMENSIONS; nd++) dimens[nd] = wgrid[nd]->np_int_glob+1;
324 
325  dataspace = H5Screate_simple(rank, dimens, NULL);
326 
327  #ifdef PARALLEL
328  for (nd = 0; nd < DIMENSIONS; nd++) {
329  start[nd] = wgrid[nd]->beg - wgrid[nd]->nghost;
330  stride[nd] = 1;
331  count[nd] = wgrid[nd]->np_int;
332  if (wgrid[nd]->rbound != 0) count[nd] += 1;
333  }
334 
335  err = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET,
336  start, stride, count, NULL);
337  #endif
338 
339  for (nd = 0; nd < DIMENSIONS; nd++) {
340  dimens[nd] = wgrid[nd]->np_int;
341  if (wgrid[nd]->rbound != 0) dimens[nd] += 1;
342  }
343  memspace = H5Screate_simple(rank,dimens,NULL);
344 
345 /* ------------------------------------
346  write node centered mesh
347  ------------------------------------ */
348 
349  for (nc = 0; nc < 3; nc++) {
350  dataset = H5Dcreate(group, cname[nc], H5T_NATIVE_FLOAT, dataspace, H5P_DEFAULT);
351 
352 #if MPI_POSIX == NO
353  plist_id_mpiio = H5Pcreate(H5P_DATASET_XFER);
354  H5Pset_dxpl_mpio(plist_id_mpiio,H5FD_MPIO_COLLECTIVE);
355  err = H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace,
356  dataspace, plist_id_mpiio, node_coords[nc][0][0]);
357 #else
358  err = H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace,
359  dataspace, H5P_DEFAULT, node_coords[nc][0][0]);
360 #endif
361 
362  H5Dclose(dataset);
363 #if MPI_POSIX == NO
364  H5Pclose(plist_id_mpiio);
365 #endif
366  }
367  H5Sclose(memspace);
368  H5Sclose(dataspace);
369  H5Gclose(group); /* Close group "node_coords" */
370 
371 /* --------------------------------------------------------------------- */
372 /*! \note Writing of staggered magnetic fields:
373  Here we follow the same convention used by ArrayLib, where internal
374  points that lie on the grid boundary between processors are owned
375  by the \b left processor.
376  For this reason, only the leftmost processor writes one additional
377  point in the direction of staggering while the other processors
378  write the same amount of zones as for cell-centered data.
379  Also, keep in mind that the data array Vs starts from -1
380  (instead of 0) in the staggered direction. */
381 /* --------------------------------------------------------------------- */
382 
383  #ifdef STAGGERED_MHD
384  group = H5Gcreate(file_identifier, "stag_vars", 0); /* Create group "stag_vars" (staggered vars) */
385 
386  for (ns = 0; ns < DIMENSIONS; ns++) {
387 
388  /* -- skip variable if excluded from output or if it is cell-centered -- */
389 
390  if (!output->dump_var[NVAR+ns] || output->stag_var[NVAR+ns] == -1) continue;
391 
392  for (nd = 0; nd < DIMENSIONS; nd++) {
393  dimens[nd] = wgrid[nd]->np_int_glob + (ns == (DIMENSIONS-1-nd));
394  }
395  dataspace = H5Screate_simple(rank, dimens, NULL);
396 
397  #ifdef PARALLEL
398  for (nd = 0; nd < DIMENSIONS; nd++) {
399  start[nd] = wgrid[nd]->beg - wgrid[nd]->nghost;
400  stride[nd] = 1;
401  count[nd] = wgrid[nd]->np_int;
402  if (ns == DIMENSIONS-1-nd){
403  if (grid[ns].lbound != 0) count[nd] += 1;
404  else start[nd] += 1;
405  }
406  }
407  err = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET,
408  start, stride, count, NULL);
409  #endif
410 
411  for (nd = 0; nd < DIMENSIONS; nd++){
412  dimens[nd] = wgrid[nd]->np_tot + (ns==(DIMENSIONS-1-nd));
413  }
414  memspace = H5Screate_simple(rank,dimens,NULL);
415 
416  for (nd = 0; nd < DIMENSIONS; nd++){
417  start[nd] = wgrid[nd]->nghost;
418  stride[nd] = 1;
419  count[nd] = wgrid[nd]->np_int;
420  if (ns == (DIMENSIONS-1-nd) && grid[ns].lbound != 0) {
421  start[nd] -= 1;
422  count[nd] += 1;
423  }
424  }
425 
426  err = H5Sselect_hyperslab(memspace,H5S_SELECT_SET, start, stride, count, NULL);
427 
428  if (output->type == DBL_H5_OUTPUT){
429  dataset = H5Dcreate(group, output->var_name[NVAR+ns], H5T_NATIVE_DOUBLE,
430  dataspace, H5P_DEFAULT);
431 #if MPI_POSIX == NO
432  plist_id_mpiio = H5Pcreate(H5P_DATASET_XFER);
433  H5Pset_dxpl_mpio(plist_id_mpiio,H5FD_MPIO_COLLECTIVE);
434  err = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, dataspace,
435  plist_id_mpiio, output->V[NVAR+ns][0][0]);
436 #else
437  err = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, dataspace,
438  H5P_DEFAULT, output->V[NVAR+ns][0][0]);
439 #endif
440  }else if (output->type == FLT_H5_OUTPUT){
441  void *Vpt;
442  Vpt = (void *)(Convert_dbl2flt(output->V[NVAR+ns],1.0, 0))[0][0];
443  dataset = H5Dcreate(group, output->var_name[NVAR+ns], H5T_NATIVE_FLOAT,
444  dataspace, H5P_DEFAULT);
445 
446 #if MPI_POSIX == NO
447  plist_id_mpiio = H5Pcreate(H5P_DATASET_XFER);
448  H5Pset_dxpl_mpio(plist_id_mpiio,H5FD_MPIO_COLLECTIVE);
449  err = H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace,
450  dataspace, plist_id_mpiio, Vpt);
451 #else
452  err = H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace,
453  dataspace, H5P_DEFAULT, Vpt);
454 #endif
455  }
456 
457  H5Dclose(dataset);
458 #if MPI_POSIX == NO
459  H5Pclose(plist_id_mpiio);
460 #endif
461  H5Sclose(memspace);
462  H5Sclose(dataspace);
463  }
464  H5Gclose(group); /* Close group "stag_vars" */
465  #endif /* STAGGERED_MHD */
466 
467  H5Fclose(file_identifier);
468 
469 /* ----------------------------------------------------------------
470  Create XDMF file to read HDF5 output (this is convenient
471  when opening files with Visit or Paraview)
472  ---------------------------------------------------------------- */
473 
474  if (prank == 0) {
475 
476  if (output->type == DBL_H5_OUTPUT){
477  sprintf(xmfext,"dbl.xmf");
478  nprec = 8;
479  } else if (output->type == FLT_H5_OUTPUT){
480  sprintf(xmfext,"flt.xmf");
481  nprec = 4;
482  }
483 
484  sprintf (filenamexmf, "%s/data.%04d.%s", output->dir, output->nfile, xmfext);
485 
486  fxmf = fopen(filenamexmf, "w");
487  fprintf(fxmf, "<?xml version=\"1.0\" ?>\n");
488  fprintf(fxmf, "<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\" []>\n");
489  fprintf(fxmf, "<Xdmf Version=\"2.0\">\n");
490  fprintf(fxmf, " <Domain>\n");
491  fprintf(fxmf, " <Grid Name=\"node_mesh\" GridType=\"Uniform\">\n");
492  fprintf(fxmf, " <Time Value=\"%12.6e\"/>\n",g_time);
493  #if DIMENSIONS == 2
494  fprintf(fxmf," <Topology TopologyType=\"2DSMesh\" NumberOfElements=\"%d %d\"/>\n",
495  wgrid[0]->np_int_glob+1, wgrid[1]->np_int_glob+1);
496  fprintf(fxmf, " <Geometry GeometryType=\"X_Y\">\n");
497  for (nd = 0; nd < DIMENSIONS; nd++) {
498  fprintf(fxmf," <DataItem Dimensions=\"%d %d\" NumberType=\"Float\" Precision=\"4\" Format=\"HDF\">\n",
499  wgrid[0]->np_int_glob+1, wgrid[1]->np_int_glob+1 );
500  fprintf(fxmf, " %s:/node_coords/%s\n",filename,cname[nd]);
501  fprintf(fxmf, " </DataItem>\n");
502  }
503  #elif DIMENSIONS == 3
504  fprintf(fxmf, " <Topology TopologyType=\"3DSMesh\" NumberOfElements=\"%d %d %d\"/>\n",
505  wgrid[0]->np_int_glob+1, wgrid[1]->np_int_glob+1, wgrid[2]->np_int_glob+1);
506  fprintf(fxmf, " <Geometry GeometryType=\"X_Y_Z\">\n");
507  for (nd = 0; nd < DIMENSIONS; nd++) {
508  fprintf(fxmf, " <DataItem Dimensions=\"%d %d %d\" NumberType=\"Float\" Precision=\"4\" Format=\"HDF\">\n",
509  wgrid[0]->np_int_glob+1, wgrid[1]->np_int_glob+1, wgrid[2]->np_int_glob+1 );
510  fprintf(fxmf, " %s:/node_coords/%s\n",filename,cname[nd]);
511  fprintf(fxmf, " </DataItem>\n");
512  }
513  #endif
514  fprintf(fxmf, " </Geometry>\n");
515  for (nv = 0; nv < output->nvar; nv++) { /* Write cell-centered variables */
516 
517  /* -- skip variable if excluded from output or if it is staggered -- */
518 
519  if (!output->dump_var[nv] || output->stag_var[nv] != -1) continue;
520 
521  fprintf(fxmf, " <Attribute Name=\"%s\" AttributeType=\"Scalar\" Center=\"Cell\">\n",
522  output->var_name[nv]);
523  #if DIMENSIONS == 2
524  fprintf(fxmf, " <DataItem Dimensions=\"%d %d\" NumberType=\"Float\" Precision=\"%d\" Format=\"HDF\">\n",
525  wgrid[0]->np_int_glob, wgrid[1]->np_int_glob, nprec);
526  #elif DIMENSIONS == 3
527  fprintf(fxmf, " <DataItem Dimensions=\"%d %d %d\" NumberType=\"Float\" Precision=\"%d\" Format=\"HDF\">\n",
528  wgrid[0]->np_int_glob, wgrid[1]->np_int_glob, wgrid[2]->np_int_glob, nprec);
529  #endif
530  fprintf(fxmf, " %s:/vars/%s\n",filename,output->var_name[nv]);
531  fprintf(fxmf, " </DataItem>\n");
532  fprintf(fxmf, " </Attribute>\n");
533  }
534  for (nd = 0; nd < DIMENSIONS; nd++) { /* Write cell center coordinates (as cell-centered variables) */
535  fprintf(fxmf, " <Attribute Name=\"%s\" AttributeType=\"Scalar\" Center=\"Cell\">\n",
536  cname[nd]);
537  #if DIMENSIONS == 2
538  fprintf(fxmf, " <DataItem Dimensions=\"%d %d\" NumberType=\"Float\" Precision=\"4\" Format=\"HDF\">\n",
539  wgrid[0]->np_int_glob, wgrid[1]->np_int_glob);
540  #elif DIMENSIONS == 3
541  fprintf(fxmf, " <DataItem Dimensions=\"%d %d %d\" NumberType=\"Float\" Precision=\"4\" Format=\"HDF\">\n",
542  wgrid[0]->np_int_glob, wgrid[1]->np_int_glob, wgrid[2]->np_int_glob);
543  #endif
544  fprintf(fxmf, " %s:/cell_coords/%s\n",filename,cname[nd]);
545  fprintf(fxmf, " </DataItem>\n");
546  fprintf(fxmf, " </Attribute>\n");
547  }
548  fprintf(fxmf, " </Grid>\n");
549  fprintf(fxmf, " </Domain>\n");
550  fprintf(fxmf, "</Xdmf>\n");
551  fclose(fxmf);
552  } /* if (prank == 0) */
553 
554 /* XDMF file */
555 
556  #ifdef PARALLEL
557  MPI_Barrier (MPI_COMM_WORLD);
558  if (prank == 0){
559  time(&tend);
560  print1 (" [%5.2f sec]",difftime(tend,tbeg));
561  }
562  #endif
563 }

Here is the call graph for this function: