PLUTO
pyPLUTO.py
1 # -*- coding: utf-8 -*-
2 import os
3 import sys
4 import array
5 import numpy as np
6 import scipy.ndimage
7 import scipy.interpolate
8 from scipy.interpolate import UnivariateSpline
9 from matplotlib.pyplot import *
10 from matplotlib.mlab import *
11 
12 ####### Check for h5py to Read AMR data ######
13 try:
14  import h5py as h5
15  hasH5 = True
16 except ImportError:
17  hasH5 = False
18 
19 def curdir():
20  """ Get the current working directory.
21  """
22  curdir = os.getcwd()+'/'
23  return curdir
24 
25 def get_nstepstr(ns):
26  """ Convert the integer input *ns* into the zero-padded string used in the data file name.
27 
28  **Inputs**:
29 
30  ns -- Integer number that represents the time step number. E.g., The ns for data.0001.dbl is 1.\n
31 
32  **Outputs**:
33 
34  Returns the string that would be used to complete the data file name. E.g., for data.0001.dbl, ns = 1 and pyPLUTO.get_nstepstr(1) returns '0001'
35  """
36  nstepstr = str(ns)
37  while len(nstepstr) < 4:
38  nstepstr= '0'+nstepstr
39  return nstepstr
40 
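# A minimal usage sketch for get_nstepstr: it zero-pads the step number to four
# digits (equivalent to str(ns).zfill(4)), matching PLUTO's output file naming.
#
#   import pyPLUTO as pp
#   pp.get_nstepstr(1)     # -> '0001'
#   pp.get_nstepstr(250)   # -> '0250'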
41 def nlast_info(w_dir=None,datatype=None):
42  """ Prints information about the last saved step of the simulation, as obtained from the .out files.
43 
44  **Inputs**:
45 
46  w_dir -- path to the directory which has the dbl.out (or flt.out) file and the data\n
47  datatype -- Set datatype = 'float' if the data are stored in single precision; by default the datatype is 'double'.
48 
49  **Outputs**:
50 
51  This function returns a dictionary with following keywords - \n
52 
53  nlast -- The ns for the last file saved.\n
54  time -- The simulation time for the last file saved.\n
55  dt -- The time step dt for the last file. \n
56  Nstep -- The Nstep value for the last file saved.
57 
58 
59  **Usage**:
60 
61  For example, when the data are stored in 'float' format:
62 
63  ``wdir = '/path/to/data/directory/'``\n
64  ``import pyPLUTO as pp``\n
65  ``A = pp.nlast_info(w_dir=wdir,datatype='float')``
66  """
67  if w_dir is None: w_dir=curdir()
68  if datatype == 'float':
69  fname_v = w_dir+"flt.out"
70  elif datatype == 'vtk':
71  fname_v = w_dir+"vtk.out"
72  else:
73  fname_v = w_dir+"dbl.out"
74  last_line = open(fname_v,"r").readlines()[-1].split()
75  nlast = int(last_line[0])
76  SimTime = float(last_line[1])
77  Dt = float(last_line[2])
78  Nstep = int(last_line[3])
79 
80  print "------------TIME INFORMATION--------------"
81  print 'nlast =',nlast
82  print 'time =',SimTime
83  print 'dt =', Dt
84  print 'Nstep =',Nstep
85  print "-------------------------------------------"
86 
87  return {'nlast':nlast,'time':SimTime,'dt':Dt,'Nstep':Nstep}
88 
89 
90 class pload(object):
91  def __init__(self, ns, w_dir=None, datatype=None, level = 0, x1range=None, x2range=None, x3range=None):
92  """Loads the data.
93 
94  **Inputs**:
95 
96  ns -- Step Number of the data file\n
97  w_dir -- path to the directory which has the data files\n
98  datatype -- Datatype (default = 'double')
99 
100  **Outputs**:
101 
102  pyPLUTO pload object whose attributes are the arrays of data values.
103 
104  """
105  self.NStep = ns
106  self.Dt = 0.0
107 
108  self.n1 = 0
109  self.n2 = 0
110  self.n3 = 0
111 
112  self.x1 = []
113  self.x2 = []
114  self.x3 = []
115  self.dx1 = []
116  self.dx2 = []
117  self.dx3 = []
118 
119  self.x1range = x1range
120  self.x2range = x2range
121  self.x3range = x3range
122 
123  self.NStepStr = str(self.NStep)
124  while len(self.NStepStr) < 4:
125  self.NStepStr = '0'+self.NStepStr
126 
127  if datatype is None:
128  datatype = "double"
129  self.datatype = datatype
130 
131  if ((not hasH5) and (datatype == 'hdf5')):
132  print 'To read AMR hdf5 files with python'
133  print 'Please install h5py (Python HDF5 Reader)'
134  return
135 
136  self.level = level
137 
138  if w_dir is None:
139  w_dir = os.getcwd() + '/'
140  self.wdir = w_dir
141 
142  Data_dictionary = self.ReadDataFile(self.NStepStr)
143  for keys in Data_dictionary:
144  object.__setattr__(self, keys, Data_dictionary.get(keys))
145 
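# A minimal loading sketch, assuming a run directory '/path/to/run/' (a
# hypothetical path) containing grid.out, dbl.out and data.0001.dbl, with a
# density variable 'rho' among the outputs:
#
#   import pyPLUTO as pp
#   D  = pp.pload(1, w_dir='/path/to/run/')                    # double precision
#   Df = pp.pload(1, w_dir='/path/to/run/', datatype='float')  # single precision
#   D.rho.shape, D.x1.shape          # variables and grid become attributes
#
# A sub-domain can be loaded by passing e.g. x1range=[0.0, 1.0].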
146  def ReadTimeInfo(self, timefile):
147  """ Read time info from the outfiles.
148 
149  **Inputs**:
150 
151  timefile -- name of the out file which has timing information.
152 
153  """
154 
155  if (self.datatype == 'hdf5'):
156  fh5 = h5.File(timefile,'r')
157  self.SimTime = fh5.attrs.get('time')
158  #self.Dt = 1.e-2 # Should be erased later given the level in AMR
159  fh5.close()
160  else:
161  ns = self.NStep
162  f_var = open(timefile, "r")
163  tlist = []
164  for line in f_var.readlines():
165  tlist.append(line.split())
166  self.SimTime = float(tlist[ns][1])
167  self.Dt = float(tlist[ns][2])
168 
169  def ReadVarFile(self, varfile):
170  """ Read variable names from the outfiles.
171 
172  **Inputs**:
173 
174  varfile -- name of the out file which has variable information.
175 
176  """
177  if (self.datatype == 'hdf5'):
178  fh5 = h5.File(varfile,'r')
179  self.filetype = 'single_file'
180  self.endianess = '>' # not used with AMR, kept for consistency
181  self.vars = []
182  for iv in range(fh5.attrs.get('num_components')):
183  self.vars.append(fh5.attrs.get('component_'+str(iv)))
184  fh5.close()
185  else:
186  vfp = open(varfile, "r")
187  varinfo = vfp.readline().split()
188  self.filetype = varinfo[4]
189  self.endianess = varinfo[5]
190  self.vars = varinfo[6:]
191  vfp.close()
192 
193  def ReadGridFile(self, gridfile):
194  """ Read grid values from the grid.out file.
195 
196  **Inputs**:
197 
198  gridfile -- name of the grid.out file which has information about the grid.
199 
200  """
201  xL = []
202  xR = []
203  nmax = []
204  gfp = open(gridfile, "r")
205  for i in gfp.readlines():
206  if len(i.split()) == 1:
207  try:
208  int(i.split()[0])
209  nmax.append(int(i.split()[0]))
210  except:
211  pass
212 
213  if len(i.split()) == 3:
214  try:
215  int(i.split()[0])
216  xL.append(float(i.split()[1]))
217  xR.append(float(i.split()[2]))
218  except:
219  if (i.split()[1] == 'GEOMETRY:'):
220  self.geometry=i.split()[2]
221  pass
222 
223  self.n1, self.n2, self.n3 = nmax
224  n1 = self.n1
225  n1p2 = self.n1 + self.n2
226  n1p2p3 = self.n1 + self.n2 + self.n3
227  self.x1 = np.asarray([0.5*(xL[i]+xR[i]) for i in range(n1)])
228  self.dx1 = np.asarray([(xR[i]-xL[i]) for i in range(n1)])
229  self.x2 = np.asarray([0.5*(xL[i]+xR[i]) for i in range(n1, n1p2)])
230  self.dx2 = np.asarray([(xR[i]-xL[i]) for i in range(n1, n1p2)])
231  self.x3 = np.asarray([0.5*(xL[i]+xR[i]) for i in range(n1p2, n1p2p3)])
232  self.dx3 = np.asarray([(xR[i]-xL[i]) for i in range(n1p2, n1p2p3)])
233 
234 
235  # Store the total number of points in the '_tot' variables in case only
236  # a portion of the domain is loaded, and redefine the x and dx arrays
237  # to match the requested ranges.
238  self.n1_tot = self.n1 ; self.n2_tot = self.n2 ; self.n3_tot = self.n3
239  if (self.x1range != None):
240  self.n1_tot = self.n1
241  self.irange = range(abs(self.x1-self.x1range[0]).argmin(),abs(self.x1-self.x1range[1]).argmin()+1)
242  self.n1 = len(self.irange)
243  self.x1 = self.x1[self.irange]
244  self.dx1 = self.dx1[self.irange]
245  else:
246  self.irange = range(self.n1)
247  if (self.x2range != None):
248  self.n2_tot = self.n2
249  self.jrange = range(abs(self.x2-self.x2range[0]).argmin(),abs(self.x2-self.x2range[1]).argmin()+1)
250  self.n2 = len(self.jrange)
251  self.x2 = self.x2[self.jrange]
252  self.dx2 = self.dx2[self.jrange]
253  else:
254  self.jrange = range(self.n2)
255  if (self.x3range != None):
256  self.n3_tot = self.n3
257  self.krange = range(abs(self.x3-self.x3range[0]).argmin(),abs(self.x3-self.x3range[1]).argmin()+1)
258  self.n3 = len(self.krange)
259  self.x3 = self.x3[self.krange]
260  self.dx3 = self.dx3[self.krange]
261  else:
262  self.krange = range(self.n3)
263  self.Slice=(self.x1range != None) or (self.x2range != None) or (self.x3range != None)
264 
265 
266  # Create the xr arrays containing the edges positions
267  # Useful for pcolormesh which should use those
268  self.x1r = np.zeros(len(self.x1)+1) ; self.x1r[1:] = self.x1 + self.dx1/2.0 ; self.x1r[0] = self.x1r[1]-self.dx1[0]
269  self.x2r = np.zeros(len(self.x2)+1) ; self.x2r[1:] = self.x2 + self.dx2/2.0 ; self.x2r[0] = self.x2r[1]-self.dx2[0]
270  self.x3r = np.zeros(len(self.x3)+1) ; self.x3r[1:] = self.x3 + self.dx3/2.0 ; self.x3r[0] = self.x3r[1]-self.dx3[0]
271 
272 
273  prodn = self.n1*self.n2*self.n3
274  if prodn == self.n1:
275  self.nshp = (self.n1)
276  elif prodn == self.n1*self.n2:
277  self.nshp = (self.n2, self.n1)
278  else:
279  self.nshp = (self.n3, self.n2, self.n1)
280 
281 
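# A short sketch of how the grid arrays built above are used, assuming a 2D
# pload object D with a variable 'rho': x1/x2 are cell centres, dx1/dx2 cell
# widths, and x1r/x2r the cell edges (length n+1), which is what pcolormesh
# expects.
#
#   import matplotlib.pyplot as plt
#   plt.pcolormesh(D.x1r, D.x2r, D.rho.T)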
282  def DataScanVTK(self, fp, n1, n2, n3, endian, dtype):
283  """ Scans the VTK data files.
284 
285  **Inputs**:
286 
287  fp -- Data file pointer\n
288  n1 -- No. of points in X1 direction\n
289  n2 -- No. of points in X2 direction\n
290  n3 -- No. of points in X3 direction\n
291  endian -- Endianess of the data\n
292  dtype -- datatype
293 
294  **Output**:
295 
296  Dictionary with variable names as keys and the corresponding data arrays as values.
297 
298  """
299  ks = []
300  vtkvar = []
301  while True:
302  l = fp.readline()
303  try:
304  l.split()[0]
305  except IndexError:
306  pass
307  else:
308  if l.split()[0] == 'SCALARS':
309  ks.append(l.split()[1])
310  elif l.split()[0] == 'LOOKUP_TABLE':
311  A = array.array(dtype)
312  fmt = endian+str(n1*n2*n3)+dtype
313  nb = np.dtype(fmt).itemsize
314  A.fromstring(fp.read(nb))
315  if (self.Slice):
316  darr = np.zeros((n1*n2*n3))
317  indxx = np.sort([self.n3_tot*self.n2_tot*k + j*self.n2_tot + i for i in self.irange for j in self.jrange for k in self.krange])
318  if (sys.byteorder != self.endianess):
319  A.byteswap()
320  for ii,iii in enumerate(indxx):
321  darr[ii] = A[iii]
322  vtkvar_buf = [darr]
323  else:
324  vtkvar_buf = np.frombuffer(A,dtype=np.dtype(fmt))
325  vtkvar.append(np.reshape(vtkvar_buf,self.nshp).transpose())
326  else:
327  pass
328  if l == '':
329  break
330 
331  vtkvardict = dict(zip(ks,vtkvar))
332  return vtkvardict
333 
334  def DataScanHDF5(self, fp, myvars, ilev):
335  """ Scans the Chombo HDF5 data files for AMR in PLUTO.
336 
337  **Inputs**:
338 
339  fp -- Data file pointer\n
340  myvars -- Names of the variables to read\n
341  ilev -- required AMR level
342 
343  **Output**:
344 
345  Dictionary with variable names as keys and the corresponding data arrays as values.
346 
347  **Note**:
348 
349  Due to the particularity of AMR, the grid arrays loaded in ReadGridFile are overwritten here.
350 
351  """
352  # Read the grid information
353  dim = fp['Chombo_global'].attrs.get('SpaceDim')
354  nlev = fp.attrs.get('num_levels')
355  il = min(nlev-1,ilev)
356  lev = []
357  for i in range(nlev):
358  lev.append('level_'+str(i))
359  freb = np.zeros(nlev,dtype='int')
360  for i in range(il+1)[::-1]:
361  fl = fp[lev[i]]
362  if (i == il):
363  pdom = fl.attrs.get('prob_domain')
364  dx = fl.attrs.get('dx')
365  dt = fl.attrs.get('dt')
366  ystr = 1. ; zstr = 1. ; logr = 0
367  try:
368  geom = fl.attrs.get('geometry')
369  logr = fl.attrs.get('logr')
370  if (dim == 2):
371  ystr = fl.attrs.get('g_x2stretch')
372  elif (dim == 3):
373  zstr = fl.attrs.get('g_x3stretch')
374  except:
375  print 'Old HDF5 file, not reading stretch and logr factors'
376  freb[i] = 1
377  x1b = fl.attrs.get('domBeg1')
378  if (dim == 1):
379  x2b = 0
380  else:
381  x2b = fl.attrs.get('domBeg2')
382  if (dim == 1 or dim == 2):
383  x3b = 0
384  else:
385  x3b = fl.attrs.get('domBeg3')
386  jbeg = 0 ; jend = 0 ; ny = 1
387  kbeg = 0 ; kend = 0 ; nz = 1
388  if (dim == 1):
389  ibeg = pdom[0] ; iend = pdom[1] ; nx = iend-ibeg+1
390  elif (dim == 2):
391  ibeg = pdom[0] ; iend = pdom[2] ; nx = iend-ibeg+1
392  jbeg = pdom[1] ; jend = pdom[3] ; ny = jend-jbeg+1
393  elif (dim == 3):
394  ibeg = pdom[0] ; iend = pdom[3] ; nx = iend-ibeg+1
395  jbeg = pdom[1] ; jend = pdom[4] ; ny = jend-jbeg+1
396  kbeg = pdom[2] ; kend = pdom[5] ; nz = kend-kbeg+1
397  else:
398  rat = fl.attrs.get('ref_ratio')
399  freb[i] = rat*freb[i+1]
400 
401  dx0 = dx*freb[0]
402 
403  ## Allow to load only a portion of the domain
404  if (self.x1range != None):
405  if logr == 0:
406  self.x1range = self.x1range-x1b
407  else:
408  self.x1range = [log(self.x1range[0]/x1b),log(self.x1range[1]/x1b)]
409  ibeg0 = min(self.x1range)/dx0 ; iend0 = max(self.x1range)/dx0
410  ibeg = max([ibeg, int(ibeg0*freb[0])]) ; iend = min([iend,int(iend0*freb[0]-1)])
411  nx = iend-ibeg+1
412  if (self.x2range != None):
413  self.x2range = (self.x2range-x2b)/ystr
414  jbeg0 = min(self.x2range)/dx0 ; jend0 = max(self.x2range)/dx0
415  jbeg = max([jbeg, int(jbeg0*freb[0])]) ; jend = min([jend,int(jend0*freb[0]-1)])
416  ny = jend-jbeg+1
417  if (self.x3range != None):
418  self.x3range = (self.x3range-x3b)/zstr
419  kbeg0 = min(self.x3range)/dx0 ; kend0 = max(self.x3range)/dx0
420  kbeg = max([kbeg, int(kbeg0*freb[0])]) ; kend = min([kend,int(kend0*freb[0]-1)])
421  nz = kend-kbeg+1
422 
423  ## Create uniform grids at the required level
424  if logr == 0:
425  x1 = x1b + (ibeg+np.array(range(nx))+0.5)*dx
426  else:
427  x1 = x1b*(exp((ibeg+np.array(range(nx))+1)*dx)+exp((ibeg+np.array(range(nx)))*dx))*0.5
428 
429  x2 = x2b + (jbeg+np.array(range(ny))+0.5)*dx*ystr
430  x3 = x3b + (kbeg+np.array(range(nz))+0.5)*dx*zstr
431  if logr == 0:
432  dx1 = np.ones(nx)*dx
433  else:
434  dx1 = x1b*(exp((ibeg+np.array(range(nx))+1)*dx)-exp((ibeg+np.array(range(nx)))*dx))
435  dx2 = np.ones(ny)*dx*ystr
436  dx3 = np.ones(nz)*dx*zstr
437 
438  # Create the xr arrays containing the edges positions
439  # Useful for pcolormesh which should use those
440  x1r = np.zeros(len(x1)+1) ; x1r[1:] = x1 + dx1/2.0 ; x1r[0] = x1r[1]-dx1[0]
441  x2r = np.zeros(len(x2)+1) ; x2r[1:] = x2 + dx2/2.0 ; x2r[0] = x2r[1]-dx2[0]
442  x3r = np.zeros(len(x3)+1) ; x3r[1:] = x3 + dx3/2.0 ; x3r[0] = x3r[1]-dx3[0]
443  NewGridDict = dict([('n1',nx),('n2',ny),('n3',nz),\
444  ('x1',x1),('x2',x2),('x3',x3),\
445  ('x1r',x1r),('x2r',x2r),('x3r',x3r),\
446  ('dx1',dx1),('dx2',dx2),('dx3',dx3),\
447  ('Dt',dt)])
448 
449  # Variables table
450  nvar = len(myvars)
451  vars = np.zeros((nx,ny,nz,nvar))
452 
453  LevelDic = {'nbox':0,'ibeg':ibeg,'iend':iend,'jbeg':jbeg,'jend':jend,'kbeg':kbeg,'kend':kend}
454  AMRLevel = []
455  AMRBoxes = np.zeros((nx,ny,nz))
456  for i in range(il+1):
457  AMRLevel.append(LevelDic.copy())
458  fl = fp[lev[i]]
459  data = fl['data:datatype=0']
460  boxes = fl['boxes']
461  nbox = len(boxes['lo_i'])
462  AMRLevel[i]['nbox'] = nbox
463  ncount = 0L
464  AMRLevel[i]['box']=[]
465  for j in range(nbox): # loop on all boxes of a given level
466  AMRLevel[i]['box'].append({'x0':0.,'x1':0.,'ib':0L,'ie':0L,\
467  'y0':0.,'y1':0.,'jb':0L,'je':0L,\
468  'z0':0.,'z1':0.,'kb':0L,'ke':0L})
469  # Box indexes
470  ib = boxes[j]['lo_i'] ; ie = boxes[j]['hi_i'] ; nbx = ie-ib+1
471  jb = 0 ; je = 0 ; nby = 1
472  kb = 0 ; ke = 0 ; nbz = 1
473  if (dim > 1):
474  jb = boxes[j]['lo_j'] ; je = boxes[j]['hi_j'] ; nby = je-jb+1
475  if (dim > 2):
476  kb = boxes[j]['lo_k'] ; ke = boxes[j]['hi_k'] ; nbz = ke-kb+1
477  szb = nbx*nby*nbz*nvar
478  # Rescale to current level
479  kb = kb*freb[i] ; ke = (ke+1)*freb[i] - 1
480  jb = jb*freb[i] ; je = (je+1)*freb[i] - 1
481  ib = ib*freb[i] ; ie = (ie+1)*freb[i] - 1
482 
483  # Skip boxes lying outside ranges
484  if ((ib > iend) or (ie < ibeg) or \
485  (jb > jend) or (je < jbeg) or \
486  (kb > kend) or (ke < kbeg)):
487  ncount = ncount + szb
488  else:
489 
490  ### Read data
491  q = data[ncount:ncount+szb].reshape((nvar,nbz,nby,nbx)).T
492 
493  ### Find boxes intersections with current domain ranges
494  ib0 = max([ibeg,ib]) ; ie0 = min([iend,ie])
495  jb0 = max([jbeg,jb]) ; je0 = min([jend,je])
496  kb0 = max([kbeg,kb]) ; ke0 = min([kend,ke])
497 
498  ### Store box corners in the AMRLevel structure
499  if logr == 0:
500  AMRLevel[i]['box'][j]['x0'] = x1b + dx*(ib0)
501  AMRLevel[i]['box'][j]['x1'] = x1b + dx*(ie0+1)
502  else:
503  AMRLevel[i]['box'][j]['x0'] = x1b*exp(dx*(ib0))
504  AMRLevel[i]['box'][j]['x1'] = x1b*exp(dx*(ie0+1))
505  AMRLevel[i]['box'][j]['y0'] = x2b + dx*(jb0)*ystr
506  AMRLevel[i]['box'][j]['y1'] = x2b + dx*(je0+1)*ystr
507  AMRLevel[i]['box'][j]['z0'] = x3b + dx*(kb0)*zstr
508  AMRLevel[i]['box'][j]['z1'] = x3b + dx*(ke0+1)*zstr
509  AMRLevel[i]['box'][j]['ib'] = ib0 ; AMRLevel[i]['box'][j]['ie'] = ie0
510  AMRLevel[i]['box'][j]['jb'] = jb0 ; AMRLevel[i]['box'][j]['je'] = je0
511  AMRLevel[i]['box'][j]['kb'] = kb0 ; AMRLevel[i]['box'][j]['ke'] = ke0
512  AMRBoxes[ib0-ibeg:ie0-ibeg+1, jb0-jbeg:je0-jbeg+1, kb0-kbeg:ke0-kbeg+1] = il
513 
514  ### Extract the box intersection from data stored in q
515  cib0 = (ib0-ib)/freb[i] ; cie0 = (ie0-ib)/freb[i]
516  cjb0 = (jb0-jb)/freb[i] ; cje0 = (je0-jb)/freb[i]
517  ckb0 = (kb0-kb)/freb[i] ; cke0 = (ke0-kb)/freb[i]
518  q1 = np.zeros((cie0-cib0+1, cje0-cjb0+1, cke0-ckb0+1,nvar))
519  q1 = q[cib0:cie0+1,cjb0:cje0+1,ckb0:cke0+1,:]
520 
521  # Remap the extracted portion
522  if (dim == 1):
523  new_shape = (ie0-ib0+1,1)
524  elif (dim == 2):
525  new_shape = (ie0-ib0+1,je0-jb0+1)
526  else:
527  new_shape = (ie0-ib0+1,je0-jb0+1,ke0-kb0+1)
528 
529  stmp = list(new_shape)
530  while stmp.count(1) > 0:
531  stmp.remove(1)
532  new_shape = tuple(stmp)
533 
534  myT = Tools()
535  for iv in range(nvar):
536  vars[ib0-ibeg:ie0-ibeg+1,jb0-jbeg:je0-jbeg+1,kb0-kbeg:ke0-kbeg+1,iv] = \
537  myT.congrid(q1[:,:,:,iv].squeeze(),new_shape,method='linear',minusone=True).reshape((ie0-ib0+1,je0-jb0+1,ke0-kb0+1))
538  ncount = ncount+szb
539 
540  h5vardict={}
541  for iv in range(nvar):
542  h5vardict[myvars[iv]] = vars[:,:,:,iv].squeeze()
543  AMRdict = dict([('AMRBoxes',AMRBoxes),('AMRLevel',AMRLevel)])
544  OutDict = dict(NewGridDict)
545  OutDict.update(AMRdict)
546  OutDict.update(h5vardict)
547  return OutDict
548 
549 
550  def DataScan(self, fp, n1, n2, n3, endian, dtype, off=None):
551  """ Scans the data files in all formats.
552 
553  **Inputs**:
554 
555  fp -- Data file pointer\n
556  n1 -- No. of points in X1 direction\n
557  n2 -- No. of points in X2 direction\n
558  n3 -- No. of points in X3 direction\n
559  endian -- Endianess of the data\n
560  dtype -- datatype, e.g., double, float, vtk, hdf5\n
561  off -- offset (for avoiding staggered B fields)
562 
563  **Output**:
564 
565  Dictionary with variable names as keys and the corresponding data arrays as values.
566 
567  """
568  if off is not None:
569  off_fmt = endian+str(off)+dtype
570  nboff = np.dtype(off_fmt).itemsize
571  fp.read(nboff)
572 
573  n1_tot = self.n1_tot ; n2_tot = self.n2_tot; n3_tot = self.n3_tot
574 
575  A = array.array(dtype)
576  fmt = endian+str(n1_tot*n2_tot*n3_tot)+dtype
577  nb = np.dtype(fmt).itemsize
578  A.fromstring(fp.read(nb))
579 
580  if (self.Slice):
581  darr = np.zeros((n1*n2*n3))
582  indxx = np.sort([n3_tot*n2_tot*k + j*n2_tot + i for i in self.irange for j in self.jrange for k in self.krange])
583  if (sys.byteorder != self.endianess):
584  A.byteswap()
585  for ii,iii in enumerate(indxx):
586  darr[ii] = A[iii]
587  darr = [darr]
588  else:
589  darr = np.frombuffer(A,dtype=np.dtype(fmt))
590 
591  return np.reshape(darr[0],self.nshp).transpose()
592 
593  def ReadSingleFile(self, datafilename, myvars, n1, n2, n3, endian,
594  dtype, ddict):
595  """Reads a single data file, data.****.dtype.
596 
597  **Inputs**:
598 
599  datafilename -- Data file name\n
600  myvars -- List of variable names to be read\n
601  n1 -- No. of points in X1 direction\n
602  n2 -- No. of points in X2 direction\n
603  n3 -- No. of points in X3 direction\n
604  endian -- Endianess of the data\n
605  dtype -- datatype\n
606  ddict -- Dictionary containing Grid and Time Information
607  which is updated
608 
609  **Output**:
610 
611  Updated dictionary with variable names as keys and the corresponding data arrays as values.
612  """
613  if self.datatype == 'hdf5':
614  fp = h5.File(datafilename,'r')
615  else:
616  fp = open(datafilename, "rb")
617 
618  print "Reading Data file : %s"%datafilename
619 
620  if self.datatype == 'vtk':
621  vtkd = self.DataScanVTK(fp, n1, n2, n3, endian, dtype)
622  ddict.update(vtkd)
623  elif self.datatype == 'hdf5':
624  h5d = self.DataScanHDF5(fp,myvars,self.level)
625  ddict.update(h5d)
626  else:
627  for i in range(len(myvars)):
628  if myvars[i] == 'bx1s':
629  ddict.update({myvars[i]: self.DataScan(fp, n1, n2, n3, endian,
630  dtype, off=n2*n3)})
631  elif myvars[i] == 'bx2s':
632  ddict.update({myvars[i]: self.DataScan(fp, n1, n2, n3, endian,
633  dtype, off=n3*n1)})
634  elif myvars[i] == 'bx3s':
635  ddict.update({myvars[i]: self.DataScan(fp, n1, n2, n3, endian,
636  dtype, off=n1*n2)})
637  else:
638  ddict.update({myvars[i]: self.DataScan(fp, n1, n2, n3, endian,
639  dtype)})
640 
641 
642  fp.close()
643 
644  def ReadMultipleFiles(self, nstr, dataext, myvars, n1, n2, n3, endian,
645  dtype, ddict):
646  """Reads multiple data files, varname.****.dataext.
647 
648  **Inputs**:
649 
650  nstr -- File number in form of a string\n
651  dataext -- Data type of the file, e.g., 'dbl', 'flt' or 'vtk' \n
652  myvars -- List of variable names to be read\n
653  n1 -- No. of points in X1 direction\n
654  n2 -- No. of points in X2 direction\n
655  n3 -- No. of points in X3 direction\n
656  endian -- Endianess of the data\n
657  dtype -- datatype\n
658  ddict -- Dictionary containing Grid and Time Information
659  which is updated.
660 
661  **Output**:
662 
663  Updated dictionary with variable names as keys and the corresponding data arrays as values.
664 
665  """
666  for i in range(len(myvars)):
667  datafilename = self.wdir+myvars[i]+"."+nstr+dataext
668  fp = open(datafilename, "rb")
669  if self.datatype == 'vtk':
670  ddict.update(self.DataScanVTK(fp, n1, n2, n3, endian, dtype))
671  else:
672  ddict.update({myvars[i]: self.DataScan(fp, n1, n2, n3, endian,
673  dtype)})
674  fp.close()
675 
676  def ReadDataFile(self, num):
677  """Reads the data file generated from PLUTO code.
678 
679  **Inputs**:
680 
681  num -- Data file number, given as a zero-padded string (e.g. '0001').
682 
683  **Outputs**:
684 
685  Dictionary that contains all information about Grid, Time and
686  variables.
687 
688  """
689  gridfile = self.wdir+"grid.out"
690  if self.datatype == "float":
691  dtype = "f"
692  varfile = self.wdir+"flt.out"
693  dataext = ".flt"
694  elif self.datatype == "vtk":
695  dtype = "f"
696  varfile = self.wdir+"vtk.out"
697  dataext=".vtk"
698  elif self.datatype == 'hdf5':
699  dtype = 'd'
700  dataext = '.hdf5'
701  nstr = num
702  varfile = self.wdir+"data."+nstr+dataext
703  else:
704  dtype = "d"
705  varfile = self.wdir+"dbl.out"
706  dataext = ".dbl"
707 
708  self.ReadVarFile(varfile)
709  self.ReadGridFile(gridfile)
710  self.ReadTimeInfo(varfile)
711  nstr = num
712  if self.endianess == 'big':
713  endian = ">"
714  elif self.datatype == 'vtk':
715  endian = ">"
716  else:
717  endian = "<"
718 
719  D = [('NStep', self.NStep), ('SimTime', self.SimTime), ('Dt', self.Dt),
720  ('n1', self.n1), ('n2', self.n2), ('n3', self.n3),
721  ('x1', self.x1), ('x2', self.x2), ('x3', self.x3),
722  ('dx1', self.dx1), ('dx2', self.dx2), ('dx3', self.dx3),
723  ('endianess', self.endianess), ('datatype', self.datatype),
724  ('filetype', self.filetype)]
725  ddict = dict(D)
726 
727  if self.filetype == "single_file":
728  datafilename = self.wdir+"data."+nstr+dataext
729  self.ReadSingleFile(datafilename, self.vars, self.n1, self.n2,
730  self.n3, endian, dtype, ddict)
731  elif self.filetype == "multiple_files":
732  self.ReadMultipleFiles(nstr, dataext, self.vars, self.n1, self.n2,
733  self.n3, endian, dtype, ddict)
734  else:
735  print "Wrong file type : CHECK pluto.ini for file type."
736  print "Only supported are .dbl, .flt, .vtk, .hdf5"
737  sys.exit()
738 
739  return ddict
740 
741 
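# A sketch of how ReadDataFile dispatches on datatype, assuming the standard
# PLUTO output files are present in the working directory (single-file output;
# in multiple_files mode the data are read from varname.0010.<ext> instead):
#
#   import pyPLUTO as pp
#   D_dbl = pp.pload(10)                            # dbl.out + data.0010.dbl
#   D_flt = pp.pload(10, datatype='float')          # flt.out + data.0010.flt
#   D_vtk = pp.pload(10, datatype='vtk')            # vtk.out + data.0010.vtk
#   D_amr = pp.pload(10, datatype='hdf5', level=2)  # Chombo AMR file, needs h5py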
742 class Tools(object):
743  """
744 
745  This class collects the functions that perform basic mathematical
746  operations on the vector or scalar fields.
747  It is meant to be used after a pyPLUTO.pload object has been defined.
748 
749  """
750 
751  def deriv(self,Y,X=None):
752  """
753  Calculates the derivative of Y with respect to X.
754 
755  **Inputs:**
756 
757  Y : 1-D array to be differentiated.\n
758  X : 1-D array with len(X) = len(Y).\n
759 
760  If X is not specified then by default X is chosen to be an equally spaced array having the same number of elements
761  as Y.
762 
763  **Outputs:**
764 
765  This returns a 1-D array with the same number of elements as Y (or X), containing the values of dY/dX.
766 
767  """
768  n = len(Y)
769  n2 = n-2
770  if X is None: X = np.arange(n)
771  Xarr = np.asarray(X,dtype='float')
772  Yarr = np.asarray(Y,dtype='float')
773  x12 = Xarr - np.roll(Xarr,-1) #x1 - x2
774  x01 = np.roll(Xarr,1) - Xarr #x0 - x1
775  x02 = np.roll(Xarr,1) - np.roll(Xarr,-1) #x0 - x2
776  DfDx = np.roll(Yarr,1) * (x12 / (x01*x02)) + Yarr * (1./x12 - 1./x01) - np.roll(Yarr,-1) * (x01 / (x02 * x12))
777  # Formulae for the first and last points:
778 
779  DfDx[0] = Yarr[0] * (x01[1]+x02[1])/(x01[1]*x02[1]) - Yarr[1] * x02[1]/(x01[1]*x12[1]) + Yarr[2] * x01[1]/(x02[1]*x12[1])
780  DfDx[n-1] = -Yarr[n-3] * x12[n2]/(x01[n2]*x02[n2]) + Yarr[n-2]*x02[n2]/(x01[n2]*x12[n2]) - Yarr[n-1]*(x02[n2]+x12[n2])/(x02[n2]*x12[n2])
781 
782  return DfDx
783 
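# A quick numerical check of deriv, assuming NumPy as imported above: away from
# the end points (where one-sided formulae are used) the derivative of sin(x)
# should closely match cos(x).
#
#   import numpy as np
#   x = np.linspace(0.0, np.pi, 100)
#   dydx = Tools().deriv(np.sin(x), x)
#   np.allclose(dydx[1:-1], np.cos(x)[1:-1], atol=1.e-3)   # approximately True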
784  def Grad(self,phi,x1,x2,dx1,dx2,polar=False):
785  """ This method calculates the gradient of the 2D scalar phi.
786 
787  **Inputs:**
788 
789  phi -- 2D scalar whose gradient is to be determined.\n
790  x1 -- The 'x' array\n
791  x2 -- The 'y' array\n
792  dx1 -- The grid spacing in 'x' direction.\n
793  dx2 -- The grid spacing in 'y' direction.\n
794  polar -- The keyword should be set to True in order to estimate the gradient in polar co-ordinates. By default it is set to False.
795 
796  **Outputs:**
797 
798  This routine outputs a 3D array with shape = (len(x1),len(x2),2), such that the [:,:,0] element corresponds to the gradient of phi with respect to x1 and [:,:,1] to the gradient of phi with respect to x2.
799 
800  """
801  (n1, n2) = phi.shape
802  grad_phi = np.zeros(shape=(n1,n2,2))
803  h2 = np.ones(shape=(n1,n2))
804  if polar == True:
805  for j in range(n2):
806  h2[:,j] = x1
807 
808  for i in range(n1):
809  scrh1 = phi[i,:]
810  grad_phi[i,:,1] = self.deriv(scrh1,x2)/h2[i,:]
811  for j in range(n2):
812  scrh2 = phi[:,j]
813  grad_phi[:,j,0] = self.deriv(scrh2,x1)
814 
815  return grad_phi
816 
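# A usage sketch for Grad, assuming a 2D pload object D with a pressure field
# 'prs' (a hypothetical variable name):
#
#   T = Tools()
#   gradp = T.Grad(D.prs, D.x1, D.x2, D.dx1, D.dx2)
#   dp_dx1 = gradp[:, :, 0]        # derivative of prs along x1
#   dp_dx2 = gradp[:, :, 1]        # derivative of prs along x2 (set polar=True
#                                  # to divide by x1 in polar coordinates)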
817  def Div(self,u1,u2,x1,x2,dx1,dx2,geometry=None):
818  """ This method calculates the divergence of the 2D vector fields u1 and u2.
819 
820  **Inputs:**
821 
822  u1 -- 2D vector along x1 whose divergence is to be determined.\n
823  u2 -- 2D vector along x2 whose divergence is to be determined.\n
824  x1 -- The 'x' array\n
825  x2 -- The 'y' array\n
826  dx1 -- The grid spacing in 'x' direction.\n
827  dx2 -- The grid spacing in 'y' direction.\n
828  geometry -- The keyword *geometry* is by default set to 'cartesian'. It can be set to either one of the following : *cartesian*, *cylindrical*, *spherical* or *polar*. To calculate the divergence of the vector fields, respective geometric corrections are taken into account based on the value of this keyword.
829 
830  **Outputs:**
831 
832  A 2D array with same shape as u1(or u2) having the values of divergence.
833 
834  """
835  (n1, n2) = u1.shape
836  Divergence = np.zeros(shape=(n1,n2))
837  du1 = np.zeros(shape=(n1,n2))
838  du2 = np.zeros(shape=(n1,n2))
839 
840  A1 = np.zeros(shape=n1)
841  A2 = np.zeros(shape=n2)
842 
843  dV1 = np.zeros(shape=(n1,n2))
844  dV2 = np.zeros(shape=(n1,n2))
845 
846  if geometry == None : geometry = 'cartesian'
847 
848  #------------------------------------------------
849  # define area and volume elements for the
850  # different coordinate systems
851  #------------------------------------------------
852 
853  if geometry == 'cartesian' :
854  A1[:] = 1.0
855  A2[:] = 1.0
856  dV1 = np.outer(dx1,A2)
857  dV2 = np.outer(A1,dx2)
858 
859  if geometry == 'cylindrical' :
860  A1 = x1
861  A2[:] = 1.0
862  dV1 = np.meshgrid(x1*dx1,A2)[0].T*np.meshgrid(x1*dx1,A2)[1].T
863  for i in range(n1) : dV2[i,:] = dx2[:]
864 
865  if geometry == 'polar' :
866  A1 = x1
867  A2[:] = 1.0
868  dV1 = np.meshgrid(x1,A2)[0].T*np.meshgrid(x1,A2)[1].T
869  dV2 = np.meshgrid(x1,dx2)[0].T*np.meshgrid(x1,dx2)[1].T
870 
871  if geometry == 'spherical' :
872  A1 = x1*x1
873  A2 = np.sin(x2)
874  for j in range(n2): dV1[:,j] = A1*dx1
875  dV2 = np.meshgrid(x1,np.sin(x2)*dx2)[0].T*np.meshgrid(x1,np.sin(x2)*dx2)[1].T
876 
877  # ------------------------------------------------
878  # Make divergence
879  # ------------------------------------------------
880 
881 
882  for i in range(1,n1-1):
883  du1[i,:] = 0.5*(A1[i+1]*u1[i+1,:] - A1[i-1]*u1[i-1,:])/dV1[i,:]
884  for j in range(1,n2-1):
885  du2[:,j] = 0.5*(A2[j+1]*u2[:,j+1] - A2[j-1]*u2[:,j-1])/dV2[:,j]
886 
887  Divergence = du1 + du2
888  return Divergence
889 
890 
891 
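# A usage sketch for Div, assuming a 2D pload object D with velocity components
# vx1 and vx2 loaded; D.geometry (read from grid.out, if present) is upper case,
# while Div expects lower-case keywords:
#
#   T = Tools()
#   divV = T.Div(D.vx1, D.vx2, D.x1, D.x2, D.dx1, D.dx2,
#                geometry=D.geometry.lower())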
892  def RTh2Cyl(self,R,Th,X1,X2):
893  """ This method does the transformation from spherical coordinates to cylindrical ones.
894 
895  **Inputs:**
896 
897  R - 2D array of spherical radius coordinates.\n
898  Th - 2D array of spherical theta-angle coordinates.\n
899  X1 - 2D array of radial component of given vector\n
900  X2 - 2D array of the theta (polar-angle) component of the given vector\n
901 
902  **Outputs:**
903 
904  This routine outputs two 2D arrays after transformation.
905 
906  **Usage:**
907 
908  ``import pyPLUTO as pp``\n
909  ``import numpy as np``\n
910  ``D = pp.pload(0)``\n
911  ``ppt=pp.Tools()``\n
912  ``TH,R=np.meshgrid(D.x2,D.x1)``\n
913  ``Br,Bz=ppt.RTh2Cyl(R,TH,D.bx1,D.bx2)``
914 
915  D.bx1 and D.bx2 should be vector components in spherical coordinates. After the transformation, (Br,Bz) are the corresponding components in cylindrical coordinates.
916 
917 
918  """
919  Y1=X1*np.sin(Th)+X2*np.cos(Th)
920  Y2=X1*np.cos(Th)-X2*np.sin(Th)
921  return Y1,Y2
922 
923 
924 
925 
926  def myInterpol(self,RR,N):
927  """ This method maps the 1D (possibly stretched) coordinate array RR onto a uniform grid of N points, returning the new grid and the fractional indices of its points within RR (a cubic spline is used for the index mapping). Useful for stretched grid calculations.
928 
929  **Inputs:**
930 
931  RR - 1D array to interpolate.\n
932  N - Number of grids to interpolate to.\n
933 
934  **Outputs:**
935 
936  This routine outputs interpolated 1D array to the new grid (len=N).
937 
938  **Usage:**
939 
940  ``import pyPLUTO as pp``\n
941  ``import numpy as np``\n
942  ``D = pp.pload(0)``\n
943  ``ppt=pp.Tools()``\n
944  ``x=linspace(0,1,10) #len(x)=10``\n
945  ``y=x*x``\n
946  ``Ri,Ni=ppt.myInterpol(y,100) #len(Ri)=100``
947 
948  Ri - the new uniform grid of values (len(Ri) = N);
949  Ni - the (fractional) indices of the points of Ri within the original array
950 
951  """
952 
953  NN=np.linspace(0,len(RR)-1,len(RR))
954  spline_fit=UnivariateSpline(RR,NN,k=3,s=0)
955 
956  RRi=np.linspace(RR[0],RR[-1],N)
957  NNi=spline_fit(RRi)
958  NNi[0]=NN[0]+0.00001
959  NNi[-1]=NN[-1]-0.00001
960  return RRi,NNi
961 
962  def getUniformGrid(self,r,th,rho,Nr,Nth):
963  """ This method transforms data with non-uniform grid (stretched) to uniform. Useful for stretched grid calculations.
964 
965  **Inputs:**
966 
967  r - 1D vector of X1 coordinate (could be any, e.g D.x1).\n
968  th - 1D vector of X2 coordinate (could be any, e.g D.x2).\n
969  rho- 2D array of data.\n
970  Nr - new size of X1 vector.\n
971  Nth- new size of X2 vector.\n
972 
973  **Outputs:**
974 
975  This routine outputs the interpolated X1 and X2 grids (of length Nr and Nth) and a 2D array of dimension Nr x Nth with the data interpolated onto the uniform grid.
976 
977  **Usage:**
978 
979  ``import pyPLUTO as pp``\n
980  ``import numpy as np``\n
981  ``D = pp.pload(0)``\n
982  ``ppt=pp.Tools()``\n
983  ``X1new, X2new, res = ppt.getUniformGrid(D.x1,D.x2,D.rho,20,30)``
984 
985  X1new - X1 interpolated grid len(X1new)=20
986  X2new - X2 interpolated grid len(X2new)=30
987  res - 2D array of interpolated variable
988 
989  """
990 
991  Ri,NRi=self.myInterpol(r,Nr)
992  Ra=np.int32(NRi);Wr=NRi-Ra
993 
994  YY=np.ones([Nr,len(th)])
995  for i in range(len(th)):
996  YY[:,i]=(1-Wr)*rho[Ra,i] + Wr*rho[Ra+1,i]
997 
998  THi,NTHi=self.myInterpol(th,Nth)
999  THa=np.int32(NTHi);Wth=NTHi-THa
1000 
1001  ZZ=np.ones([Nr,Nth])
1002  for i in range(Nr):
1003  ZZ[i,:]=(1-Wth)*YY[i,THa] + Wth*YY[i,THa+1]
1004 
1005  return Ri,THi,ZZ
1006 
1007  def sph2cyl(self,D,Dx,rphi=None,theta0=None):
1008  """ This method transforms spherical data into cylindrical coordinates, applying interpolation. It works for stretched grids as well; by default it transforms poloidal (R-Theta) data. Fix theta and set rphi=True to get the (R-Phi) transformation.
1009 
1010  **Inputs:**
1011 
1012  D - structure from 'pload' method.\n
1013  Dx - variable to be transformed (D.rho for example).\n
1014 
1015  **Outputs:**
1016 
1017  This routine outputs transformed (sph->cyl) variable and grid.
1018 
1019  **Usage:**
1020 
1021  ``import pyPLUTO as pp``\n
1022  ``import numpy as np``\n
1023  ``D = pp.pload(0)``\n
1024  ``ppt=pp.Tools()``\n
1025  ``R,Z,res = ppt.sph2cyl(D,D.rho.transpose())``
1026 
1027  R - 2D array with cylindrical radius values
1028  Z - 2D array with cylindrical Z values
1029  res - 2D array of transformed variable
1030 
1031  """
1032 
1033  if rphi is None or rphi == False:
1034  rx=D.x1
1035  th=D.x2
1036  else:
1037  rx=D.x1*np.sin(theta0)
1038  th=D.x3
1039 
1040  rx,th,Dx=self.getUniformGrid(rx,th,Dx.T,200,200)
1041  Dx=Dx.T
1042 
1043  if rphi is None or rphi == False:
1044 
1045  r0=np.min(np.sin(th)*rx[0])
1046  rN=rx[-1]
1047  dr=rN-r0
1048  z0=np.min(np.cos(th)*rN)
1049  zN=np.max(np.cos(th)*rN)
1050  dz=zN-z0
1051  dth=th[-1]-th[0]
1052  rl=np.int32(len(rx)*dr/(rx[-1]-rx[0]))
1053  zl=np.int32(rl* dz/dr)
1054  thl=len(th)
1055  r=np.linspace(r0, rN, rl)
1056  z=np.linspace(z0, zN, zl)
1057  else:
1058  r0=np.min([np.sin(th)*rx[0] , np.sin(th)*rx[-1]])
1059  rN=np.max([np.sin(th)*rx[0] , np.sin(th)*rx[-1]])
1060  dr=rN-r0
1061  z0=np.min(np.cos(th)*rN)
1062  zN=np.max(np.cos(th)*rN)
1063  dz=zN-z0
1064  dth=th[-1]-th[0]
1065  rl=np.int32(len(rx)*dr/(rx[-1]-rx[0]))
1066  zl=np.int32(rl* dz/dr)
1067  thl=len(th)
1068  r=np.linspace(r0, rN, rl)
1069  z=np.linspace(z0, zN, zl)
1070 
1071  R,Z = np.meshgrid(r, z)
1072  Rs = np.sqrt(R*R + Z*Z)
1073 
1074 
1075  Th = np.arccos(Z/Rs)
1076  kv_34=find(R<0)
1077  Th.flat[kv_34]=2*np.pi - Th.flat[kv_34]
1078 
1079 
1080  ddr=rx[1]-rx[0]
1081  ddth=th[1]-th[0]
1082 
1083  Rs_copy=Rs.copy()
1084  Th_copy=Th.copy()
1085 
1086  nR1=find(Rs<rx[0])
1087  Rs.flat[nR1]=rx[0]
1088  nR2=find(Rs>rN)
1089  Rs.flat[nR2]=rN
1090 
1091  nTh1=find(Th>th[-1])
1092  Th.flat[nTh1]=th[-1]
1093  nTh2=find(Th<th[0])
1094  Th.flat[nTh2]=th[0]
1095 
1096 
1097  ra = ((len(rx)-1.001)/(np.max(Rs.flat)-np.min(Rs.flat)) *(Rs-np.min(Rs.flat)))
1098  tha = ((thl-1.001)/dth *(Th-th[0]))
1099 
1100  rn = np.int32(ra)
1101  thn = np.int32(tha)
1102  dra=ra-rn
1103  dtha=tha-thn
1104  w1=1-dra
1105  w2=dra
1106  w3=1-dtha
1107  w4=dtha
1108  lrx=len(rx)
1109  NN1=np.int32(rn+thn*lrx)
1110  NN2=np.int32((rn+1)+thn*lrx)
1111  NN3=np.int32(rn+(thn+1)*lrx)
1112  NN4=np.int32((rn+1)+(thn+1)*lrx)
1113  n=np.transpose(np.arange(0,np.product(np.shape(R))))
1114  DD=Dx.copy()
1115  F=R.copy()
1116  F.flat[n]=w1.flat[n]*(w3.flat[n]*Dx.flat[NN1.flat[n]] + w4.flat[n]*Dx.flat[NN3.flat[n]] )+\
1117  w2.flat[n]*(w3.flat[n]*Dx.flat[NN2.flat[n]] + w4.flat[n]*Dx.flat[NN4.flat[n]] )
1118 
1119  nR1=find(Rs_copy<rx[0]-ddr/1.5)
1120  nR2=find(Rs_copy>rN+ddr/1.5)
1121  nTh1=find(Th_copy>th[-1]+ddth/1.5)
1122  nTh2=find(Th_copy<th[0]-ddth/1.5)
1123 
1124  nmask=np.concatenate((nR1,nR2,nTh1,nTh2))
1125  F.flat[nmask]=np.nan;
1126  return R,Z,F
1127 
1128 
1129  def congrid(self, a, newdims, method='linear', centre=False, minusone=False):
1130  """
1131  Arbitrary resampling of source array to new dimension sizes.
1132  Currently only supports maintaining the same number of dimensions.
1133  To use 1-D arrays, first promote them to shape (x,1).
1134 
1135  Uses the same parameters and creates the same co-ordinate lookup points
1136  as IDL's congrid routine, which apparently originally came from a VAX/VMS
1137  routine of the same name.
1138 
1139  **Inputs:**
1140 
1141  a -- The 2D array that needs resampling into new dimensions.\n
1142  newdims -- A tuple which represents the shape of the resampled data\n
1143  method -- This keyword decides the method used for interpolation.\n
1144  neighbour - closest value from original data\n
1145  nearest and linear - uses n x 1-D interpolations using scipy.interpolate.interp1d
1146  (see Numerical Recipes for validity of use of n 1-D interpolations)\n
1147  spline - uses ndimage.map_coordinates\n
1148  centre -- This keyword decides the positions of interpolation points.\n
1149  True - interpolation points are at the centres of the bins\n
1150  False - points are at the front edge of the bin\n
1151  minusone -- This prevents extrapolation one element beyond bounds of input array\n
1152  For example- inarray.shape = (i,j) & new dimensions = (x,y)\n
1153  False - inarray is resampled by factors of (i/x) * (j/y)\n
1154  True - inarray is resampled by(i-1)/(x-1) * (j-1)/(y-1)\n
1155 
1156  **Outputs:**
1157 
1158  A 2D array with resampled data having a shape corresponding to newdims.
1159 
1160  """
1161  if not a.dtype in [np.float64, np.float32]:
1162  a = np.cast[float](a)
1163 
1164  m1 = np.cast[int](minusone)
1165  ofs = np.cast[int](centre) * 0.5
1166  old = np.array( a.shape )
1167  ndims = len( a.shape )
1168  if len( newdims ) != ndims:
1169  print "[congrid] dimensions error. " \
1170  "This routine currently only support " \
1171  "rebinning to the same number of dimensions."
1172  return None
1173  newdims = np.asarray( newdims, dtype=float )
1174  dimlist = []
1175 
1176  if method == 'neighbour':
1177  for i in range( ndims ):
1178  base = np.indices(newdims)[i]
1179  dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
1180  * (base + ofs) - ofs )
1181  cd = np.array( dimlist ).round().astype(int)
1182  newa = a[list( cd )]
1183  return newa
1184 
1185  elif method in ['nearest','linear']:
1186  # calculate new dims
1187  for i in range( ndims ):
1188  base = np.arange( newdims[i] )
1189  dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
1190  * (base + ofs) - ofs )
1191  # specify old dims
1192  olddims = [np.arange(i, dtype = np.float) for i in list( a.shape )]
1193 
1194  # first interpolation - for ndims = any
1195  mint = scipy.interpolate.interp1d( olddims[-1], a, kind=method )
1196  newa = mint( dimlist[-1] )
1197 
1198  trorder = [ndims - 1] + range( ndims - 1 )
1199  for i in range( ndims - 2, -1, -1 ):
1200  newa = newa.transpose( trorder )
1201 
1202  mint = scipy.interpolate.interp1d( olddims[i], newa, kind=method )
1203  newa = mint( dimlist[i] )
1204 
1205  if ndims > 1:
1206  # need one more transpose to return to original dimensions
1207  newa = newa.transpose( trorder )
1208 
1209  return newa
1210  elif method in ['spline']:
1211  oslices = [ slice(0,j) for j in old ]
1212  oldcoords = np.ogrid[oslices]
1213  nslices = [ slice(0,j) for j in list(newdims) ]
1214  newcoords = np.mgrid[nslices]
1215 
1216  newcoords_dims = range(np.ndim(newcoords))
1217  #make first index last
1218  newcoords_dims.append(newcoords_dims.pop(0))
1219  newcoords_tr = newcoords.transpose(newcoords_dims)
1220  # makes a view that affects newcoords
1221 
1222  newcoords_tr += ofs
1223 
1224  deltas = (np.asarray(old) - m1) / (newdims - m1)
1225  newcoords_tr *= deltas
1226 
1227  newcoords_tr -= ofs
1228 
1229  newa = scipy.ndimage.map_coordinates(a, newcoords)
1230  return newa
1231  else:
1232  print "Congrid error: Unrecognized interpolation type.\n", \
1233  "Currently only \'neighbour\', \'nearest\',\'linear\',", \
1234  "and \'spline\' are supported."
1235  return None
1236 
1237 
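# A small sketch of congrid, assuming a 2D NumPy array: resample a 64x64 array
# onto a 100x150 grid with linear interpolation (minusone=True reproduces the
# IDL convention of mapping end points onto end points).
#
#   import numpy as np
#   a = np.random.rand(64, 64)
#   b = Tools().congrid(a, (100, 150), method='linear', minusone=True)
#   b.shape   # -> (100, 150)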
1238 
1239 
1240 
1241 
1242 
1243 
1244 
1245 
1246 
1247 class Image(object):
1248  ''' This class has all the routines for imaging the data
1249  and plotting various contours and field lines on these images.
1250  It is meant to be used after a pyPLUTO.pload object is defined.
1251  '''
1252  def pldisplay(self, D, var,**kwargs):
1253  """ This method allows the user to display 2D data using
1254  matplotlib's pcolormesh.
1255 
1256  **Inputs:**
1257 
1258  D -- pyPLUTO pload object.\n
1259  var -- 2D array that needs to be displayed.
1260 
1261  *Required Keywords:*
1262 
1263  x1 -- The 'x' array\n
1264  x2 -- The 'y' array
1265 
1266  *Optional Keywords:*
1267 
1268  vmin -- The minimum value of the 2D array (Default : min(var))\n
1269  vmax -- The maximum value of the 2D array (Default : max(var))\n
1270  title -- Sets the title of the image.\n
1271  label1 -- Sets the X Label (Default: 'XLabel')\n
1272  label2 -- Sets the Y Label (Default: 'YLabel')\n
1273  polar -- A list to project Polar data on Cartesian Grid.\n
1274  polar = [True, True] -- Projects r-phi plane.\n
1275  polar = [True, False] -- Project r-theta plane.\n
1276  polar = [False, False] -- No polar plot (Default)\n
1277  cbar -- A tuple that turns the colorbar on or off. \n
1278  cbar = (True,'vertical') -- Displays a vertical colorbar\n
1279  cbar = (True,'horizontal') -- Displays a horizontal colorbar\n
1280  cbar = (False,'') -- Displays no colorbar.
1281 
1282  **Usage:**
1283 
1284  ``import pyPLUTO as pp``\n
1285  ``wdir = '/path/to/the data files/'``\n
1286  ``D = pp.pload(1,w_dir=wdir)``\n
1287  ``I = pp.Image()``\n
1288  ``I.pldisplay(D, D.v2, x1=D.x1, x2=D.x2, cbar=(True,'vertical'),\
1289  title='Velocity',label1='Radius',label2='Height')``
1290  """
1291  x1 = kwargs.get('x1')
1292  x2 = kwargs.get('x2')
1293  var = var.T
1294 
1295  f1 = figure(kwargs.get('fignum',1), figsize=kwargs.get('figsize',[10,10]),
1296  dpi=80, facecolor='w', edgecolor='k')
1297  ax1 = f1.add_subplot(111)
1298  ax1.set_aspect('equal')
1299 
1300  if kwargs.get('polar',[False,False])[0]:
1301  xx, yy = self.getPolarData(D,kwargs.get('x2'),rphi=kwargs.get('polar')[1])
1302  pcolormesh(xx,yy,var,vmin=kwargs.get('vmin',np.min(var)),vmax=kwargs.get('vmax',np.max(var)))
1303  else:
1304  ax1.axis([np.min(x1),np.max(x1),np.min(x2),np.max(x2)])
1305  pcolormesh(x1,x2,var,vmin=kwargs.get('vmin',np.min(var)),vmax=kwargs.get('vmax',np.max(var)))
1306 
1307  title(kwargs.get('title',"Title"),size=kwargs.get('size'))
1308  xlabel(kwargs.get('label1',"Xlabel"),size=kwargs.get('size'))
1309  ylabel(kwargs.get('label2',"Ylabel"),size=kwargs.get('size'))
1310  if kwargs.get('cbar',(False,''))[0] == True:
1311  colorbar(orientation=kwargs.get('cbar')[1])
1312 
1313 
1314  def multi_disp(self,*args,**kwargs):
1315  mvar = []
1316  for arg in args:
1317  mvar.append(arg.T)
1318 
1319  xmin = np.min(kwargs.get('x1'))
1320  xmax = np.max(kwargs.get('x1'))
1321  ymin = np.min(kwargs.get('x2'))
1322  ymax = np.max(kwargs.get('x2'))
1323  mfig = figure(kwargs.get('fignum',1),figsize=kwargs.get('figsize',[10,10]))
1324  Ncols = kwargs.get('Ncols')
1325  Nrows = len(args)/Ncols
1326  mprod = Nrows*Ncols
1327  dictcbar=kwargs.get('cbar',(False,'','each'))
1328 
1329  for j in range(mprod):
1330  mfig.add_subplot(Nrows,Ncols,j+1)
1331  pcolormesh(kwargs.get('x1'),kwargs.get('x2'), mvar[j])
1332  axis([xmin,xmax,ymin,ymax])
1333  gca().set_aspect('equal')
1334 
1335  xlabel(kwargs.get('label1',mprod*['Xlabel'])[j])
1336  ylabel(kwargs.get('label2',mprod*['Ylabel'])[j])
1337  title(kwargs.get('title',mprod*['Title'])[j])
1338  if (dictcbar[0] == True) and (dictcbar[2] =='each'):
1339  colorbar(orientation=kwargs.get('cbar')[1])
1340  if dictcbar[0] == True and dictcbar[2]=='last':
1341  if (j == np.max(range(mprod))):colorbar(orientation=kwargs.get('cbar')[1])
1342 
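# A minimal usage sketch for multi_disp, assuming a 2D pload object D with
# 'rho' and 'prs' loaded: the positional 2D arrays are tiled on an Ncols-wide
# grid of pcolormesh panels.
#
#   I = Image()
#   I.multi_disp(D.rho, D.prs, x1=D.x1, x2=D.x2, Ncols=2,
#                label1=['x', 'x'], label2=['y', 'y'],
#                title=['Density', 'Pressure'], cbar=(True, 'vertical', 'each'))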
1343  def oplotbox(self, AMRLevel, lrange=[0,0], cval=['b','r','g','m','w','k'],\
1344  islice=-1, jslice=-1, kslice=-1,geom='CARTESIAN'):
1345  """
1346  This method overplots the AMR boxes up to the specified level.
1347 
1348  **Input:**
1349 
1350  AMRLevel -- AMR object loaded during the reading and stored in the pload object
1351 
1352  *Optional Keywords:*
1353 
1354  lrange -- [level_min,level_max] to be overplotted. By default it shows all the loaded levels\n
1355  cval -- list of colors for the levels to be overplotted.\n
1356  [ijk]slice -- Index of the 2D slice to look for so that the adequate box limits are plotted.
1357  By default oplotbox considers you are plotting a 2D slice of the z=min(x3) plane.\n
1358  geom -- Specifies the geometry. Currently, CARTESIAN (default) and POLAR geometries are handled.
1359  """
1360 
1361  nlev = len(AMRLevel)
1362  lrange[1] = min(lrange[1],nlev-1)
1363  npl = lrange[1]-lrange[0]+1
1364  lpls = [lrange[0]+v for v in range(npl)]
1365  cols = cval[0:nlev]
1366  # Get the offset and the type of slice
1367  Slice = 0 ; inds = 'k'
1368  xx = 'x' ; yy ='y'
1369  if (islice >= 0):
1370  Slice = islice + AMRLevel[0]['ibeg'] ; inds = 'i'
1371  xx = 'y' ; yy ='z'
1372  if (jslice >= 0):
1373  Slice = jslice + AMRLevel[0]['jbeg'] ; inds = 'j'
1374  xx = 'x' ; yy ='z'
1375  if (kslice >= 0):
1376  Slice = kslice + AMRLevel[0]['kbeg'] ; inds = 'k'
1377  xx = 'x' ; yy ='y'
1378 
1379  # Overplot the boxes
1380  hold(True)
1381  for il in lpls:
1382  level = AMRLevel[il]
1383  for ib in range(level['nbox']):
1384  box = level['box'][ib]
1385  if ((Slice-box[inds+'b'])*(box[inds+'e']-Slice) >= 0):
1386  if (geom == 'CARTESIAN'):
1387  x0 = box[xx+'0'] ; x1 = box[xx+'1']
1388  y0 = box[yy+'0'] ; y1 = box[yy+'1']
1389  plot([x0,x1,x1,x0,x0],[y0,y0,y1,y1,y0],color=cols[il])
1390  elif (geom == 'POLAR') or (geom == 'SPHERICAL'):
1391  dn = np.pi/50.
1392  x0 = box[xx+'0'] ; x1 = box[xx+'1']
1393  y0 = box[yy+'0'] ; y1 = box[yy+'1']
1394  if y0 == y1:
1395  y1 = 2*np.pi+y0 - 1.e-3
1396  xb = np.concatenate([
1397  [x0*np.cos(y0),x1*np.cos(y0)],\
1398  x1*np.cos(np.linspace(y0,y1,num=int(abs(y0-y1)/dn) )),\
1399  [x1*np.cos(y1),x0*np.cos(y1)],\
1400  x0*np.cos(np.linspace(y1,y0,num=int(abs(y0-y1)/dn)))])
1401  yb = np.concatenate([
1402  [x0*np.sin(y0),x1*np.sin(y0)],\
1403  x1*np.sin(np.linspace(y0,y1,num=int(abs(y0-y1)/dn))),\
1404  [x1*np.sin(y1),x0*np.sin(y1)],\
1405  x0*np.sin(np.linspace(y1,y0,num=int(abs(y0-y1)/dn)))])
1406  plot(xb,yb,color=cols[il])
1407 
1408  hold(False)
1409 
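# A usage sketch for oplotbox, assuming D was loaded with datatype='hdf5' so
# that D.AMRLevel and D.AMRBoxes exist (built in DataScanHDF5), and that a 2D
# background image has already been drawn:
#
#   I = Image()
#   I.pldisplay(D, D.rho, x1=D.x1, x2=D.x2)        # 'rho' assumed to be loaded
#   I.oplotbox(D.AMRLevel, lrange=[0, 2], geom=D.geometry)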
1410  def field_interp(self,var1,var2,x,y,dx,dy,xp,yp):
1411  """ This method interpolates the values of the vector fields (var1 and var2) at the field point (xp, yp).
1412  The field points are obtained from the method field_line.
1413 
1414  **Inputs:**
1415 
1416  var1 -- 2D Vector field in X direction\n
1417  var2 -- 2D Vector field in Y direction\n
1418  x -- 1D X array\n
1419  y -- 1D Y array\n
1420  dx -- 1D grid spacing array in X direction\n
1421  dy -- 1D grid spacing array in Y direction\n
1422  xp -- field point in X direction\n
1423  yp -- field point in Y direction\n
1424 
1425  **Outputs:**
1426 
1427  A list with 2 elements: the first is the interpolated field component along
1428  the 'x' direction and the second is the component along the 'y' direction.
1429 
1430  """
1431  q=[]
1432  U = var1
1433  V = var2
1434  i0 = np.abs(xp-x).argmin()
1435  j0 = np.abs(yp-y).argmin()
1436  scrhUx = np.interp(xp,x,U[:,j0])
1437  scrhUy = np.interp(yp,y,U[i0,:])
1438  q.append(scrhUx + scrhUy - U[i0,j0])
1439  scrhVx = np.interp(xp,x,V[:,j0])
1440  scrhVy = np.interp(yp,y,V[i0,:])
1441  q.append(scrhVx + scrhVy - V[i0,j0])
1442  return q
1443 
1444  def field_line(self,var1,var2,x,y,dx,dy,x0,y0):
1445  """ This method is used to obtain field lines (same as fieldline.pro in PLUTO IDL tools).
1446 
1447  **Inputs:**
1448 
1449  var1 -- 2D Vector field in X direction\n
1450  var2 -- 2D Vector field in Y direction\n
1451  x -- 1D X array\n
1452  y -- 1D Y array\n
1453  dx -- 1D grid spacing array in X direction\n
1454  dy -- 1D grid spacing array in Y direction\n
1455  x0 -- foot point of the field line in X direction\n
1456  y0 -- foot point of the field line in Y direction\n
1457 
1458  **Outputs:**
1459 
1460  This routine returns a dictionary with keys - \n
1461  qx -- list of the field points along the 'x' direction.
1462  qy -- list of the field points along the 'y' direction.
1463 
1464  **Usage:**
1465 
1466  See the myfieldlines routine for the same.
1467  """
1468  xbeg = x[0] - 0.5*dx[0]
1469  xend = x[-1] + 0.5*dx[-1]
1470 
1471  ybeg = y[0] - 0.5*dy[0]
1472  yend = y[-1] + 0.5*dy[-1]
1473 
1474  inside_domain = x0 > xbeg and x0 < xend and y0 > ybeg and y0 < yend
1475 
1476  MAX_STEPS = 5000
1477  xln_fwd = [x0]
1478  yln_fwd = [y0]
1479  xln_bck = [x0]
1480  yln_bck = [y0]
1481  rhs = []
1482  k = 0
1483 
1484  while inside_domain == True:
1485  R1 = self.field_interp(var1,var2,x,y,dx,dy,xln_fwd[k],yln_fwd[k])
1486  dl = 0.5*np.max(np.concatenate((dx,dy)))/(np.sqrt(R1[0]*R1[0] + R1[1]*R1[1] + 1.e-14))
1487  xscrh = xln_fwd[k] + 0.5*dl*R1[0]
1488  yscrh = yln_fwd[k] + 0.5*dl*R1[1]
1489 
1490  R2 = self.field_interp(var1,var2,x,y,dx,dy,xscrh,yscrh) # evaluate at the midpoint (second RK2 stage)
1491 
1492  xln_one = xln_fwd[k] + dl*R2[0]
1493  yln_one = yln_fwd[k] + dl*R2[1]
1494 
1495  xln_fwd.append(xln_one)
1496  yln_fwd.append(yln_one)
1497  inside_domain = xln_one > xbeg and xln_one < xend and yln_one > ybeg and yln_one < yend
1498  inside_domain = inside_domain and (k < MAX_STEPS-3)
1499  k = k + 1
1500 
1501 
1502  k_fwd = k
1503  qx = np.asarray(xln_fwd[0:k_fwd])
1504  qy = np.asarray(yln_fwd[0:k_fwd])
1505  flines={'qx':qx,'qy':qy}
1506 
1507  return flines
1508 
1509 
1510  def myfieldlines(self,Data,x0arr,y0arr,stream=False,**kwargs):
1511  """ This method overplots the magnetic field lines at the footpoints given by (x0arr[i],y0arr[i]).
1512 
1513  **Inputs:**
1514 
1515  Data -- pyPLUTO.pload object\n
1516  x0arr -- array of x co-ordinates of the footpoints\n
1517  y0arr -- array of y co-ordinates of the footpoints\n
1518  stream -- keyword for two different ways of calculating the field lines.\n
1519  True -- plots contours of rAphi (needs to store vector potential)\n
1520  False -- plots the fieldlines obtained from the field_line routine. (Default option)
1521 
1522  *Optional Keywords:*
1523 
1524  colors -- A list of matplotlib colors to represent the lines. The length of this list should be same as that of x0arr.\n
1525  lw -- Integer value that determines the linewidth of each line.\n
1526  ls -- Determines the linestyle of each line.
1527 
1528  **Usage:**
1529 
1530  Assume that the magnetic field is given as **B** = B0$\hat{y}$.
1531  Then to show these field lines we have to define the x and y arrays of field foot points.\n
1532 
1533  ``x0arr = linspace(0.0,10.0,20)``\n
1534  ``y0arr = linspace(0.0,0.0,20)``\n
1535  ``import pyPLUTO as pp``\n
1536  ``D = pp.pload(45)``\n
1537  ``I = pp.Image()``\n
1538  ``I.myfieldlines(D,x0arr,y0arr,colors='k',ls='--',lw=1.0)``
1539  """
1540 
1541  if len(x0arr) != len(y0arr) : print "Input Arrays should have same size"
1542  QxList=[]
1543  QyList=[]
1544  StreamFunction = []
1545  levels =[]
1546  if stream == True:
1547  X, Y = np.meshgrid(Data.x1,Data.x2.T)
1548  StreamFunction = X*(Data.Ax3.T)
1549  for i in range(len(x0arr)):
1550  nx = np.abs(X[0,:]-x0arr[i]).argmin()
1551  ny = np.abs(X[:,0]-y0arr[i]).argmin()
1552  levels.append(X[ny,nx]*Data.Ax3.T[ny,nx])
1553 
1554  contour(X,Y,StreamFunction,levels,colors=kwargs.get('colors'),linewidths=kwargs.get('lw',1),linestyles=kwargs.get('ls','solid'))
1555  else:
1556  for i in range(len(x0arr)):
1557  QxList.append(self.field_line(Data.bx1,Data.bx2,Data.x1,Data.x2,Data.dx1,Data.dx2,x0arr[i],y0arr[i]).get('qx'))
1558  QyList.append(self.field_line(Data.bx1,Data.bx2,Data.x1,Data.x2,Data.dx1,Data.dx2,x0arr[i],y0arr[i]).get('qy'))
1559  plot(QxList[i],QyList[i],color=kwargs.get('colors'))
1560  axis([min(Data.x1),max(Data.x1),min(Data.x2),max(Data.x2)])
1561 
1562  def getSphData(self,Data,w_dir=None,datatype=None,**kwargs):
1563  """This method transforms the vector and scalar fields from Spherical co-ordinates to Cylindrical.
1564 
1565  **Inputs**:
1566 
1567  Data -- pyPLUTO.pload object\n
1568  w_dir -- /path/to/the/working/directory/\n
1569  datatype -- If the data is of 'float' type then datatype = 'float' else by default the datatype is set to 'double'.
1570 
1571  *Optional Keywords*:
1572 
1573  rphi -- [Default] is set to False implies that the r-theta plane is transformed. If set True then the r-phi plane is transformed.\n
1574  x2cut -- Applicable for 3D data and it determines the co-ordinate of the x2 plane while r-phi is set to True.\n
1575  x3cut -- Applicable for 3D data and it determines the co-ordinate of the x3 plane while r-phi is set to False.
1576 
1577  """
1578 
1579  Tool = Tools()
1580  key_value_pairs = []
1581  allvars = []
1582  if w_dir is None: w_dir = curdir()
1583  for v in Data.vars:
1584  allvars.append(v)
1585 
1586  if kwargs.get('rphi',False)==True:
1587  R,TH = np.meshgrid(Data.x1,Data.x3)
1588  if Data.n3 != 1:
1589  for variable in allvars:
1590  key_value_pairs.append([variable,getattr(Data,variable)[:,kwargs.get('x2cut',0),:].T])
1591 
1592  SphData = dict(key_value_pairs)
1593  if ('bx1' in allvars) or ('bx2' in allvars):
1594  (SphData['b1c'],SphData['b3c']) = Tool.RTh2Cyl(R,TH,SphData.get('bx1'),SphData.get('bx3'))
1595  allvars.append('b1c')
1596  allvars.append('b3c')
1597  if ('vx1' in allvars) or ('vx2' in allvars):
1598  (SphData['v1c'],SphData['v3c']) = Tool.RTh2Cyl(R,TH,SphData.get('vx1'),SphData.get('vx3'))
1599  allvars.append('v1c')
1600  allvars.append('v3c')
1601  else:
1602  print "No x3 plane for 2D data"
1603  else:
1604  R,TH = np.meshgrid(Data.x1,Data.x2)
1605  if Data.n3 != 1:
1606  for variable in allvars:
1607  key_value_pairs.append([variable,getattr(Data,variable)[:,:,kwargs.get('x3cut',0)].T])
1608  SphData = dict(key_value_pairs)
1609  if ('bx1' in allvars) or ('bx2' in allvars):
1610  (SphData['b1c'],SphData['b2c']) = Tool.RTh2Cyl(R,TH,SphData.get('bx1'),SphData.get('bx2'))
1611  allvars.append('b1c')
1612  allvars.append('b2c')
1613  if ('vx1' in allvars) or ('vx2' in allvars):
1614  (SphData['v1c'],SphData['v2c']) = Tool.RTh2Cyl(R,TH,SphData.get('vx1'),SphData.get('vx2'))
1615  allvars.append('v1c')
1616  allvars.append('v2c')
1617  else:
1618  for variable in allvars:
1619  key_value_pairs.append([variable,getattr(Data,variable)[:,:].T])
1620  SphData = dict(key_value_pairs)
1621  if ('bx1' in allvars) or ('bx2' in allvars):
1622  (SphData['b1c'],SphData['b2c']) = Tool.RTh2Cyl(R,TH,SphData.get('bx1'),SphData.get('bx2'))
1623  allvars.append('b1c')
1624  allvars.append('b2c')
1625  if ('vx1' in allvars) or ('vx2' in allvars):
1626  (SphData['v1c'],SphData['v2c']) = Tool.RTh2Cyl(R,TH,SphData.get('vx1'),SphData.get('vx2'))
1627  allvars.append('v1c')
1628  allvars.append('v2c')
1629 
1630  for variable in allvars:
1631  if kwargs.get('rphi',False)==True:
1632  R,Z,SphData[variable]= Tool.sph2cyl(Data,SphData.get(variable),rphi=True,theta0=Data.x2[kwargs.get('x2cut',0)])
1633  else:
1634  R,Z,SphData[variable] = Tool.sph2cyl(Data,SphData.get(variable),rphi=False)
1638 
1639  return R,Z,SphData
1640 
1641 
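# A usage sketch for getSphData, assuming a 2D spherical run with 'rho' and the
# field components bx1/bx2 among the loaded variables:
#
#   I = Image()
#   R, Z, S = I.getSphData(D, w_dir=D.wdir)
#   S['rho']               # density remapped onto the cylindrical (R,Z) plane
#   S['b1c'], S['b2c']     # cylindrical field components built via RTh2Cyl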
1642  def getPolarData(self, Data, ang_coord, rphi=False):
1643  """To get the Cartesian Co-ordinates from Polar.
1644 
1645  **Inputs:**
1646 
1647  Data -- pyPLUTO pload Object\n
1648  ang_coord -- The Angular co-ordinate (theta or Phi)
1649 
1650  *Optional Keywords:*
1651 
1652  rphi -- Default value FALSE is for R-THETA data,
1653  Set TRUE for R-PHI data.\n
1654 
1655  **Outputs**:
1656 
1657  2D Arrays of X, Y from the Radius and Angular co-ordinates.\n
1658  They are used in pcolormesh in the Image.pldisplay functions.
1659  """
1660  D = Data
1661  if ang_coord is D.x2:
1662  x2r = D.x2r
1663  elif ang_coord is D.x3:
1664  x2r = D.x3r
1665  else:
1666  print "Angular co-ordinate must be given"
1667 
1668  rcos = np.outer(np.cos(x2r), D.x1r)
1669  rsin = np.outer(np.sin(x2r), D.x1r)
1670 
1671  if rphi:
1672  xx = rcos
1673  yy = rsin
1674  else:
1675  xx = rsin
1676  yy = rcos
1677 
1678  return xx, yy
1679 
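# A usage sketch for getPolarData, assuming a 2D polar (r-phi) run with 'rho'
# loaded; the returned xx, yy are cell-edge meshes suitable for pcolormesh:
#
#   import matplotlib.pyplot as plt
#   I = Image()
#   xx, yy = I.getPolarData(D, D.x2, rphi=True)
#   plt.pcolormesh(xx, yy, D.rho.T)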
1680  def pltSphData(self,Data,w_dir=None,datatype=None,**kwargs):
1681  """This method plots the transformed data obtained from getSphData using matplotlib's imshow.
1682 
1683  **Inputs:**
1684 
1685  Data -- pyPLUTO.pload object\n
1686  w_dir -- /path/to/the/working/directory/\n
1687  datatype -- Datatype.
1688 
1689  *Required Keywords*:
1690 
1691  plvar -- A string which represents the plot variable.\n
1692 
1693  *Optional Keywords*:
1694 
1695  logvar -- [Default = False] Set it True for plotting the log of a variable.\n
1696  rphi -- [Default = False - for plotting in r-theta plane] Set it True for plotting the variable in r-phi plane.
1697 
1698  """
1699 
1700  if w_dir is None: w_dir=curdir()
1701  R,Z,SphData = self.getSphData(Data,w_dir=w_dir,datatype=datatype,**kwargs)
1702 
1703  extent=(np.min(R.flat),max(R.flat),np.min(Z.flat),max(Z.flat))
1704  dRR=max(R.flat)-np.min(R.flat)
1705  dZZ=max(Z.flat)-np.min(Z.flat)
1706 
1707 
1708  isnotnan=~np.isnan(SphData[kwargs.get('plvar')])
1709  maxPl=max(SphData[kwargs.get('plvar')][isnotnan].flat)
1710  minPl=np.min(SphData[kwargs.get('plvar')][isnotnan].flat)
1711  normrange=False
1712  if minPl<0:
1713  normrange=True
1714  if maxPl>-minPl:
1715  minPl=-maxPl
1716  else:
1717  maxPl=-minPl
1718  if (normrange and kwargs.get('plvar')!='rho' and kwargs.get('plvar')!='prs'):
1719  SphData[kwargs.get('plvar')][-1][-1]=maxPl
1720  SphData[kwargs.get('plvar')][-1][-2]=minPl
1721 
1722  if (kwargs.get('logvar') == True):
1723  SphData[kwargs.get('plvar')] = np.log10(SphData[kwargs.get('plvar')])
1724 
1725  imshow(SphData[kwargs.get('plvar')], aspect='equal', origin='lower', cmap=cm.jet,extent=extent, interpolation='nearest')
1726 
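# A usage sketch for pltSphData, assuming a spherical run with a density
# variable 'rho' loaded (D3 denotes a hypothetical 3D pload object):
#
#   I = Image()
#   I.pltSphData(D, w_dir=D.wdir, plvar='rho', logvar=True)   # r-theta plane
#   # for 3D data, an r-phi cut at a theta index (x2cut, here 16) is possible:
#   I.pltSphData(D3, w_dir=D3.wdir, plvar='rho', rphi=True, x2cut=16)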