core.py
import numpy as np
import pandas as pd
import h5py
# from progressbar import ProgressBar
import time
from PandAna.core.filesource import *
import os
import sys

# How to index the data
KL = ['run', 'subrun', 'cycle', 'evt', 'subevt']
KLN = ['run', 'subrun', 'cycle', 'evt']
KLS = ['run', 'subrun', 'evt']

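# A sketch of what this indexing means in plain pandas (the 'nhit' leaf below is
# an illustrative assumption, not something this module defines):
#
#   df = pd.DataFrame({'run': [100], 'subrun': [1], 'cycle': [0],
#                      'evt': [42], 'subevt': [1], 'nhit': [57]})
#   df.set_index(KL, inplace=True)  # rows now keyed by (run, subrun, cycle, evt, subevt)
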
class spectrum():
    def __init__(self, tables, cut, var, weight=None, name=None):
        self._name = name

        # save the var and cut functions so we can call __init__ during fill
        self._varfcn = var
        self._cutfcn = cut
        self._weightfcn = weight

        # keep a reference to the loader for fill
        self._tables = tables

        # associate this spectrum and its cut with the loader for filling
        tables.add_spectrum(self)

        # compute cut, var, and weights.
        # tables starts as an empty cache of all the necessary branches and leaves;
        # after tables.Go() the branches and leaves are filled with data from the given files
        self._cut = cut(self._tables)
        self._df = var(self._tables)
        self._df = self._df.dropna()

        # initial weights are all 1
        self._weight = pd.Series(1, self._df.index, name='weight')
        if weight:
            # apply all the weights
            if type(weight) is list:
                for w in weight:
                    self._weight = w(tables, self._weight)
            else: self._weight = weight(tables, self._weight)

    def fill(self):
        # loader.Go() has been called
        self.__init__(self._tables, self._cutfcn, self._varfcn, weight=self._weightfcn, name=self._name)

        # Just to be sure...
        assert np.array_equal(self._df.index, self._weight.index), 'var and weights have different rows'

        # reset the tables' global index
        self._tables.reset_index()

        # set the dataframe name if desired
        if self._name: self._df = self._df.rename(self._name)

        # grab the spectrum POT from tables
        self._POT = self._tables._POT

    def POT(self):
        return self._POT

    def df(self):
        return self._df

    def weight(self):
        return self._weight

    def histogram(self, bins=None, range=None, POT=None):
        if not POT: POT = self._POT
        n, bins = np.histogram(self._df, bins, range, weights=self._weight)
        return n*POT/self._POT, bins

    def entries(self):
        return self._df.shape[0]

    def integral(self, POT=None):
        if not POT: POT = self._POT
        return self._weight.sum()*POT/self._POT

    def to_text(self, fileName, sep=' ', header=False):
        self._df.to_csv(fileName, sep=sep, index=True, header=header)

    def __add__(self, b):
        df = pd.concat([self._df, b._df])
        pot = self._POT + b._POT
        return filledSpectrum(df, pot)

    def __and__(self, b):
        assert self._POT == b._POT, "Spectra should have the same POT"
        if self._weight is not None and b._weight is not None:
            assert np.array_equal(self._weight, b._weight), "Spectra should have the same weights"

        df = pd.concat([self._df, b._df], axis=1)
        pot = self._POT
        weight = self._weight
        return filledSpectrum(df, pot, weight=weight)

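# A minimal usage sketch, assuming a CAF-like file; the file name, group, and
# leaf names are illustrative assumptions, not guaranteed by this module:
#
#   tables = loader('myfile.h5')
#   kNHit = Var(lambda tables: tables['rec.slc']['nhit'])
#   spec = spectrum(tables, kNHit > 20, kNHit)
#   tables.Go()
#   n, bins = spec.histogram(bins=50, range=(0, 200))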

# For constructing spectra without having to fill
class filledSpectrum(spectrum):
    def __init__(self, df, pot, weight=None):
        self._df = df
        self._POT = pot

        if weight is not None:
            self._weight = weight
        else:
            self._weight = pd.Series(1, self._df.index, name='weight')

    def fill(self):
        print('This spectrum was constructed already filled.')

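# Filled spectra also come out of the spectrum operators above. For example, with
# spec1 and spec2 assumed to be already-filled spectra from independent datasets:
#
#   total = spec1 + spec2          # rows concatenated, POT summed
#   print(total.entries(), total.POT())
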
# Save spectra to an hdf5 file. Takes a single spectrum or a list of spectra.
def save_spectra(fname, spectra, groups):
    if not type(spectra) is list: spectra = [spectra]
    if not type(groups) is list: groups = [groups]
    assert len(spectra) == len(groups), 'Each spectrum must have a group name.'

    store = pd.HDFStore(fname, 'w')

    # store each spectrum's dataframe and weights under its group,
    # with the POT attached as an attribute of the dataframe's storer
    for spectrum, group in zip(spectra, groups):
        print(spectrum.df().head())
        store[group+'/dataframe'] = spectrum.df()
        store.get_storer(group+'/dataframe').attrs.pot = spectrum.POT()
        store[group+'/weights'] = spectrum.weight()

    store.close()

# alternate save data function that doesn't utilise pytables
def save_tree(fname, spectra, groups, attrs=True):
    if not type(spectra) is list: spectra = [spectra]
    if not type(groups) is list: groups = [groups]
    assert len(spectra) == len(groups), 'Each spectrum must have a group name.'

    f = h5py.File(fname, 'w')
    for spectrum, group in zip(spectra, groups):
        g = f.create_group(group)
        df = spectrum.df()
        vals = df.values
        ismap = 'map' in group
        if ismap:
            for i in range(len(vals)):
                vals[i] = vals[i].reshape(1, vals[i].shape[0])
            vals = np.stack(np.concatenate(vals), axis=0)

        g.create_dataset('df', data=vals)
        if type(df) == pd.Series:
            g.create_dataset('name', data=[df.name])
        else:
            g.create_dataset('name', data=df.columns.values.astype('S'))
        if attrs:
            g.create_dataset('pot', data=spectrum.POT())
            g.create_dataset('weights', data=spectrum.weight())
        index = df.index.names
        indexdf = df.reset_index()
        for name in index:
            g.create_dataset(name, data=indexdf[name].values)

    f.close()

# Load spectra from a file. Takes one or a list of group names to read.
def load_spectra(fname, groups):
    if not type(groups) is list: groups = [groups]

    store = pd.HDFStore(fname, 'r')

    ret = []
    for group in groups:
        df = store[group+'/dataframe']
        pot = store.get_storer(group+'/dataframe').attrs.pot
        weight = store[group+'/weights']

        ret.append(filledSpectrum(df, pot, weight=weight))

    store.close()

    if len(groups) == 1: return ret[0]
    return ret
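
# A round-trip sketch for save_spectra/load_spectra (the group names are arbitrary):
#
#   save_spectra('spectra.h5', [spec1, spec2], ['numu', 'nue'])
#   spec1_again, spec2_again = load_spectra('spectra.h5', ['numu', 'nue'])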

def load_tree(fname, groups, attrs=True):
    if not type(groups) is list: groups = [groups]
    f = h5py.File(fname, 'r')
    ret = []
    for group in groups:
        data = f[group+'/df'][()]
        spec = {}
        name = {}
        cols = ['df0']
        if len(data.shape) > 1:
            cols = ['df'+str(i) for i in range(data.shape[1])]
        coldata = [data]
        if len(data.shape) > 1 and data.shape[1] != 1:
            coldata = [data[:, i] for i in range(data.shape[1])]
        spec = dict(zip(cols, coldata))
        index = []
        for key in f[group].keys():
            if key != 'pot' and key != 'weights' and key != 'df' and key != 'name':
                spec[key] = f[group+'/'+key][()]
                index.append(key)
        df = pd.DataFrame(spec)
        df.set_index(index, inplace=True)
        if 'name' in f[group].keys():
            name = f[group+'/name'][()]
            df.columns = name

        if attrs:
            pot = f[group+'/pot'][()]
            weights = pd.Series(f[group+'/weights'][()], df.index, name='weight')
            ret.append(filledSpectrum(df, pot, weight=weights))
        else:
            ret.append(df)

    f.close()
    return dict(zip(groups, ret))
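
# The pytables-free analogue of the round trip above; note that load_tree returns
# a dict keyed by group name rather than a list:
#
#   save_tree('tree.h5', [spec], ['numu'], attrs=True)
#   spec_again = load_tree('tree.h5', ['numu'], attrs=True)['numu']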

class Var():
    def __init__(self, var):
        self._var = var

    def __call__(self, tables):
        return self._var(tables)

    def __eq__(self, val):
        return Cut(lambda tables: self(tables) == val)

    def __ne__(self, val):
        return Cut(lambda tables: self(tables) != val)

    def __lt__(self, val):
        return Cut(lambda tables: self(tables) < val)

    def __le__(self, val):
        return Cut(lambda tables: self(tables) <= val)

    def __gt__(self, val):
        return Cut(lambda tables: self(tables) > val)

    def __ge__(self, val):
        return Cut(lambda tables: self(tables) >= val)

    def __add__(self, other):
        return Var(lambda tables: self(tables) + other(tables))

    def __sub__(self, other):
        return Var(lambda tables: self(tables) - other(tables))

    def __mul__(self, other):
        return Var(lambda tables: self(tables)*other(tables))

    def __truediv__(self, other):
        return Var(lambda tables: self(tables)/other(tables))

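# Vars compose with arithmetic operators and yield Cuts under comparisons, so
# selections can be written declaratively (the leaf names are illustrative):
#
#   kX = Var(lambda tables: tables['rec.vtx']['x'])
#   kY = Var(lambda tables: tables['rec.vtx']['y'])
#   kSum = kX + kY        # still a Var
#   kPosCut = kSum > 0    # comparisons return a Cut
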
class Cut():
    def __init__(self, cut, invert=False):
        if type(cut) is not list: cut = [cut]
        if type(invert) is not list: invert = [invert]
        assert len(cut) == len(invert), "invalid cut definition!"

        self._cut = list(cut)
        self._invert = list(invert)

        # index that runs over the cut list
        self.filteridx = 0

        # use these to keep track of cuts already computed
        self._filter = [None]*len(self._cut)
        self._cutid = [None]*len(self._cut)

    def reset_cutindices(self):
        # need to reset after use by the loader
        self._filter = [None]*len(self._cut)
        self._cutid = [None]*len(self._cut)

    def __call__(self, tables):
        if not tables.gone:
            cutlist = [(~c(tables) if b else c(tables)) for c, b in zip(self._cut, self._invert)]
            if not tables.interactive:
                # tables is empty anyway, so this takes negligible time;
                # return a dummy cut series
                return cutlist[0]
            else:
                cut_df = pd.concat(cutlist, axis=1).all(axis=1)
                cutidx = cut_df.index[np.where(cut_df)]
                tables._tables['indices'] = cutidx
                return cut_df

        # cutid holds the filtered index list after applying the cut on the entire dataset
        cutidx = self._cutid[self.filteridx]
        # actual cut that was already computed
        applycut = self._filter[self.filteridx]

        # cut is being computed for the first time
        if cutidx is None:
            cut0 = self._cut[self.filteridx](tables)
            if self._invert[self.filteridx]:
                cut0 = ~cut0

            # find the filtered index list
            cutidx = cut0.index[np.where(cut0)]

            applycut = cut0
            self._cutid[self.filteridx] = cutidx
            self._filter[self.filteridx] = applycut

        self.filteridx += 1

        # check whether the filtered index list is empty; if so, stop computing the other cuts
        canfiltermore = all([len(cutidx.codes[k]) for k in range(len(cutidx.codes))])

        # if it's not empty, run the next cut on the filtered list rather than the entire dataset
        if len(self._cut) > self.filteridx and canfiltermore:
            return self(tables[cutidx])
        else:
            # use the filtered index list for evaluation of the var that comes later
            tables._tables['indices'] = cutidx
            self.filteridx = 0
            self.reset_cutindices()
            return applycut

    def __and__(self, other):
        return Cut(self._cut + other._cut, self._invert + other._invert)

    def __invert__(self):
        cut = Cut(self._cut[0], not self._invert[0])
        for i in range(1, len(self._cut)):
            cut = cut | Cut(self._cut[i], not self._invert[i])
        return cut

    def __or__(self, other):
        def orcut(tables):
            # evaluating a cut mutates the tables' global index,
            # so save it and restore it before evaluating the second cut
            idx = tables._tables['indices']
            df1 = self(tables)
            tables._tables['indices'] = idx
            df2 = other(tables)
            compare = pd.concat([df1, df2], axis=1, join='outer').fillna(False)
            return compare.any(axis=1)
        return Cut(orcut)

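# Cuts compose with &, | and ~. An & of two Cuts concatenates their cut lists so
# the second is evaluated only on rows passing the first, while | evaluates both
# sides from the same starting index. For example (kCutA, kCutB assumed Cuts):
#
#   kBoth = kCutA & kCutB
#   kEither = kCutA | kCutB
#   kNotA = ~kCutA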

class dfproxy(pd.DataFrame):
    # proxy for a dataframe that builds a cache of columns needed to be read from the files
    # needed before Go() so loader knows what to load
    _internal_names = pd.DataFrame._internal_names + ['_proxycols']
    _internal_names_set = set(_internal_names)

    @property
    def _constructor(self):
        return dfproxy

    def __init__(self, data=[], **kwargs):
        pd.DataFrame.__init__(self, data, **kwargs)
        self._proxycols = list(self.columns.values)

    def __getitem__(self, key):
        # add the column
        if type(key) is str and not key in self._proxycols:
            self._proxycols.append(key)
            self.__setitem__(key, np.nan)
            return self.__getitem__(key)
        # or all the columns
        if type(key) is list and not set(key) <= set(self._proxycols):
            for k in key:
                self._proxycols.append(k)
                self.__setitem__(k, np.nan)
            return self.__getitem__(key)
        # assume the dataframe is being sliced inside a cut/var, don't do anything
        if type(key) is not str and type(key) is not list:
            return self
        return pd.DataFrame.__getitem__(self, key)

    def __setitem__(self, key, val):
        pd.DataFrame.__setitem__(self, key, val)

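# A sketch of the proxy behaviour: before Go(), asking a dfproxy for an unknown
# column registers it instead of raising, which is how cuts and vars advertise
# the leaves they will need:
#
#   proxy = dfproxy(columns=KL)
#   _ = proxy['nhit']          # hypothetical leaf; no KeyError, column is cached
#   print(proxy._proxycols)    # KL + ['nhit']
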
class loader():
    def __init__(self, filesource, stride=1, offset=0, limit=None, spillcuts=None, index=None):

        self._files = sourcewrapper(filesource, stride, offset, limit)

        # _tables stores the entire dataset read from the files;
        # the 'indices' key holds the global index range to be accessed from the dataset by a cut/var
        self._tables = {'indices': None}
        self.gone = False
        self.interactive = False
        self.histdefs = []
        self.index = index
        self.dflist = {}
        self._spillcuts = spillcuts

        # add extra spectra to keep track of exposure
        self._POT = 0
        self.sum_POT()

    def getSource(self):
        return self._files

    def sum_POT(self):
        self.summing = True

        # If not gone, construct the POT spectra
        if not self.gone:
            self._potspecnocut = spectrum(self, lambda tables: tables['spill']['spillpot'] > 0, \
                                          lambda tables: tables['spill']['spillpot'])

            if self._spillcuts:
                self._potspeccut = spectrum(self, self._spillcuts, \
                                            lambda tables: tables['spill']['spillpot'])
            else:
                self._potspeccut = None

        # If gone, fill the spectra and compute the POT
        else:
            self._potspecnocut.fill()
            self._POTBase = self._potspecnocut.df().sum()
            if self._potspeccut:
                self._potspeccut.fill()
                self._POT = self._potspeccut.df().sum()
            else:
                # Use the base POT if not using spill cuts
                self._POT = self._POTBase
            frac = 100*self._POT/self._POTBase
            print('Found {:0.5E} POT passing spillcuts from {:0.5E} POT ({:0.1f}%).'.format(self._POT, self._POTBase, frac))
        self.summing = False

    def add_spectrum(self, spec):
        if not spec in self.histdefs:
            if self._spillcuts is not None and not self.summing:
                spec._cutfcn = spec._cutfcn & self._spillcuts

            self.histdefs.append(spec)

    def reset_index(self):
        # reset after each spectrum fill
        self._tables['indices'] = None

    def __setitem__(self, key, df):
        # set the multiindex for recTree data
        index = KL if key.startswith('rec') else KLN if key.startswith('neutrino') else KLS
        if self.index and key.startswith('rec'):
            index = self.index
        df.set_index(index, inplace=True)
        self._tables[key] = df

    def __getitem__(self, key):
        if not self.summing:
            if type(key) == str and key.startswith('spill'):
                key = 'rec.' + key
        # actually build the cache before Go()
        if type(key) == str and not key in self._tables:
            # Pick up the right index
            index = KL if key.startswith('rec') else KLN if key.startswith('neutrino') else KLS
            if self.index and key.startswith('rec'):
                index = self.index
            self[key] = dfproxy(columns=index)
        # assume key is a filtered index range after a cut
        if type(key) is not str:
            self._tables['indices'] = key
            return self
        # no filtering
        if self._tables['indices'] is None:
            return self._tables[key]
        # use the global index to slice the requested dataframe
        elif self._tables[key].dropna().empty:
            # sometimes there's no data available in the file; allow it but warn
            print("Warning! No data read for %s" % key)
            return self._tables[key]
        else:
            if self._tables[key].index.intersection(self._tables['indices']).empty:
                return dfproxy(columns=self._tables[key].index.names)
            else:
                dfslice = self._tables[key].loc[self._tables['indices']]
                return dfslice

    def setupGo(self):
        if self.gone:
            return
        self.gone = True
        self._filegen = self._files()

        print("Reading data from %s files :\n" % self._filegen.nFiles())

    def getFile(self):
        return self._filegen()

    def setFile(self, f):
        self.openfile = f

    def closeFile(self):
        self.openfile.close()

    def readData(self):
        for key in self._tables:
            if key == 'indices':
                continue
            if not key in self.dflist:
                self.dflist[key] = []
            # branches from the cache
            if not key in self.openfile.keys():
                print("Group %s doesn't exist!" % key)
                sys.exit(2)
            group = self.openfile.get(key)
            values = {}
            # leaves from the cache
            keycache = self._tables[key]._proxycols
            for k in keycache:
                try:
                    dataset = group.get(k)[()]
                except TypeError:
                    print("Dataset %s for group %s doesn't exist!" % (k, group))
                    sys.exit(2)
                if dataset.shape[1] == 1:
                    dataset = dataset.flatten()
                else:
                    dataset = list(dataset)
                values[k] = dataset
            self.dflist[key].append(pd.DataFrame(values))

    def fillSpectra(self):
        self.concat_time = time.time()
        for key in self.dflist:
            # set the index for all dataframes
            self[key] = pd.concat(self.dflist[key])
            # sort the index
            self._tables[key].sort_index(inplace=True)
        self.dflist = {}

        # Compute the POT and then fill the spectra
        self.sum_POT()

        # Let's not refill these
        self.histdefs.remove(self._potspecnocut)
        if self._potspeccut:
            self.histdefs.remove(self._potspeccut)

        spec_idx = 0
        # spec_progbar = ProgressBar(len(self.histdefs))
        print("Filling %s spectra\n" % len(self.histdefs))
        for spec in self.histdefs:
            spec_idx += 1
            # spec_progbar.update(spec_idx)
            spec.fill()

    def Go(self):
        t0 = time.time()
        self.setupGo()
        file_idx = 0
        # file_progbar = ProgressBar(self._filegen.nFiles())
        while True:
            try:
                fname = self.getFile()
                self.setFile(h5py.File(fname, 'r'))
                self.readData()
                self.closeFile()

                file_idx += 1
                # file_progbar.update(file_idx)
            except StopIteration:
                break

        self.fillSpectra()
        # cleanup
        self.cleanup()
        print("\nTotal time : %s sec\n" % (time.time() - t0))

    def cleanup(self):
        # free up some memory
        self._tables = {'indices': None}
        # remove associations with spectra
        self.histdefs = []

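# The intended fill cycle, end to end (the file name, cut, and var below are
# assumptions for illustration):
#
#   tables = loader('myfile.h5')
#   spec = spectrum(tables, kSomeCut, kSomeVar)   # registers itself via add_spectrum
#   tables.Go()                                   # read the files, then fill every spectrum
#   print(spec.entries(), spec.POT())
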
# Different loaders end up starting their own SAM projects, even for exactly the same queries,
# and there is no guarantee that they'll run over the files in the same order.
# Coupled with the fact that the projects can be shared over different grid jobs,
# this can result in unexpected behaviour if the macro expects them to share the same data downstream.
# This class allows the user to use a single project over multiple loaders.
class associate(loader):
    def __init__(self, loaders):
        self.loaders = loaders
        assert len(self.loaders) > 0, "Can't associate an empty list of loaders!"
        # ensure all loaders have the same file source ID
        assert all([self.loaders[0].getSource() == l.getSource() for l in self.loaders]), \
            "Can only associate loaders with identical queries"

        self._files = self.loaders[0].getSource()
        self.gone = False

    def Go(self):
        t0 = time.time()
        self.setupGo()
        print("Associating %d loaders with the same dataset :\n" % len(self.loaders))
        file_idx = 0
        # file_progbar = ProgressBar(self._filegen.nFiles())
        while True:
            try:
                fname = self.getFile()
                self.setFile(h5py.File(fname, 'r'))
                for ldr in self.loaders:
                    ldr.gone = True
                    ldr.setFile(self.openfile)
                    ldr.readData()
                self.closeFile()
                file_idx += 1

            except StopIteration:
                break

        ldr_idx = 1
        for ldr in self.loaders:
            print("\n------------------------------")
            print("Filling spectra for loader %d" % ldr_idx)
            ldr.fillSpectra()
            ldr.cleanup()
            ldr_idx += 1
        print("\nTotal time : %s sec\n" % (time.time() - t0))

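# A sketch of sharing one project across loaders built from the same query
# (some_query is a placeholder):
#
#   ldr1 = loader(some_query)
#   ldr2 = loader(some_query)
#   associate([ldr1, ldr2]).Go()   # each file is read once and fed to both loaders
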
class interactive_loader():
    def __init__(self, files):
        if type(files) is not list:
            files = [files]
        self._files = files
        self.gone = False
        self.interactive = True
        self._tables = {'indices': None}

    def keys(self, contain=None):
        f = self._files[0]
        h5 = h5py.File(f, 'r')
        # copy the keys so the list survives closing the file
        keys = list(h5.keys())
        for k in keys:
            if contain:
                if contain in k:
                    print(k)
            else:
                print(k)
        h5.close()

        return keys

    def __getitem__(self, key):
        if not key in self._tables:
            dflist = []
            for fname in self._files:
                f = h5py.File(fname, 'r')
                group = f.get(key)
                values = {}
                for k in group.keys():
                    dataset = group.get(k)[()]
                    if dataset.shape[1] == 1:
                        dataset = dataset.flatten()
                    else:
                        dataset = list(dataset)
                    values[k] = dataset
                dflist.append(pd.DataFrame(values))
                f.close()
            df = pd.concat(dflist)
            if not (key.startswith('spill') or key.startswith('neutrino')):
                df.set_index(KL, inplace=True)
            self._tables[key] = df
        return self._tables[key]