SupernovaUtilities.py
#!/usr/bin/env python

# A bunch of useful functions for supernova studies
from math import fabs, exp, log10
from ROOT import TGraphAsymmErrors, TH1F, TH2F
from array import array
# Alpha function

def alpha_function(energy, alpha, mean_energy):
    if mean_energy == 0:
        return 1.
    # return (energy**(alpha)) * exp( - (alpha + 1.) * (energy / mean_energy) )
    return ((energy / mean_energy)**alpha) * exp(- (alpha + 1.) * (energy / mean_energy))


def fermi_dirac_function(energy, eta, T):
    if T == 0:
        return 1.
    return (energy * energy) / (1 + (exp((energy / T) - eta)))
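

# --- Illustrative example (not part of the original module) ---
# A minimal sketch of how the spectral shape functions are used further down
# in ParameterisedDataToHistograms: evaluate the shape on an energy grid and
# rescale it so that sum(shape * E) reproduces a chosen luminosity.
# The numbers (25 MeV mean energy, alpha = 3, 1e3 MeV/s) are assumptions for
# illustration only.
def _example_normalised_spectrum():
    mean_energy = 25.   # MeV (assumed)
    alpha = 3.          # pinching parameter (assumed)
    lumi_mev = 1.e3     # luminosity in MeV/s (assumed)
    energies = [1. + 0.2 * i for i in range(500)]  # simple MeV grid
    shape = [alpha_function(e, alpha, mean_energy) for e in energies]
    shape_sum = sum(s * e for s, e in zip(shape, energies))
    flux = [s * (lumi_mev / shape_sum) for s in shape]
    # cross check: the flux-weighted energy sum should give back lumi_mev
    print "example: sum(flux * E) = %.3f MeV/s" % sum(f * e for f, e in zip(flux, energies))
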
# Unit conversions


def convert_erg_to_mev(lumi_in_ergs):
    return lumi_in_ergs * (624.15 * 1000.)


def convert_mev_to_erg(lumi_in_mev):
    return lumi_in_mev / (624.15 * 1000.)
# The range function for floats


def drange(start, stop, step):
    r = start
    while (fabs((r - stop) / stop) > 0.00000001) and (r < stop):
        yield r
        r += step
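

# --- Illustrative example (not part of the original module) ---
# drange is a float-valued version of range(); the relative tolerance on
# (r - stop) stops the loop once accumulated floating point error brings r
# to within ~1e-8 of stop.
def _example_drange():
    grid = list(drange(0., 1., 0.25))
    print "example: drange(0, 1, 0.25) ->", grid  # [0.0, 0.25, 0.5, 0.75]
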
# Make a list of bins evenly spaced in logarithm


def MakeLogList(start, finish, n_bins, round_to_nearest=False, second_bin_start=False, verbose=False):
    if verbose:
        print "\nSUtil: --- Make log list"
        print "SUtil: start: ", start
        print "SUtil: finish: ", finish
        print "SUtil: n bins: ", n_bins
        print "SUtil: rounding: ", round_to_nearest
    zero_start = False
    if start == 0:
        if second_bin_start:
            start = second_bin_start
        elif round_to_nearest:
            start = round_to_nearest
        else:
            start = 1
        zero_start = True
    n_orders_magnitude = log10(finish) - log10(start)
    log_bin_width = n_orders_magnitude / float(n_bins)
    log_start = log10(start)
    log_finish = log10(finish)
    if round_to_nearest:
        bins = []
        for i in drange(log_start, log_finish, log_bin_width):
            raw = round_to_nearest * (int(10.**i) / int(round_to_nearest))
            if ((10.**i - raw) > (float(round_to_nearest) / 2.)):
                raw += round_to_nearest
            # print 10.**i,raw
            bins.append(raw)
    else:
        bins = [10**i for i in drange(log_start, log_finish, log_bin_width)]
    bins.append(finish)

    filtered_bins = []
    for bin in bins:
        if bin not in filtered_bins:
            filtered_bins.append(bin)

    tmp_gaps = []
    for i, bin in enumerate(filtered_bins):
        if i == 0:
            continue
        tmp_gaps.append(bin - filtered_bins[i - 1])

    if round_to_nearest:
        final_bins = [filtered_bins[0]]
        for i, gap in enumerate(tmp_gaps):
            if i != 0:
                if gap < max(tmp_gaps[:i]):
                    gap = max(tmp_gaps[:i])
            new_boundary = final_bins[i] + gap
            if new_boundary >= finish:
                final_bins.append(finish)
                break
            else:
                final_bins.append(new_boundary)
    else:
        final_bins = filtered_bins

    if zero_start:
        final_bins.insert(0, 0)

    gaps = []
    for i, bin in enumerate(final_bins):
        if i == 0:
            continue
        gaps.append(bin - final_bins[i - 1])

    if verbose:
        print "SUtil: n orders of magnitude: ", n_orders_magnitude
        print "SUtil: log bin width: ", log_bin_width
        print "SUtil: log start: ", log_start
        print "SUtil: log finish: ", log_finish
        print "SUtil: bins : ", final_bins

        print "SUtil: gaps : ", gaps
    #assert False
    return final_bins
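

# --- Illustrative example (not part of the original module) ---
# MakeLogList builds bin edges that are (approximately) evenly spaced in
# log10 between start and finish; the Livermore reader below uses
# MakeLogList(0.03, 10, 511) to define a common time grid. A smaller call:
def _example_log_list():
    edges = MakeLogList(0.03, 10., 10)
    print "example: %i roughly log-spaced edges:" % len(edges), edges
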
# Get the bin edges and return as a list


def GetBins(axis):
    this_bins = []
    for i in range(1, axis.GetNbins() + 1):
        lower_edge = axis.GetBinLowEdge(i)
        upper_edge = lower_edge + axis.GetBinWidth(i)
        if i == 1:
            this_bins.append(lower_edge)
        this_bins.append(upper_edge)
    return_bins = array("f", this_bins)
    # print return_bins
    #assert False
    return return_bins
# Convert a list of (x, y) pairs to a graph


def MakeGraph(points, name="graph"):
    # print "\nSUtil: --- Supernova Utilities: Make graph: %s"%name
    graph = TGraphAsymmErrors(len(points))
    graph.SetName(name)
    graph.SetTitle(name)
    for i, p in enumerate(points):
        graph.SetPoint(i, p[0], p[1])
    return graph
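

# --- Illustrative example (not part of the original module) ---
# MakeGraph wraps a list of (x, y) pairs in a ROOT TGraphAsymmErrors; the
# readers below rely on TGraph::Eval to interpolate tabulated values onto a
# common grid, e.g.:
def _example_graph_interpolation():
    g = MakeGraph([(0., 0.), (1., 2.), (2., 4.)], name="example_graph")
    # default Eval performs linear interpolation between the stored points
    print "example: Eval(0.5) =", g.Eval(0.5)  # expect 1.0
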
# Convert a list of points to a list of bin edges


def CreateListOfBins(points, centred=True):
    # if centred is set then the list will define the bin centres
    # if not, it will define the bin edges, with an extra bin edge added on the low side
    # lower bin edge
    if centred:
        initial_delta = (points[1] - points[0]) / 2.
        bins = [points[0] - initial_delta]

        # 0 -> n-1 bin edges
        for i, point in enumerate(points):
            if i == (len(points) - 1):
                continue
            delta = (points[i + 1] - point) / 2.
            bins.append(point + delta)

        # final bin edge
        final_delta = (points[-1] - points[-2]) / 2.
        bins.append(points[-1] + final_delta)
    else:
        initial_delta = (points[1] - points[0])
        bins = [points[0] - initial_delta]
        for i, point in enumerate(points):
            bins.append(point)

    return bins
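

# --- Illustrative example (not part of the original module) ---
# With centred=True the input values are treated as bin centres and the
# returned list holds the surrounding edges; with centred=False the values
# themselves become edges and one extra edge is prepended on the low side.
def _example_create_bins():
    print "example: centred bins:", CreateListOfBins([1., 2., 3.])                 # [0.5, 1.5, 2.5, 3.5]
    print "example: edge bins:   ", CreateListOfBins([1., 2., 3.], centred=False)  # [0.0, 1.0, 2.0, 3.0]
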
# Booking for the standard three flavour histograms


def BookThreeFlavourHistograms(time_bins, energy_bins):
    histograms = {}
    for i in range(3):
        # Luminosity in 10**50 ergs
        histograms["lumi_%i" % i] = TH1F(
            "lumi_%i" % i, "lumi_%i" % i, len(time_bins) - 1, time_bins)
        # Luminosity in MeV
        histograms["lumi_mev_%i" % i] = TH1F(
            "lumi_mev_%i" % i, "lumi_mev_%i" % i, len(time_bins) - 1, time_bins)
        # Energy in MeV
        histograms["E_%i" % i] = TH1F(
            "E_%i" % i, "E_%i" % i, len(time_bins) - 1, time_bins)
        # Alpha
        histograms["alpha_%i" % i] = TH1F(
            "alpha_%i" % i, "alpha_%i" % i, len(time_bins) - 1, time_bins)
        # Eta
        histograms["eta_%i" % i] = TH1F(
            "eta_%i" % i, "eta_%i" % i, len(time_bins) - 1, time_bins)
        # T
        histograms["T_%i" % i] = TH1F(
            "T_%i" % i, "T_%i" % i, len(time_bins) - 1, time_bins)
        # Time vs. Energy vs. Fluence (10**50)
        histograms["Time_Energy_Fluence_%i" % i] = TH2F(
            "Time_Energy_Fluence_%i" % i, "Time_Energy_Fluence_%i" % i,
            len(time_bins) - 1, time_bins, len(energy_bins) - 1, energy_bins)
    return histograms
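

# --- Illustrative example (not part of the original module) ---
# The booking helper expects ROOT-style bin edge arrays (array("f", ...)) and
# returns a dict keyed by quantity and flavour index (0: nu_e, 1: anti-nu_e,
# 2: nu_x). The edges below are placeholders.
def _example_booking():
    time_bins = array("f", [0., 0.1, 0.2, 0.5, 1.])
    energy_bins = array("f", [0., 10., 20., 50., 100.])
    histograms = BookThreeFlavourHistograms(time_bins, energy_bins)
    print "example: booked %i histograms:" % len(histograms), sorted(histograms.keys())
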
# Reading functions for our various input files
# Garching Legacy
# This reader is based on the data files taken from:
# https://wiki.bnl.gov/dusel/index.php/Models


def ReadGarchingLegacyFile():  # NOTE: the original def line is missing from this listing; the name used here is assumed
    # Garching files are provided in 10 columns:
    # 0: time after bounce [s]
    # 1 - 3: L_nu [erg/s], electron-n. electron antin. muon/tau n.
    # 4 - 6: <E> [MeV], electron-n. electron antin. muon/tau n.
    # 7 - 9: alpha, electron-n. electron antin. muon/tau n.
    input = "./data/fluxes/garching_neutrino_data_sf.dat"
    # The input file doesn't specify the order of magnitude of lumi
    # comparing by eye to PRL 104, 251101 (2010) I think it's 10^52
    # our axis labels are in lumi units of 10^50, so we should multiply by 100
    LUMI_MAG = 1e2
    the_info = {}
    for line in open(input, "r").readlines():
        t = line.split()
        if len(t) != 10:
            continue
        if t[0] == "[s]":
            continue
        # print line.strip()
        the_info[float(t[0])] = []
        for i in range(1, 10):
            if i in [1, 2, 3]:
                the_info[float(t[0])].append(float(t[i]) * LUMI_MAG)
            else:
                the_info[float(t[0])].append(float(t[i]))
        # print the_info[float(t[0])]
    return the_info
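

# --- Illustrative note (not part of the original module) ---
# All of the flux readers return the same structure: a dict keyed by time (s)
# whose values are flat lists indexed per flavour,
#   the_info[time][0:3] -> luminosities (nu_e, anti-nu_e, nu_x)
#   the_info[time][3:6] -> mean energies [MeV]
#   the_info[time][6:9] -> alpha parameters
# and, for the 15-element Garching variant below, [9:12] eta and [12:15] T.
# ParameterisedDataToHistograms relies on exactly this layout:
def _example_reader_layout(the_info):
    # the_info is the dict returned by one of the readers in this module
    first_time = sorted(the_info.keys())[0]
    for i_flavour in range(3):
        print "example: flavour %i: L = %s, <E> = %s MeV, alpha = %s" % (
            i_flavour,
            the_info[first_time][i_flavour],
            the_info[first_time][i_flavour + 3],
            the_info[first_time][i_flavour + 6])
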
# Garching
# This reader is based on the data files taken from:
# http://www.mpa-garching.mpg.de/ccsnarchive/data/Huedepohl2010-data/index.html
# - ask m.tamsett@sussex.ac.uk for username and password
# Phys. Rev. Lett. 104, 251101 (2010)
# These files were provided by the authors of the above.


def ReadGarchingFile():  # NOTE: the original def line is missing from this listing; the name used here is assumed
    # Garching files are provided in 3 files each containing the time dependence of a neutrino species
    # The columns are:
    # 0: time [s]
    # 1: luminosity [1e51 erg/s]
    # 2: <e> [MeV]
    # 3: <e^2> [MeV^2]
    # 4: <e^3> [MeV^3]
    # 5: alpha
    # 6: eta
    # 7: T [MeV/boltzmann]
    # Specify the order of magnitude of the variables
    # our axis labels are in lumi units of 10^50
    LUMI_MAG = 1e1
    file_path = "./data/fluxes/"
    # map the file names to the same bins as were used for garching
    # time, Lumi nu_e, anti-nu_e, nu_x, repeat for E, alpha, eta, T
    name_to_bin_dict = {
        "garching_sf_neutrino_signal_nu_e.dat": 0,
        "garching_sf_neutrino_signal_nubar_e.dat": 1,
        "garching_sf_neutrino_signal_nu_x.dat": 2
    }
    # Read log
    the_info = {}
    for i, input in enumerate(name_to_bin_dict.keys()):
        for j, line in enumerate(open(file_path + input, "r").readlines()):
            # Debug: there are lots of time points, so skip most of them for debug
            # if j%10: continue
            t = line.split()
            if t[0] == "#":
                continue
            assert(len(t) == 8)
            time = float(t[0])

            # there are some odd features in the first four time points (well before core bounce)
            # so we'll simply skip these for now (as recommended by authors).
            if time < -6.26270506e-02:
                continue

            if i == 0:
                the_info[time] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            else:
                assert time in the_info.keys()
            # Luminosity
            the_info[time][name_to_bin_dict[input]] = float(t[1]) * LUMI_MAG
            # Mean energy
            the_info[time][3 + name_to_bin_dict[input]] = float(t[2])
            # energy parameterisation
            the_info[time][6 + name_to_bin_dict[input]] = float(t[5])
            # the_info[time][6+name_to_bin_dict[input]]= 2. # Maxwell-Boltzmann spectrum
            # the_info[time][6+name_to_bin_dict[input]]= 2.5 # Fermi-Dirac spectrum
            the_info[time][9 + name_to_bin_dict[input]] = float(t[6])
            the_info[time][12 + name_to_bin_dict[input]] = float(t[7])

    return the_info
# Livermore
# This reader is based on the data files taken from:
# https://wiki.bnl.gov/dusel/index.php/Models
# This data is very suspect. A more accurate version was provided by the authors and has
# been parsed in the SND format.


def ReadLivermoreFile():  # NOTE: the original def line is missing from this listing; the name used here is assumed
    # Livermore files are provided in 6 files each containing the time dependence of a quantity
    # Specify the order of magnitude of the variables
    # our axis labels are in lumi units of 10^50
    LUMI_MAG = 1e50
    # and energy is in MeV
    E_MAG = 1e6
    # Alpha is not provided so default to a single value
    ALPHA = 2.
    file_path = "./data/fluxes/"
    # map the file names to the same bins as were used for garching legacy
    # time, Lumi nu_e, anti-nu_e, nu_x, repeat for E, alpha
    name_to_bin_dict = {
        "livermore.Lnue.dat": 0,
        "livermore.Lanue.dat": 1,
        "livermore.Lnux.dat": 2,
        "livermore.Enue.dat": 3,
        "livermore.Eanue.dat": 4,
        "livermore.Enux.dat": 5
    }
    # Read log
    the_info = {}
    # These files have variable numbers of bins!!
    # So to standardise them lets fill the values into a graph
    # then interpolate between the values. 511 bins matches the garching
    # LEGACY files
    time_bins = MakeLogList(0.03, 10, 511)
    for i, input in enumerate(name_to_bin_dict.keys()):
        points = []
        for line in open(file_path + input, "r").readlines():
            t = line.split()
            time = float(t[0])
            val = float(t[1])
            if "L" in input:
                val = val / LUMI_MAG
                # in order to conform with the garching files we expect nu_x to contain the sum over the 4 flavours
                # the livermore file gives this individually.
                # Therefore we multiply by 4
                if "nux" in input:
                    val = val * 4.
            else:
                val = val / E_MAG
            points.append((time, val))
        the_graph = MakeGraph(points, name=input)
        #MC.Fill_Na( [the_graph],[input],"x;y", draw_option="LP",redraw_axis=False )
        i_points = []
        for t in time_bins:
            val = the_graph.Eval(t)
            i_points.append((t, val))
            if i == 0:
                the_info[t] = [0, 0, 0, 0, 0, 0, ALPHA, ALPHA, ALPHA]
            else:
                assert t in the_info.keys()
            the_info[t][name_to_bin_dict[input]] = val
        the_i_graph = MakeGraph(i_points, name="i_" + input)
    return the_info
# Translate parameterised data into histograms


def ParameterisedDataToHistograms(raw_data, parameterisation="alpha", verbose=False):
    # These files include multiple methods of parameterising the energy dependence
    # TODO: Add a configurable to select this (implement modes other than
    # alpha later)
    assert parameterisation in ["alpha", "fermi-dirac"], \
        "Unconfigured parameterisation requested: %s" % parameterisation

    # In order to make the data more useful we now transform it into a 2-D
    # histogram
    time_points = raw_data.keys()
    time_points.sort()
    print "SUtil: found %i time points" % len(time_points)
    # transform these into a binning list
    # work out the offset so that each time point is on a bin centre
    time_list = CreateListOfBins(time_points)
    # transform these into an array so we can use them in ROOT histograms
    from array import array
    time_bins = array("f", time_list)
    # make a range of energy bins in MeV
    # snowglobes will later expect 501 bins so this should be the number we
    # produce here
    lower_energy = 1
    upper_energy = 101.2
    energy_steps = 501
    # smaller values are useful for fast debugging - they should work fine with the logic here
    # (they will just make things more complicated upstream)
    #upper_energy = 101.
    #energy_steps = 100
    energy_range = [a for a in drange(float(lower_energy), float(upper_energy),
                                      float((upper_energy - lower_energy) / float(energy_steps)))]
    energy_delta = fabs(energy_range[1] - energy_range[0]) / 2.
    energy_bins = array("f", [i - energy_delta for i in energy_range])
    print "SUtil: making %i energy points" % len(energy_bins)
    # there are three neutrino types given, electron, positron and mu/tau (sometimes called x)
    # make a histogram for each of these
    # use indices corresponding to the list order in raw_data:
    # [e-, e+, x]
    histograms = BookThreeFlavourHistograms(time_bins, energy_bins)
    # Now loop over the raw data and fill the histograms
    # loop over time points
    if verbose:
        print "SUtil: --- Filling histograms"
    for i_time, time in enumerate(time_points):
        time_bin = histograms["lumi_1"].GetXaxis().FindBin(time)
        time_width = histograms["lumi_1"].GetXaxis().GetBinWidth(time_bin)
        if verbose:
            print "SUtil: time: %e, time bin: %i, width: %e" % (time, time_bin, time_width)
        # sanity check that the bin we've found maps to our list of time points
        # as it should
        assert (time_bin == i_time + 1), "time point[%i]: %f found in bin %i, should be bin %i" % (
            i_time, time, time_bin, i_time + 1)

        # loop over each neutrino flavour
        for i_flavour in range(3):
            if verbose:
                print "SUtil: --- Neutrino flavour: %i" % i_flavour
            # read the basic quantities from raw data
            lumi = raw_data[time][i_flavour]
            mean_energy = raw_data[time][i_flavour + 3]
            alpha = raw_data[time][i_flavour + 6]
            lumi_mev = convert_erg_to_mev(lumi)
            if len(raw_data[time]) == 15:
                eta = raw_data[time][i_flavour + 9]
                temp = raw_data[time][i_flavour + 12]
            else:
                eta = 0.
                temp = 0.
                assert(parameterisation != "fermi-dirac")
            if verbose:
                print "SUtil: Lumi: %.2f Erg / s (%.2f MeV / s), Mean energy: %.2f MeV, Alpha: %.2f" % (lumi, lumi_mev, mean_energy, alpha)
            # fill the 1-D histograms
            histograms["lumi_%i" % i_flavour].SetBinContent(time_bin, lumi)
            histograms["lumi_mev_%i" % i_flavour].SetBinContent(time_bin, lumi_mev)
            histograms["E_%i" % i_flavour].SetBinContent(time_bin, mean_energy)
            histograms["alpha_%i" % i_flavour].SetBinContent(time_bin, alpha)
            histograms["eta_%i" % i_flavour].SetBinContent(time_bin, eta)
            histograms["T_%i" % i_flavour].SetBinContent(time_bin, temp)
            # fill 2-d histogram
            ### IMPORTANT ###
            # We need to keep track of the total luminosity contained in the energy histogram.
            # - lumi is in units of energy
            # The later interpretation of the lumi histogram will define how we determine the normalisation here.
            # Snowglobes takes a fluence of neutrinos per energy bin - i.e. a number integrated over time.
            # Therefore its total luminosity will be the sum over all bins of bin_energy * fluence.
            # Here we know the value of the instantaneous luminosity at each point in time, and we know the mean energy
            # and the shape of the energy distribution, so we can distribute the luminosity value over the energy curve.
            ### --------- ###
            alpha_sum = 0.
            # loop over energy bins
            if verbose:
                print "SUtil: --- Time energy alpha(energy) matrix"
            for energy in energy_range:
                energy_bin = histograms["Time_Energy_Fluence_1"].GetYaxis().FindBin(energy)
                if parameterisation == "alpha":
                    this_alpha = alpha_function(float(energy), alpha, mean_energy)
                else:
                    if verbose:
                        print "SUtil: Fermi-dirac parameters: energy: %f, eta: %f, temp: %f" % (energy, eta, temp)
                    this_alpha = fermi_dirac_function(float(energy), eta, temp)
                histograms["Time_Energy_Fluence_%i" % i_flavour].SetBinContent(
                    time_bin, energy_bin, this_alpha)
                # the z-value of each bin is now the alpha function at this
                # energy
                alpha_sum += (this_alpha * energy)
                if verbose:
                    print "SUtil: bin[%3i, %3i]: alpha(energy): %f" % (time_bin, energy_bin, this_alpha)
            pass  # end of first loop over energy bins
            if verbose:
                print "SUtil: alpha*energy sum: %f" % alpha_sum
            # now we know the energy sum, loop again to correctly normalise
            if verbose:
                print "SUtil: --- Time energy fluence matrix"
            norm_energy_sum = 0
            norm_fluence_sum = 0.
            for energy in energy_range:
                energy_bin = histograms["Time_Energy_Fluence_1"].GetYaxis().FindBin(energy)
                this_alpha = histograms["Time_Energy_Fluence_%i" %
                                        i_flavour].GetBinContent(time_bin, energy_bin)
                # we now normalise this value such that the sum over all bins of (z-value * energy) = lumi_mev
                # i.e. the z-value is now a number - a flux
                flux = this_alpha * (lumi_mev / alpha_sum)
                # keep track of the running normalised energy sum for cross
                # checks
                norm_energy_sum += flux * energy
                # flux is now converted into a fluence
                # flux = n per unit time
                # fluence = n per bin time_width
                # fluence = flux [/s] * time_width [s]
                fluence = flux * time_width
                norm_fluence_sum += fluence * energy
                histograms["Time_Energy_Fluence_%i" % i_flavour].SetBinContent(
                    time_bin, energy_bin, fluence)
                if verbose:
                    print "SUtil: bin[%3i, %3i]: fluence: %f" % (time_bin, energy_bin, fluence)
            pass  # end of second loop over energy bins
            if verbose:
                print "SUtil: normalised energy sum: %f" % norm_energy_sum
                print "SUtil: normalised fluence sum: %f" % norm_fluence_sum
            # Check that the normalisation worked to within some floating point
            # tolerance
            assert (lumi_mev == 0.) or (fabs(norm_energy_sum - lumi_mev) < (0.0005 * lumi_mev)), \
                "Normalised energy spectrum %f != luminosity %f" % (norm_energy_sum, lumi_mev)
        pass  # end of loop over neutrino flavours
    pass  # end of loop over time points
    return histograms
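

# --- Illustrative example (not part of the original module) ---
# Typical use of the conversion above: take the dict produced by one of the
# flux readers, build the histograms and pick out the 2-D fluence map for
# anti-nu_e (flavour index 1).
def _example_parameterised_pipeline(raw_data):
    # raw_data is the dict returned by one of the flux readers above
    histograms = ParameterisedDataToHistograms(raw_data, parameterisation="alpha")
    fluence_anti_nue = histograms["Time_Energy_Fluence_1"]
    print "example: fluence map has %i time x %i energy bins" % (
        fluence_anti_nue.GetXaxis().GetNbins(), fluence_anti_nue.GetYaxis().GetNbins())
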
# Parsing functions for SND input files
# These functions will parse the files provided in the Supernova neutrino database
# http://asphwww.ph.noda.tus.ac.jp/snn/
# Astrophys. J. Supp. 205 (2013) 2, arXiv:1210.6841 [astro-ph.HE]
# Time integrated files


def ParseIntegratedSNDFile(model):
    assert False, "Fail: Not implemented yet"
# Time dependent files


def ParseSNDFile(model, verbose=False):
    LUMI_MAG = 1e50
    input = "./data/%s.data" % model
    time = False
    the_info = {}
    times = []
    energy_bins = []
    values = {}
    # loop over the data file
    for line in open(input, "r").readlines():
        t = line.split()
        if len(t) == 0:
            continue  # skip empty lines
        if len(t) == 1:
            # This denotes a new time bin
            time = float(t[0])
            times.append(time)
            values[time] = []
            continue
        assert len(t) == 8, "SUtil: SND file with spectrum line length != 8: %i" % len(t)
        # look at the energy boundaries
        # if we're in the first time block then we initialise the energy bins
        # else we assert they exist
        lower_e = float(t[0])
        upper_e = float(t[1])
        if len(times) != 1:
            assert(lower_e in energy_bins)
            assert(upper_e in energy_bins)
        else:
            if len(energy_bins) == 0:
                energy_bins.append(lower_e)
            assert(lower_e in energy_bins)
            energy_bins.append(upper_e)
        # now store the number fluxes as a tuple
        # recall these values are divided by both the energy bin width and the time bin width
        #values[time].append( (float(t[2]),float(t[3]),float(t[4])) )
        values[time].append((float(t[2]),       # Numbers
                             float(t[3]),
                             float(t[4]) * 4.,  # total nu_x
                             float(t[5]),       # Luminosities
                             float(t[6]),
                             float(t[7]) * 4.   # total nu_x
                             ))
    pass  # end of loop over lines
    if verbose:
        print "SUtil: found %i time points" % len(times)
        print "SUtil: found %i energy bin edges (%i bins)" % (len(energy_bins), len(energy_bins) - 1)
    # now book and fill histograms
    time_bins = array("f", CreateListOfBins(times, centred=False))
    histograms = BookThreeFlavourHistograms(time_bins, array("f", energy_bins))
    # The authors provide tables of integrated energies and luminosities
    # we reproduce those here as a cross check.
    # Note they interpolate so our numbers won't match theirs exactly.
    cross_check_table = []
    cross_check_table_header = []
    for i in range(3):
        for i_time in range(len(times)):
            # find the time span + special case for the first bin
            if i_time == 0:
                d_time = times[1] - times[0]
            else:
                d_time = times[i_time] - times[i_time - 1]
            sum_number = 0.
            sum_lumi = 0.
            # sanity check, are the number of energy keys at this time the same
            # as the number of energy bin centers
            assert (len(energy_bins) - 1 == len(values[times[i_time]]))
            for i_energy, value in enumerate(values[times[i_time]]):
                d_energy = energy_bins[i_energy + 1] - energy_bins[i_energy]

                # want time, energy fluence to contain the _number_ of neutrinos in a given time & energy bin
                # note this means that variable bin sizes will lead to
                # non-continuous distributions
                histograms["Time_Energy_Fluence_%i" % i].SetBinContent(
                    i_time + 1, i_energy + 1, (value[i] * d_energy * d_time) / LUMI_MAG)
                # the other distributions are cross check distributions
                # we will also fill the number of neutrinos here
                # to get continuous distributions to show you should divide by
                # bin width
                sum_number += value[i] * d_energy * d_time
                sum_lumi += value[i + 3] * d_energy * d_time

                # fill cross check table
                if (i_time == 0) and (i == 0):
                    cross_check_table_header = [0., 0., 0., 0., 0., 0.]
                    cross_check_table.append([energy_bins[i_energy], energy_bins[
                        i_energy + 1], 0., 0., 0., 0., 0., 0.])
                cross_check_table[i_energy][i + 2] += value[i] * d_time
                cross_check_table[i_energy][i + 3 + 2] += value[i + 3] * d_time
                cross_check_table_header[i] += value[i] * d_time * d_energy
                cross_check_table_header[i + 3] += value[i + 3] * d_time * d_energy

            mean_energy = convert_erg_to_mev(sum_lumi) / sum_number
            histograms["E_%i" % i].SetBinContent(i_time + 1, mean_energy)

            histograms["lumi_%i" % i].SetBinContent(
                i_time + 1, sum_lumi / LUMI_MAG)
            histograms["lumi_mev_%i" % i].SetBinContent(
                i_time + 1, convert_erg_to_mev(sum_lumi / LUMI_MAG))

            # Unused here as we have the shape of the energy spectrum vs. time
            histograms["alpha_%i" % i].SetBinContent(i_time + 1, 1.)
    # print cross check table
    if verbose:
        string = ""
        for i in cross_check_table_header:
            string += "%e " % i
        print string
        for line in cross_check_table:
            string = ""
            for i in line:
                string += "%e " % i
            print string
    return histograms
# Energy bin conversion


def ConvertTimeEnergyFluenceToSnowglobesFormat(histogram, interpolate=False, verbose=False):
    """
    This method will take a flavour TimeEnergyFluence histogram
    and convert its energy axis so that we have the correct binning for snowglobes:
    501 bins from 0 to 100 MeV

    Interpolate is useful if the original energy bins are coarser than the snowglobes ones,
    as rather than using the stepped distribution, we instead interpolate between the steps.
    """
    # make a range of energy bins in MeV
    # snowglobes will later expect 501 bins so this should be the number we
    # produce here
    lower_energy = 1
    upper_energy = 101.2
    energy_steps = 501
    energy_range = [a for a in drange(float(lower_energy), float(upper_energy),
                                      float((upper_energy - lower_energy) / float(energy_steps)))]
    energy_delta = fabs(energy_range[1] - energy_range[0]) / 2.
    energy_bins = array("f", [i - energy_delta for i in energy_range])
    # make the return histogram
    time_bins = GetBins(histogram.GetXaxis())
    if verbose:
        print "SUtil: got %i time points" % len(time_bins)
    return_histogram = TH2F(histogram.GetName() + "_snowglobed", histogram.GetTitle() + "_snowglobed",
                            len(time_bins) - 1, time_bins, len(energy_bins) - 1, energy_bins)
    old_energy_bins = GetBins(histogram.GetYaxis())
    if verbose:
        print "SUtil: got %i energy points" % len(old_energy_bins)
        print "SUtil: making %i energy points" % len(energy_bins)
    if interpolate:
        if verbose:
            print "SUtil: preparing interpolation graph"
        # In order to interpolate, graphs have to be continuous
        interpolation_graphs = []
        for time_bin in range(histogram.GetXaxis().GetNbins() + 2):
            # if verbose: print "SUtil: time bin[%i]"%time_bin
            points = []
            for energy_bin in range(histogram.GetYaxis().GetNbins() + 2):
                fluence = histogram.GetBinContent(time_bin, energy_bin)
                # to make these continuous we must divide by energy bin width
                fluence = fluence / histogram.GetYaxis().GetBinWidth(energy_bin)
                energy = histogram.GetYaxis().GetBinCenter(energy_bin)
                points.append((energy, fluence))
                # if verbose:
                #     print "SUtil: energy bin[%i]: fluence: %f"%(energy_bin, fluence)
            interpolation_graphs.append(MakeGraph(
                points, name="%s_energy_interpolation_graph_%i" % (histogram.GetName(), time_bin)))
    # now loop over the new bins and work out how to divide up the old ones
    for i_e, energy in enumerate(energy_bins):
        if i_e == 0:
            continue
        # find the bins spanned & what fraction of each is included
        min_bin = histogram.GetYaxis().FindBin(energy_bins[i_e - 1])
        max_bin = histogram.GetYaxis().FindBin(energy_bins[i_e])
        if verbose:
            print "SUtil: energy range[%.3f -> %.3f] spans %2i bins [%3i -> %3i] %f -> %f" %\
                (energy_bins[i_e - 1], energy_bins[i_e], (max_bin - min_bin) + 1, min_bin, max_bin,
                 histogram.GetYaxis().GetBinLowEdge(min_bin),
                 histogram.GetYaxis().GetBinUpEdge(max_bin))
        bin_fractions = []
        eval_centres = []
        #bin_widths = []
        for j, energy_bin in enumerate(range(min_bin, max_bin + 1)):
            # bin_widths.append(histogram.GetYaxis().GetBinWidth(energy_bin))
            # if the bin isn't a boundary then there is 100% overlap
            # and we should evaluate this for interpolation at its centre
            if j not in [0, (max_bin - min_bin)]:
                spanned_fraction = 1.
                # note: evaluate at the centre of this histogram bin (energy_bin), not the span index
                eval_centre = histogram.GetYaxis().GetBinCenter(energy_bin)
            # if it is then we must determine the degree of overlap with our
            # energy window
            else:
                lower_limit = histogram.GetYaxis().GetBinLowEdge(energy_bin)
                if lower_limit < energy_bins[i_e - 1]:
                    lower_limit = energy_bins[i_e - 1]

                upper_limit = histogram.GetYaxis().GetBinUpEdge(energy_bin)
                if upper_limit > energy_bins[i_e]:
                    upper_limit = energy_bins[i_e]

                spanned_fraction = (
                    (upper_limit - lower_limit) / histogram.GetYaxis().GetBinWidth(energy_bin))
                eval_centre = lower_limit + ((upper_limit - lower_limit) / 2.)
            if verbose:
                print "SUtil: sub-bin[%3i] (%3i) range spanned: %f -> %f: %2.2f %%, eval centre: %.2f" %\
                    (j, energy_bin, lower_limit, upper_limit,
                     spanned_fraction * 100., eval_centre)
            bin_fractions.append(spanned_fraction)
            eval_centres.append(eval_centre)

            # now loop over each time bin in the old histogram and fill into
            # the new histogram with an appropriate weight
            time_integrated_content = 0.
            for time_bin in range(histogram.GetXaxis().GetNbins() + 2):
                if interpolate:
                    # Don't try and interpolate out of range of the graph!
                    if (eval_centre < histogram.GetYaxis().GetBinCenter(1)) or \
                            (eval_centre > histogram.GetYaxis().GetBinCenter(histogram.GetYaxis().GetNbins())):
                        bin_content = 0
                    else:
                        # as we can potentially span many bins, the point at which to eval each bin needs to be determined
                        # the 0, "S" options to Eval mean a TSpline3 object is created.
                        # Drop these arguments for linear interpolation
                        bin_content = interpolation_graphs[time_bin].Eval(eval_centre, 0, "S")
                        # as we previously divided these by energy bin width to
                        # make them continuous, we must remove this
                        bin_content = bin_content * histogram.GetYaxis().GetBinWidth(energy_bin)
                else:
                    bin_content = histogram.GetBinContent(time_bin, energy_bin)
                new_bin_content = return_histogram.GetBinContent(
                    time_bin, i_e) + (bin_content * spanned_fraction)
                return_histogram.SetBinContent(time_bin, i_e, new_bin_content)
                time_integrated_content += new_bin_content
            if verbose:
                print "SUtil: Time integrated content: %f" % time_integrated_content

        pass  # end of loop over spanned energy bins
        if verbose:
            print "SUtil: bin fractions: ", ["%f" % b for b in bin_fractions]
            if interpolate:
                print "SUtil: eval centres: ", ["%f" % e for e in eval_centres]

    #assert False
    return return_histogram
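

# --- Illustrative example (not part of the original module) ---
# Rebinning the fluence maps onto the snowglobes energy grid. Interpolation
# is worth switching on when the source histogram has energy bins coarser
# than the ~0.2 MeV target bins (e.g. the SND files); the time- and
# energy-integrated content should be approximately preserved either way.
def _example_snowglobes_rebinning(histograms):
    # histograms as returned by ParameterisedDataToHistograms or ParseSNDFile
    rebinned = {}
    for i in range(3):
        rebinned[i] = ConvertTimeEnergyFluenceToSnowglobesFormat(
            histograms["Time_Energy_Fluence_%i" % i], interpolate=True)
    return rebinned
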