CovMxExperiment.cxx
/*
 * CovMxExperiment.cxx
 *
 *  Created on: Jan 10, 2017
 *      Author: J. Wolcott <jwolcott@fnal.gov>
 */

#include <cmath>
#include <map>
#include <stdexcept>

#include "TCanvas.h"
#include "TH1D.h"
#include "THStack.h"
#include "TMatrixD.h"
#include "TObjString.h"

#include "OscLib/IOscCalc.h"

namespace ana
{
  double CovMxExperiment::ChiSq(osc::IOscCalcAdjustable* calc, const SystShifts& syst) const
  {
    // the covariance matrix replaces the systematic shift approach.
    if (!syst.IsNominal())
      throw std::runtime_error("CovMxExperiment::ChiSq(): Systematic shifts cannot be specified when fitting against covariance matrices");

    const double pot = fData.POT();
    Eigen::ArrayXd hdata = fData.GetEigen(pot);

    std::map<Component, Eigen::ArrayXd> predictions;
    for (const auto & mxPair : this->fCovMxInv)
    {
      const auto & comp = mxPair.first;
      if (comp.flavor == Flavors::kAll && comp.current == Current::kBoth && comp.sign == Sign::kBoth)
      {
        // if you specified one matrix as applying to the whole prediction,
        // why did you give me more of them??
        if (this->fCovMxInv.size() > 1)
          throw std::runtime_error("CovMxExperiment::ChiSq(): one component corresponds to the whole prediction but you also provided others?!");

        predictions.emplace(comp, fMC->Predict(calc).GetEigen(pot));
      }
      else
        predictions.emplace(mxPair.first, fMC->PredictComponent(calc, comp.flavor, comp.current, comp.sign).GetEigen(pot));
    }

    // we'll want the total prediction in a moment, and since the
    // calculations below assume the components add up to the total,
    // we might as well check that assumption here.
    Eigen::ArrayXd totalPred = fMC->Predict(calc).GetEigen(pot);
    Eigen::ArrayXd diff = totalPred;
    for (const auto & predPair : predictions)
      diff -= predPair.second;
    for (int bin = 0; bin < totalPred.size(); bin++)
    {
      if (std::fabs(diff[bin]) > 1e-4)
        throw std::runtime_error("CovMxExperiment::ChiSq(): components don't sum to total prediction!");
    }

    // add the cosmics into the prediction.
    if (fCosmic)
    {
      if (fCosmicScaleError != 0)
      {
        const double scale = 1 + syst.GetShift(&kCosmicBkgScaleSyst) * fCosmicScaleError;
        totalPred += fCosmic->GetEigen(pot) * scale;
      }
      else
        totalPred += fCosmic->GetEigen(pot);
    }

    // first, compute the statistical uncertainty covariance matrix
    // (which is diagonal). we'll need this repeatedly below.
    // use the *total* prediction to get the sigma^2 here because
    // that's the expected fluctuation size
    Eigen::MatrixXd stat_mx = Eigen::MatrixXd::Zero(totalPred.size()-2, totalPred.size()-2);
    for (int binNum = 1; binNum <= totalPred.size()-2; binNum++)
      stat_mx(binNum-1, binNum-1) = totalPred[binNum];

    // We need to include the statistical covariance matrix
    // in the chi^2 calculation or it'll be way off.
    // Unfortunately we need to recalculate the sum for *every* new
    // configuration of the IPrediction, and since we then have to
    // invert it, this could be very costly.
    // StackExchange to the rescue: somebody there wanted to solve a similar problem
    // (where we already have the inverse of a matrix A
    // and want to compute (A+B)^{-1} with some other invertible B):
    // http://math.stackexchange.com/a/17780
    // The method has apparently been rediscovered in various forms by multiple
    // people, but this version is due to K. Miller, Math. Mag. 54, 67 (1981).
    // (A standalone numerical check of this update follows the listing.)

    // Strictly speaking, it's not possible to divide the prediction up
    // into pieces and then use the covariance matrices for those pieces
    // to compute a chi^2, because it's not possible to know which event
    // was which in the data. However, we'll cheat a little and do the following:
    // for each component, we'll subtract the rest of the prediction from the data
    // and compare the background-subtracted data to that component.
    // Because we have to include the statistical covariance matrix every time,
    // this over-counts the statistical uncertainty by a factor of
    // roughly (N-1), where N is the number of components.
    // Probably this isn't exactly the right statistical thing to do,
    // but as a first approximation, we'll take an average of the chi^2s
    // weighted by how many events each component contributes to the prediction.
    // (A toy illustration of this weighting appears at the end of this page.)

    double chi2 = 0;
    for (auto it_comp = this->fCovMxInv.begin(); it_comp != this->fCovMxInv.end(); ++it_comp)
    {
      const auto & comp = it_comp->first;
      const auto & compCovMxInv = it_comp->second;
      const auto & predComp = predictions.at(comp);
      Eigen::ArrayXd hdata_compSub = hdata - (totalPred - predComp);

      // now recursively update the matrix
      // by calculating the inverse of (A+B_k),
      // where B_k is one of the N rank-1 matrices
      // (where N is the rank of B) that B can be decomposed into.
      // our case is even easier because we know B is diagonal
      // (it's the statistical uncertainty covariance matrix).
      Eigen::MatrixXd Bk = Eigen::MatrixXd::Zero(stat_mx.rows(), stat_mx.cols());
      Eigen::MatrixXd CkInv = compCovMxInv; // here we're setting what Miller calls C_1 = A

      // the rank of the stat mx is just the number of rows:
      // it's diagonal and the bins are orthogonal.
      // note that we're counting from zero instead of 1
      // for convenience with the zero-based Eigen indexing
      for (int k = 0; k < stat_mx.rows(); k++)
      {
        // reset the last iteration so we can re-use
        if (k > 0)
          Bk(k-1, k-1) = 0;

        // set current iteration B_k
        Bk(k, k) = stat_mx(k, k);

        // can short-circuit the trace in Miller's algorithm
        // since our B_k come from a diagonal matrix:
        // they're single entries on the diagonal
        double gk = 1. / (1 + CkInv(k, k)*Bk(k, k));

        CkInv -= gk * CkInv * Bk * CkInv;
      }

      // event-weighted chi^2
      chi2 += predComp.sum() * Chi2CovMx(predComp, hdata_compSub, CkInv);
    }

    // divide out the total number of events to get the weighted chi^2
    chi2 /= totalPred.sum();

    return chi2;
  }

}
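The inner loop over k above is an application of Miller's recursive rank-1 update for the inverse of a sum of matrices. The following standalone snippet is a minimal numerical check of that update using Eigen; it is not part of CAFAna, and the matrix size and contents (n, A, B, the scale 100) are arbitrary test inputs chosen for illustration.

// Standalone sketch (not part of CAFAna): checks Miller's rank-1 update,
// as used in CovMxExperiment::ChiSq() above, against a direct inversion.
// All sizes and values here are arbitrary test inputs.
#include <iostream>

#include <Eigen/Dense>

int main()
{
  const int n = 5;

  // a symmetric positive-definite matrix A, playing the role of the
  // systematic covariance matrix whose inverse we already have
  Eigen::MatrixXd R = Eigen::MatrixXd::Random(n, n);
  Eigen::MatrixXd A = R * R.transpose() + double(n) * Eigen::MatrixXd::Identity(n, n);
  Eigen::MatrixXd AInv = A.inverse();

  // a diagonal matrix B, playing the role of the statistical covariance matrix
  Eigen::VectorXd diag = 100.0 * (Eigen::VectorXd::Random(n).array() + 2.0).matrix();
  Eigen::MatrixXd B = diag.asDiagonal();

  // Miller's recursion: with C_1 = A and C_{k+1} = C_k + B_k,
  //   C_{k+1}^{-1} = C_k^{-1} - g_k C_k^{-1} B_k C_k^{-1},
  //   g_k = 1 / (1 + tr(C_k^{-1} B_k)).
  // Each B_k here is a single diagonal entry of B, so the trace
  // collapses to one product, exactly as in the loop above.
  Eigen::MatrixXd Bk = Eigen::MatrixXd::Zero(n, n);
  Eigen::MatrixXd CkInv = AInv;
  for (int k = 0; k < n; ++k)
  {
    if (k > 0)
      Bk(k-1, k-1) = 0;       // clear the previous rank-1 piece
    Bk(k, k) = B(k, k);       // current rank-1 piece

    const double gk = 1. / (1 + CkInv(k, k) * Bk(k, k));
    CkInv -= gk * CkInv * Bk * CkInv;
  }

  // compare against inverting A+B directly
  const double maxDiff = (CkInv - (A + B).inverse()).cwiseAbs().maxCoeff();
  std::cout << "max |Miller update - direct inverse| = " << maxDiff << std::endl;

  return 0;
}

The printed difference should be at the level of floating-point round-off, confirming that the recursion reproduces (A+B)^{-1} without a fresh full inversion.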
double Chi2CovMx(const Eigen::ArrayXd &e, const Eigen::ArrayXd &o, const Eigen::MatrixXd &covmxinv)
Definition: EigenUtils.cxx:52
virtual Spectrum PredictComponent(osc::IOscCalc *calc, Flavors::Flavors_t flav, Current::Current_t curr, Sign::Sign_t sign) const =0
std::optional< Spectrum > fCosmic
bool IsNominal() const
Definition: SystShifts.h:43
virtual Spectrum Predict(osc::IOscCalc *calc) const =0
Simple record of shifts applied to systematic parameters.
Definition: SystShifts.h:20
const Eigen::ArrayXd & GetEigen() const
NB these don't have POT scaling. For expert high performance ops only!
Definition: Spectrum.h:188
const CosmicBkgScaleSyst kCosmicBkgScaleSyst
Interactions of both types.
Definition: IPrediction.h:42
virtual double ChiSq(osc::IOscCalcAdjustable *osc, const SystShifts &syst=SystShifts::Nominal()) const override
std::map< Component, Eigen::MatrixXd > fCovMxInv
T GetShift(const ISyst *syst) const
double POT() const
Definition: Spectrum.h:219
Both neutrinos and antineutrinos.
Definition: IPrediction.h:52
All neutrinos, any flavor.
Definition: IPrediction.h:26
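For the event-weighted average of per-component chi^2 values described in the comments of ChiSq(), here is a toy illustration. Chi2CovMxSketch is an assumed stand-in for the real Chi2CovMx() in EigenUtils.cxx, written in the usual (o-e)^T M^{-1} (o-e) form; the component and data values are made up, and an identity matrix stands in for a real inverse covariance matrix.

// Toy sketch (not part of CAFAna) of the event-weighted chi^2 average used
// in CovMxExperiment::ChiSq(). Chi2CovMxSketch is an *assumed* stand-in for
// the real Chi2CovMx(); all numbers below are made-up illustration values.
#include <iostream>
#include <vector>

#include <Eigen/Dense>

// assumed form: (o - e)^T M^{-1} (o - e)
double Chi2CovMxSketch(const Eigen::ArrayXd& e, const Eigen::ArrayXd& o,
                       const Eigen::MatrixXd& covmxinv)
{
  const Eigen::VectorXd diff = (o - e).matrix();
  return diff.dot(covmxinv * diff);
}

int main()
{
  // two toy prediction components and toy data in 3 bins
  std::vector<Eigen::ArrayXd> comps{
    (Eigen::ArrayXd(3) << 80., 60., 40.).finished(),
    (Eigen::ArrayXd(3) << 20., 15., 10.).finished()};
  Eigen::ArrayXd data = (Eigen::ArrayXd(3) << 105., 70., 55.).finished();

  Eigen::ArrayXd total = comps[0] + comps[1];

  // stand-in for a per-component inverse covariance matrix
  Eigen::MatrixXd covInv = Eigen::MatrixXd::Identity(3, 3);

  double chi2 = 0;
  for (const auto& comp : comps)
  {
    // compare the background-subtracted data to this component
    Eigen::ArrayXd dataSub = data - (total - comp);
    chi2 += comp.sum() * Chi2CovMxSketch(comp, dataSub, covInv);
  }
  chi2 /= total.sum();  // event-weighted average over components

  std::cout << "weighted chi2 = " << chi2 << std::endl;
  return 0;
}

With both components sharing the same matrix this reduces to an ordinary chi^2; in the real code each component carries its own inverse covariance matrix, updated with the statistical term via Miller's recursion, which is where the (N-1)-fold over-counting of the statistical uncertainty discussed in the comments comes from.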