Analysis Software
Documentation for sPHENIX simulation software
TMVAClassification_MLPnew.class.C
Go to the documentation of this file, or view the newest version of TMVAClassification_MLPnew.class.C in the sPHENIX GitHub repository.
// Class: ReadMLPnew
// Automatically generated by MethodBase::MakeClass
//

/* configuration options =====================================================

#GEN -*-*-*-*-*-*-*-*-*-*-*- general info -*-*-*-*-*-*-*-*-*-*-*-

Method : MLP::MLPnew
TMVA Release : 4.2.1 [262657]
ROOT Release : 6.22/02 [398850]
Creator : cdean
Date : Tue Apr 20 16:39:06 2021
Host : Linux cvmfswrite02.sdcc.bnl.gov 3.10.0-957.12.2.el7.x86_64 #1 SMP Tue May 14 15:23:27 CDT 2019 x86_64 x86_64 x86_64 GNU/Linux
Dir : /gpfs/mnt/gpfs02/sphenix/user/cdean/scripts/HF_trigger_ML/TMVA/Odd/wCalo
Training events: 31922
Analysis type : [Classification]


#OPT -*-*-*-*-*-*-*-*-*-*-*-*- options -*-*-*-*-*-*-*-*-*-*-*-*-

# Set by User:
NCycles: "400" [Number of training cycles]
HiddenLayers: "N+10" [Specification of hidden layer architecture]
NeuronType: "tanh" [Neuron activation function type]
EstimatorType: "CE" [MSE (Mean Square Estimator) for Gaussian Likelihood or CE (Cross-Entropy) for Bernoulli Likelihood]
V: "False" [Verbose output (short form of "VerbosityLevel" below - overrides the latter one)]
VarTransform: "N" [List of variable transformations performed before training, e.g., "D_Background,P_Signal,G,N_AllClasses" for: "Decorrelation, PCA-transformation, Gaussianisation, Normalisation, each for the given class of events ('AllClasses' denotes all events of all classes, if no class indication is given, 'All' is assumed)"]
H: "True" [Print method-specific help message]
TrainingMethod: "BP" [Train with Back-Propagation (BP), BFGS Algorithm (BFGS), or Genetic Algorithm (GA - slower and worse)]
TestRate: "5" [Test for overtraining performed at each #th epochs]
UseRegulator: "False" [Use regulator to avoid over-training]
# Default:
RandomSeed: "1" [Random seed for initial synapse weights (0 means unique seed for each run; default value '1')]
NeuronInputType: "sum" [Neuron input function type]
VerbosityLevel: "Default" [Verbosity level]
CreateMVAPdfs: "False" [Create PDFs for classifier outputs (signal and background)]
IgnoreNegWeightsInTraining: "False" [Events with negative weights are ignored in the training (but are included for testing and performance evaluation)]
LearningRate: "2.000000e-02" [ANN learning rate parameter]
DecayRate: "1.000000e-02" [Decay rate for learning parameter]
EpochMonitoring: "False" [Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)]
Sampling: "1.000000e+00" [Only 'Sampling' (randomly selected) events are trained each epoch]
SamplingEpoch: "1.000000e+00" [Sampling is used for the first 'SamplingEpoch' epochs; afterwards, all events are taken for training]
SamplingImportance: "1.000000e+00" [The sampling weights of events in epochs which are successful (worse estimator than before) are multiplied with SamplingImportance, else they are divided.]
SamplingTraining: "True" [The training sample is sampled]
SamplingTesting: "False" [The testing sample is sampled]
ResetStep: "50" [How often BFGS should reset history]
Tau: "3.000000e+00" [LineSearch "size step"]
BPMode: "sequential" [Back-propagation learning mode: sequential or batch]
BatchSize: "-1" [Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events]
ConvergenceImprove: "1.000000e-30" [Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)]
ConvergenceTests: "-1" [Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)]
UpdateLimit: "10000" [Maximum times of regulator update]
CalculateErrors: "False" [Calculates inverse Hessian matrix at the end of the training to be able to calculate the uncertainties of an MVA value]
WeightRange: "1.000000e+00" [Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range]
##


#VAR -*-*-*-*-*-*-*-*-*-*-*-* variables *-*-*-*-*-*-*-*-*-*-*-*-

NVar 4
max(track_1_IP,track_2_IP) maxTrackDCA_3D maxTrackDCA_3D track-vertex 3D DCA, max units 'F' [0.00383873376995,4.99581956863]
max(abs(track_1_IP_xy),abs(track_2_IP_xy)) maxTrackDCA_2D maxTrackDCA_2D track-vertex 2D DCA, max units 'F' [9.33057162911e-05,4.8996257782]
track_1_track_2_DCA track_1_track_2_DCA track_1_track_2_DCA track-track 3D DCA units 'F' [1.57269468559e-07,0.0499997623265]
INTT_meanHits INTT_meanHits INTT_meanHits INTT avg. hits units 'F' [0,93]
NSpec 0


============================================================================ */
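// For context, a method with these options would typically be booked through
// the TMVA factory roughly as below (a minimal sketch, not part of the
// generated file; the factory and dataloader objects are assumptions, and the
// option string simply restates the "Set by User" options listed above):
#if 0  // illustration only; requires a ROOT/TMVA environment
#include "TMVA/Factory.h"
#include "TMVA/DataLoader.h"
#include "TMVA/Types.h"

void BookMLPnew( TMVA::Factory* factory, TMVA::DataLoader* dataloader )
{
   factory->BookMethod( dataloader, TMVA::Types::kMLP, "MLPnew",
                        "NCycles=400:HiddenLayers=N+10:NeuronType=tanh:EstimatorType=CE:"
                        "VarTransform=N:H:TrainingMethod=BP:TestRate=5:!UseRegulator" );
}
#endif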

#include <array>
#include <vector>
#include <cmath>
#include <string>
#include <iostream>

#ifndef IClassifierReader__def
#define IClassifierReader__def

class IClassifierReader {

 public:

   // constructor
   IClassifierReader() : fStatusIsClean( true ) {}

   // destructor
   virtual ~IClassifierReader() {}

   // return classifier response
   virtual double GetMvaValue( const std::vector<double>& inputValues ) const = 0;

   // returns classifier status
   bool IsStatusClean() const { return fStatusIsClean; }

 protected:

   bool fStatusIsClean;
};

#endif

class ReadMLPnew : public IClassifierReader {

 public:

   // constructor
   ReadMLPnew( std::vector<std::string>& theInputVars )
      : IClassifierReader(),
        fClassName( "ReadMLPnew" ),
        fNvars( 4 )
   {
      // the training input variables
      const char* inputVars[] = { "max(track_1_IP,track_2_IP)", "max(abs(track_1_IP_xy),abs(track_2_IP_xy))", "track_1_track_2_DCA", "INTT_meanHits" };

      // sanity checks
      if (theInputVars.size() <= 0) {
         std::cout << "Problem in class \"" << fClassName << "\": empty input vector" << std::endl;
         fStatusIsClean = false;
      }

      if (theInputVars.size() != fNvars) {
         std::cout << "Problem in class \"" << fClassName << "\": mismatch in number of input values: "
                   << theInputVars.size() << " != " << fNvars << std::endl;
         fStatusIsClean = false;
      }

      // validate input variables
      for (size_t ivar = 0; ivar < theInputVars.size(); ivar++) {
         if (theInputVars[ivar] != inputVars[ivar]) {
            std::cout << "Problem in class \"" << fClassName << "\": mismatch in input variable names" << std::endl
                      << " for variable [" << ivar << "]: " << theInputVars[ivar].c_str() << " != " << inputVars[ivar] << std::endl;
            fStatusIsClean = false;
         }
      }

      // initialize min and max vectors (for normalisation)
      fVmin[0] = -1;
      fVmax[0] = 1;
      fVmin[1] = -1;
      fVmax[1] = 1;
      fVmin[2] = -1;
      fVmax[2] = 1;
      fVmin[3] = -1;
      fVmax[3] = 1;

      // initialize input variable types
      fType[0] = 'F';
      fType[1] = 'F';
      fType[2] = 'F';
      fType[3] = 'F';

      // initialize constants
      Initialize();

      // initialize transformation
      InitTransform();
   }

   // destructor
   virtual ~ReadMLPnew() {
      Clear(); // method-specific
   }

   // the classifier response
   // "inputValues" is a vector of input values in the same order as the
   // variables given to the constructor
   double GetMvaValue( const std::vector<double>& inputValues ) const override;

 private:

   // method-specific destructor
   void Clear();

   // input variable transformation

   double fOff_1[3][4];
   double fScal_1[3][4];
   void InitTransform_1();
   void Transform_1( std::vector<double>& iv, int sigOrBgd ) const;
   void InitTransform();
   void Transform( std::vector<double>& iv, int sigOrBgd ) const;

   // common member variables
   const char* fClassName;

   const size_t fNvars;
   size_t GetNvar() const { return fNvars; }
   char GetType( int ivar ) const { return fType[ivar]; }

   // normalisation of input variables
   double fVmin[4];
   double fVmax[4];
   double NormVariable( double x, double xmin, double xmax ) const {
      // normalise to output range: [-1, 1]
      return 2*(x - xmin)/(xmax - xmin) - 1.0;
   }

   // type of input variable: 'F' or 'I'
   char fType[4];

   // initialize internal variables
   void Initialize();
   double GetMvaValue__( const std::vector<double>& inputValues ) const;

   // private members (method specific)

   double ActivationFnc(double x) const;
   double OutputActivationFnc(double x) const;

   double fWeightMatrix0to1[15][5]; // weight matrix from layer 0 to 1
   double fWeightMatrix1to2[1][15]; // weight matrix from layer 1 to 2

};

inline void ReadMLPnew::Initialize()
{
   // build network structure
   // weight matrix from layer 0 to 1
   fWeightMatrix0to1[0][0] = -0.371270017742686;
   fWeightMatrix0to1[1][0] = 1.7706044423266;
   fWeightMatrix0to1[2][0] = 8.85034476272466;
   fWeightMatrix0to1[3][0] = 0.924328409008795;
   fWeightMatrix0to1[4][0] = -1.63470086717199;
   fWeightMatrix0to1[5][0] = -1.38327094346314;
   fWeightMatrix0to1[6][0] = 0.790241163419693;
   fWeightMatrix0to1[7][0] = 4.901736345446;
   fWeightMatrix0to1[8][0] = 0.018716727029684;
   fWeightMatrix0to1[9][0] = -0.948170278427112;
   fWeightMatrix0to1[10][0] = -0.312912878308065;
   fWeightMatrix0to1[11][0] = 1.04508957646327;
   fWeightMatrix0to1[12][0] = -1.28950287560597;
   fWeightMatrix0to1[13][0] = -1.77048436582237;
   fWeightMatrix0to1[0][1] = -1.69168540540321;
   fWeightMatrix0to1[1][1] = 0.714828096779357;
   fWeightMatrix0to1[2][1] = 4.66267105393972;
   fWeightMatrix0to1[3][1] = 0.959704657855793;
   fWeightMatrix0to1[4][1] = 1.43739718535443;
   fWeightMatrix0to1[5][1] = 1.34319766207685;
   fWeightMatrix0to1[6][1] = -1.73412006454761;
   fWeightMatrix0to1[7][1] = 0.103214066908003;
   fWeightMatrix0to1[8][1] = 0.592676290122851;
   fWeightMatrix0to1[9][1] = -0.114018136019056;
   fWeightMatrix0to1[10][1] = 0.194105805540034;
   fWeightMatrix0to1[11][1] = 0.624177892237752;
   fWeightMatrix0to1[12][1] = 1.71732503026765;
   fWeightMatrix0to1[13][1] = -2.09324808031756;
   fWeightMatrix0to1[0][2] = -2.30489691537058;
   fWeightMatrix0to1[1][2] = 0.0667223243786911;
   fWeightMatrix0to1[2][2] = -0.044951481188298;
   fWeightMatrix0to1[3][2] = 0.299313324362303;
   fWeightMatrix0to1[4][2] = -3.76254878333605;
   fWeightMatrix0to1[5][2] = -0.132620702387949;
   fWeightMatrix0to1[6][2] = 0.492688487613024;
   fWeightMatrix0to1[7][2] = -0.0872273658076029;
   fWeightMatrix0to1[8][2] = -0.20536823401227;
   fWeightMatrix0to1[9][2] = 1.59851054104555;
   fWeightMatrix0to1[10][2] = 0.211968624661105;
   fWeightMatrix0to1[11][2] = 0.811910894269588;
   fWeightMatrix0to1[12][2] = 0.447295320986063;
   fWeightMatrix0to1[13][2] = -0.0788413828958315;
   fWeightMatrix0to1[0][3] = 0.138462425960674;
   fWeightMatrix0to1[1][3] = 1.30817671579522;
   fWeightMatrix0to1[2][3] = 0.296894156804524;
   fWeightMatrix0to1[3][3] = -1.37924684322792;
   fWeightMatrix0to1[4][3] = -0.300113690750916;
   fWeightMatrix0to1[5][3] = -0.106768886795979;
   fWeightMatrix0to1[6][3] = 0.523243735326475;
   fWeightMatrix0to1[7][3] = 0.263751223946695;
   fWeightMatrix0to1[8][3] = 4.39310071154711;
   fWeightMatrix0to1[9][3] = 0.679174593843537;
   fWeightMatrix0to1[10][3] = -3.06099647360978;
   fWeightMatrix0to1[11][3] = 0.657959885057513;
   fWeightMatrix0to1[12][3] = -0.972902308462804;
   fWeightMatrix0to1[13][3] = -2.25841040863338;
   fWeightMatrix0to1[0][4] = -3.0329522856525;
   fWeightMatrix0to1[1][4] = -2.20972874165338;
   fWeightMatrix0to1[2][4] = 13.1479419758704;
   fWeightMatrix0to1[3][4] = 0.815659600799068;
   fWeightMatrix0to1[4][4] = -4.00407814730395;
   fWeightMatrix0to1[5][4] = -0.0676116494636533;
   fWeightMatrix0to1[6][4] = -0.975150440233607;
   fWeightMatrix0to1[7][4] = 4.23770153459808;
   fWeightMatrix0to1[8][4] = 4.77875155056132;
   fWeightMatrix0to1[9][4] = 1.01949141227115;
   fWeightMatrix0to1[10][4] = -2.06107769438511;
   fWeightMatrix0to1[11][4] = -2.54106696459123;
   fWeightMatrix0to1[12][4] = 0.0687823820410542;
   fWeightMatrix0to1[13][4] = -5.92121191119336;
   // weight matrix from layer 1 to 2
   fWeightMatrix1to2[0][0] = -0.621509855707168;
   fWeightMatrix1to2[0][1] = 0.730121868385032;
   fWeightMatrix1to2[0][2] = 2.72002615212586;
   fWeightMatrix1to2[0][3] = -0.110318549183625;
   fWeightMatrix1to2[0][4] = 1.27445226847816;
   fWeightMatrix1to2[0][5] = 0.0565701746288308;
   fWeightMatrix1to2[0][6] = -0.63427021171353;
   fWeightMatrix1to2[0][7] = -0.509103185500456;
   fWeightMatrix1to2[0][8] = 1.47592612258331;
   fWeightMatrix1to2[0][9] = 0.23839821537841;
   fWeightMatrix1to2[0][10] = 0.54206934271779;
   fWeightMatrix1to2[0][11] = 2.09060833313788;
   fWeightMatrix1to2[0][12] = -0.212155780718553;
   fWeightMatrix1to2[0][13] = -0.678193913202105;
   fWeightMatrix1to2[0][14] = -2.94224644247805;
}

inline double ReadMLPnew::GetMvaValue__( const std::vector<double>& inputValues ) const
{
   if (inputValues.size() != 4u) {
      std::cout << "Input vector needs to be of size " << 4 << std::endl;
      return 0;
   }

   std::array<double, 15> fWeights1 {{}};
   std::array<double, 1> fWeights2 {{}};
   fWeights1.back() = 1.; // bias neuron of the hidden layer

   // layer 0 to 1
   for (int o = 0; o < 14; o++) {
      std::array<double, 5> buffer; // no need to initialise
      for (int i = 0; i < 4; i++) {
         buffer[i] = fWeightMatrix0to1[o][i] * inputValues[i];
      } // loop over i
      buffer.back() = fWeightMatrix0to1[o][4]; // bias term
      for (int i = 0; i < 5; i++) {
         fWeights1[o] += buffer[i];
      } // loop over i
   } // loop over o
   for (int o = 0; o < 14; o++) {
      fWeights1[o] = ActivationFnc(fWeights1[o]);
   } // loop over o
   // layer 1 to 2
   for (int o = 0; o < 1; o++) {
      std::array<double, 15> buffer; // no need to initialise
      for (int i = 0; i < 15; i++) {
         buffer[i] = fWeightMatrix1to2[o][i] * fWeights1[i];
      } // loop over i
      for (int i = 0; i < 15; i++) {
         fWeights2[o] += buffer[i];
      } // loop over i
   } // loop over o
   for (int o = 0; o < 1; o++) {
      fWeights2[o] = OutputActivationFnc(fWeights2[o]);
   } // loop over o

   return fWeights2[0];
}
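
// In matrix form, GetMvaValue__ above computes
//   response = sigmoid( W1to2 * tanh( W0to1 * [x0, x1, x2, x3, 1] ) )
// where the appended 1 (and fWeights1.back() = 1) is the bias neuron: the last
// column of fWeightMatrix0to1 and the last entry of fWeightMatrix1to2 carry
// the bias terms of the 4-14-1 network.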

double ReadMLPnew::ActivationFnc(double x) const {
   // fast hyperbolic tan approximation
   if (x > 4.97) return 1;
   if (x < -4.97) return -1;
   float x2 = x * x;
   float a = x * (135135.0f + x2 * (17325.0f + x2 * (378.0f + x2)));
   float b = 135135.0f + x2 * (62370.0f + x2 * (3150.0f + x2 * 28.0f));
   return a / b;
}
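
// The rational function above is a Padé-style approximation of tanh(x),
// clamped to +/-1 for |x| > 4.97. A quick self-contained check against
// std::tanh (a sketch, not part of the generated file; FastTanh is a
// hypothetical copy of the formula, since ActivationFnc is a private member):
#if 0
#include <cmath>
#include <cstdio>

static double FastTanh( double x )
{
   if (x >  4.97) return  1;
   if (x < -4.97) return -1;
   double x2 = x * x;
   double a = x * (135135.0 + x2 * (17325.0 + x2 * (378.0 + x2)));
   double b = 135135.0 + x2 * (62370.0 + x2 * (3150.0 + x2 * 28.0));
   return a / b;
}

int main()
{
   // print the deviation from std::tanh on a coarse grid
   for (double x = -5.0; x <= 5.0; x += 0.5)
      std::printf( "x = % .2f   FastTanh - std::tanh = % .3e\n", x, FastTanh(x) - std::tanh(x) );
   return 0;
}
#endif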
double ReadMLPnew::OutputActivationFnc(double x) const {
   // sigmoid
   return 1.0/(1.0 + std::exp(-x));
}

// Clean up
inline void ReadMLPnew::Clear()
{
}
inline double ReadMLPnew::GetMvaValue( const std::vector<double>& inputValues ) const
{
   // classifier response value
   double retval = 0;

   // classifier response, sanity check first
   if (!IsStatusClean()) {
      std::cout << "Problem in class \"" << fClassName << "\": cannot return classifier response"
                << " because status is dirty" << std::endl;
   }
   else {
      std::vector<double> iV(inputValues);
      Transform( iV, -1 );
      retval = GetMvaValue__( iV );
   }

   return retval;
}

//_______________________________________________________________________
inline void ReadMLPnew::InitTransform_1()
{
   double fMin_1[3][4];
   double fMax_1[3][4];
   // Normalization transformation, initialisation
   fMin_1[0][0] = 0.0262883696705;
   fMax_1[0][0] = 4.92083930969;
   fScal_1[0][0] = 2.0/(fMax_1[0][0]-fMin_1[0][0]);
   fOff_1[0][0] = fMin_1[0][0]*fScal_1[0][0]+1.;
   fMin_1[1][0] = 0.00383873376995;
   fMax_1[1][0] = 4.99581956863;
   fScal_1[1][0] = 2.0/(fMax_1[1][0]-fMin_1[1][0]);
   fOff_1[1][0] = fMin_1[1][0]*fScal_1[1][0]+1.;
   fMin_1[2][0] = 0.00383873376995;
   fMax_1[2][0] = 4.99581956863;
   fScal_1[2][0] = 2.0/(fMax_1[2][0]-fMin_1[2][0]);
   fOff_1[2][0] = fMin_1[2][0]*fScal_1[2][0]+1.;
   fMin_1[0][1] = 0.00748753221706;
   fMax_1[0][1] = 4.8996257782;
   fScal_1[0][1] = 2.0/(fMax_1[0][1]-fMin_1[0][1]);
   fOff_1[0][1] = fMin_1[0][1]*fScal_1[0][1]+1.;
   fMin_1[1][1] = 9.33057162911e-05;
   fMax_1[1][1] = 4.55496644974;
   fScal_1[1][1] = 2.0/(fMax_1[1][1]-fMin_1[1][1]);
   fOff_1[1][1] = fMin_1[1][1]*fScal_1[1][1]+1.;
   fMin_1[2][1] = 9.33057162911e-05;
   fMax_1[2][1] = 4.8996257782;
   fScal_1[2][1] = 2.0/(fMax_1[2][1]-fMin_1[2][1]);
   fOff_1[2][1] = fMin_1[2][1]*fScal_1[2][1]+1.;
   fMin_1[0][2] = 3.39120670105e-05;
   fMax_1[0][2] = 0.0499259270728;
   fScal_1[0][2] = 2.0/(fMax_1[0][2]-fMin_1[0][2]);
   fOff_1[0][2] = fMin_1[0][2]*fScal_1[0][2]+1.;
   fMin_1[1][2] = 1.57269468559e-07;
   fMax_1[1][2] = 0.0499997623265;
   fScal_1[1][2] = 2.0/(fMax_1[1][2]-fMin_1[1][2]);
   fOff_1[1][2] = fMin_1[1][2]*fScal_1[1][2]+1.;
   fMin_1[2][2] = 1.57269468559e-07;
   fMax_1[2][2] = 0.0499997623265;
   fScal_1[2][2] = 2.0/(fMax_1[2][2]-fMin_1[2][2]);
   fOff_1[2][2] = fMin_1[2][2]*fScal_1[2][2]+1.;
   fMin_1[0][3] = 1;
   fMax_1[0][3] = 70;
   fScal_1[0][3] = 2.0/(fMax_1[0][3]-fMin_1[0][3]);
   fOff_1[0][3] = fMin_1[0][3]*fScal_1[0][3]+1.;
   fMin_1[1][3] = 0;
   fMax_1[1][3] = 93;
   fScal_1[1][3] = 2.0/(fMax_1[1][3]-fMin_1[1][3]);
   fOff_1[1][3] = fMin_1[1][3]*fScal_1[1][3]+1.;
   fMin_1[2][3] = 0;
   fMax_1[2][3] = 93;
   fScal_1[2][3] = 2.0/(fMax_1[2][3]-fMin_1[2][3]);
   fOff_1[2][3] = fMin_1[2][3]*fScal_1[2][3]+1.;
}
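
// Note on the constants above: fScal_1 and fOff_1 factor the min-max map
//   x' = 2*(x - min)/(max - min) - 1
// into x' = scale*x - offset, with scale = 2/(max - min) and
// offset = min*scale + 1, so Transform_1 below needs only one multiply and
// one subtract per variable. The first index selects the event class whose
// statistics are used (TMVA convention: 0 = signal, 1 = background,
// 2 = all classes combined; class 2 is what GetMvaValue ends up using, since
// it calls Transform with sigOrBgd = -1).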

//_______________________________________________________________________
inline void ReadMLPnew::Transform_1( std::vector<double>& iv, int cls ) const
{
   // Normalization transformation
   if (cls < 0 || cls > 2) cls = 2; // out-of-range class: use statistics over all classes
   const int nVar = 4;

   // get indices of used variables

   // define the indices of the variables which are transformed by this transformation
   static std::vector<int> indicesGet;
   static std::vector<int> indicesPut;

   if ( indicesGet.empty() ) {
      indicesGet.reserve(fNvars);
      indicesGet.push_back( 0 );
      indicesGet.push_back( 1 );
      indicesGet.push_back( 2 );
      indicesGet.push_back( 3 );
   }
   if ( indicesPut.empty() ) {
      indicesPut.reserve(fNvars);
      indicesPut.push_back( 0 );
      indicesPut.push_back( 1 );
      indicesPut.push_back( 2 );
      indicesPut.push_back( 3 );
   }

   static std::vector<double> dv; // note: the static buffers make this method non-reentrant
   dv.resize(nVar);
   for (int ivar = 0; ivar < nVar; ivar++) dv[ivar] = iv[indicesGet.at(ivar)];
   for (int ivar = 0; ivar < 4; ivar++) {
      double offset = fOff_1[cls][ivar];
      double scale = fScal_1[cls][ivar];
      iv[indicesPut.at(ivar)] = scale*dv[ivar] - offset;
   }
}

//_______________________________________________________________________
inline void ReadMLPnew::InitTransform()
{
   InitTransform_1();
}

//_______________________________________________________________________
inline void ReadMLPnew::Transform( std::vector<double>& iv, int sigOrBgd ) const
{
   Transform_1( iv, sigOrBgd );
}
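
// Example usage (a minimal sketch, not part of the generated file): construct
// the reader with the exact training variable names, then evaluate candidates
// with the input values supplied in the same order. The guard macro and the
// input values are hypothetical placeholders.
#ifdef READMLPNEW_EXAMPLE  // e.g. compile with -DREADMLPNEW_EXAMPLE
int main()
{
   std::vector<std::string> vars = {
      "max(track_1_IP,track_2_IP)",                 // maxTrackDCA_3D
      "max(abs(track_1_IP_xy),abs(track_2_IP_xy))", // maxTrackDCA_2D
      "track_1_track_2_DCA",                        // track-track 3D DCA
      "INTT_meanHits"                               // INTT avg. hits
   };
   ReadMLPnew reader( vars );

   // one (made-up) candidate, with values inside the training ranges quoted above
   std::vector<double> inputs = { 0.12, 0.08, 0.003, 12.0 };
   std::cout << "MLPnew response: " << reader.GetMvaValue( inputs ) << std::endl;

   return 0;
}
#endif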