KerasModel.cxx
#include "NCID/func/KerasModel.h"

#include <iostream>
#include <fstream>
#include <algorithm>
#include <math.h>

std::vector<float> keras::read_1d_array(std::ifstream &fin, int cols) {
  std::vector<float> arr;
  float tmp_float;
  char tmp_char;
  fin >> tmp_char; // for '['
  for(int n = 0; n < cols; ++n) {
    fin >> tmp_float;
    arr.push_back(tmp_float);
  }
  fin >> tmp_char; // for ']'
  return arr;
}
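// For reference, a sketch of the token stream read_1d_array consumes,
// inferred from the reads above (illustrative values, cols = 3; operator>>
// skips whitespace, so line breaks inside the brackets do not matter):
//
//   [ 0.25 -1.5 0.75 ]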

void keras::DataChunk2D::read_from_file(const std::string &fname) {
  std::ifstream fin(fname.c_str());
  fin >> m_depth >> m_rows >> m_cols;

  for(int d = 0; d < m_depth; ++d) {
    std::vector< std::vector<float> > tmp_single_depth;
    for(int r = 0; r < m_rows; ++r) {
      std::vector<float> tmp_row = keras::read_1d_array(fin, m_cols);
      tmp_single_depth.push_back(tmp_row);
    }
    data.push_back(tmp_single_depth);
  }
  fin.close();
}
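// A file-layout sketch matching read_from_file, inferred from the parser
// (illustrative values): a "depth rows cols" header, then depth*rows
// bracketed rows of cols floats each, e.g. for a 1x2x3 chunk:
//
//   1 2 3
//   [ 0.1 0.2 0.3 ]
//   [ 0.4 0.5 0.6 ]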

void keras::LayerConv2D::load_weights(std::ifstream &fin) {
  char tmp_char = ' ';
  std::string tmp_str = "";
  float tmp_float;
  bool skip = false;
  fin >> m_kernels_cnt >> m_depth >> m_rows >> m_cols >> m_border_mode;
  if(m_border_mode == "[") {
    m_border_mode = "valid";
    skip = true;
  }

  // reading kernel weights
  for(int k = 0; k < m_kernels_cnt; ++k) {
    std::vector< std::vector< std::vector<float> > > tmp_depths;
    for(int d = 0; d < m_depth; ++d) {
      std::vector< std::vector<float> > tmp_single_depth;
      for(int r = 0; r < m_rows; ++r) {
        if (!skip) { fin >> tmp_char; } // for '['
        else { skip = false; }
        std::vector<float> tmp_row;
        for(int c = 0; c < m_cols; ++c) {
          fin >> tmp_float;
          tmp_row.push_back(tmp_float);
        }
        fin >> tmp_char; // for ']'
        tmp_single_depth.push_back(tmp_row);
      }
      tmp_depths.push_back(tmp_single_depth);
    }
    m_kernels.push_back(tmp_depths);
  }
  // reading kernel biases
  fin >> tmp_char; // for '['
  for(int k = 0; k < m_kernels_cnt; ++k) {
    fin >> tmp_float;
    m_bias.push_back(tmp_float);
  }
  fin >> tmp_char; // for ']'
}
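// Conv2D weight-dump sketch implied by the parser above (illustrative
// values): a "kernels depth rows cols border_mode" header, one bracketed
// row of cols weights per kernel row, then a bracketed bias vector with one
// entry per kernel. If border_mode is absent from the dump, the first '['
// is consumed into m_border_mode and the layer falls back to "valid"
// (the skip flag above).
//
//   2 1 2 2 valid
//   [ 0.10 -0.20 ]
//   [ 0.30 0.40 ]
//   ...
//   [ 0.05 -0.05 ]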

void keras::LayerActivation::load_weights(std::ifstream &fin) {
  fin >> m_activation_type;
  //std::cout << "Activation type " << m_activation_type << std::endl;
}

void keras::LayerMaxPooling::load_weights(std::ifstream &fin) {
  fin >> m_pool_x >> m_pool_y;
  //std::cout << "MaxPooling " << m_pool_x << "x" << m_pool_y << std::endl;
}

void keras::LayerDense::load_weights(std::ifstream &fin) {
  fin >> m_input_cnt >> m_neurons;
  float tmp_float;
  char tmp_char = ' ';
  for(int i = 0; i < m_input_cnt; ++i) {
    std::vector<float> tmp_n;
    fin >> tmp_char; // for '['
    for(int n = 0; n < m_neurons; ++n) {
      fin >> tmp_float;
      tmp_n.push_back(tmp_float);
    }
    fin >> tmp_char; // for ']'
    m_weights.push_back(tmp_n);
  }
  //std::cout << "weights " << m_weights.size() << std::endl;
  fin >> tmp_char; // for '['
  for(int n = 0; n < m_neurons; ++n) {
    fin >> tmp_float;
    m_bias.push_back(tmp_float);
  }
  fin >> tmp_char; // for ']'
  //std::cout << "bias " << m_bias.size() << std::endl;
}
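// Dense weight-dump sketch implied by the parser (illustrative values):
// an "input_cnt neurons" header, input_cnt bracketed rows of neurons
// weights each, then a bracketed bias vector of length neurons:
//
//   2 3
//   [ 0.1 0.2 0.3 ]
//   [ 0.4 0.5 0.6 ]
//   [ 0.01 0.02 0.03 ]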

keras::KerasModel::KerasModel(const std::string &input_fname, bool verbose)
  : m_verbose(verbose)
{
  load_weights(input_fname);
}


keras::DataChunk* keras::LayerFlatten::compute_output(keras::DataChunk* dc)
{
  std::vector< std::vector< std::vector<float> > > im = dc->get_3d();

  size_t csize = im[0].size();
  size_t rsize = im[0][0].size();
  size_t size = im.size() * csize * rsize;
  keras::DataChunkFlat *out = new DataChunkFlat(size, 0);
  float * y_ret = out->get_1d_rw().data();
  for(size_t i = 0, dst = 0; i < im.size(); ++i) {
    for(size_t j = 0; j < csize; ++j) {
      float * row = im[i][j].data();
      for(size_t k = 0; k < rsize; ++k) {
        y_ret[dst++] = row[k];
      }
    }
  }

  return out;
}
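// Note on ordering: the flattened index is dst = (i*csize + j)*rsize + k,
// i.e. depth-major followed by the two spatial indices, so the exported
// Dense weights presumably follow the same convention.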


keras::DataChunk* keras::LayerMaxPooling::compute_output(keras::DataChunk* dc)
{
  std::vector< std::vector< std::vector<float> > > im = dc->get_3d();
  std::vector< std::vector< std::vector<float> > > y_ret;
  for(unsigned int i = 0; i < im.size(); ++i) {
    std::vector< std::vector<float> > tmp_y;
    for(unsigned int j = 0; j < (unsigned int)(im[0].size()/m_pool_x); ++j) {
      tmp_y.push_back(std::vector<float>((int)(im[0][0].size()/m_pool_y), 0.0));
    }
    y_ret.push_back(tmp_y);
  }
  for(unsigned int d = 0; d < y_ret.size(); ++d) {
    for(unsigned int x = 0; x < y_ret[0].size(); ++x) {
      unsigned int start_x = x*m_pool_x;
      unsigned int end_x = start_x + m_pool_x;
      for(unsigned int y = 0; y < y_ret[0][0].size(); ++y) {
        unsigned int start_y = y*m_pool_y;
        unsigned int end_y = start_y + m_pool_y;

        std::vector<float> values;
        for(unsigned int i = start_x; i < end_x; ++i) {
          for(unsigned int j = start_y; j < end_y; ++j) {
            values.push_back(im[d][i][j]);
          }
        }
        y_ret[d][x][y] = *std::max_element(values.begin(), values.end());
      }
    }
  }
  keras::DataChunk *out = new keras::DataChunk2D();
  out->set_data(y_ret);
  return out;
}
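// The pooling windows are non-overlapping m_pool_x x m_pool_y blocks
// (stride equals the pool size), and the integer divisions above truncate
// any partial border window: e.g. a 5x5 map with 2x2 pooling gives a 2x2
// output, dropping the last row and column.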

void keras::missing_activation_impl(const std::string &act)
{
  std::cout << "Activation " << act << " not defined!" << std::endl;
  std::cout << "Please add its implementation before use." << std::endl;
  exit(1);
}

keras::DataChunk* keras::LayerActivation::compute_output(keras::DataChunk* dc)
{
  if (dc->get_data_dim() == 3) {
    std::vector< std::vector< std::vector<float> > > y = dc->get_3d();
    if(m_activation_type == "relu") {
      for(unsigned int i = 0; i < y.size(); ++i) {
        for(unsigned int j = 0; j < y[0].size(); ++j) {
          for(unsigned int k = 0; k < y[0][0].size(); ++k) {
            if(y[i][j][k] < 0) y[i][j][k] = 0;
          }
        }
      }
      keras::DataChunk *out = new keras::DataChunk2D();
      out->set_data(y);
      return out;
    }
    else {
      keras::missing_activation_impl(m_activation_type);
    }
  }
  else if (dc->get_data_dim() == 1) { // flat data, use 1D
    std::vector<float> y = dc->get_1d();
    if(m_activation_type == "relu") {
      for(unsigned int k = 0; k < y.size(); ++k) {
        if(y[k] < 0) y[k] = 0;
      }
    }
    else if(m_activation_type == "softmax") {
      float sum = 0.0;
      for(unsigned int k = 0; k < y.size(); ++k) {
        y[k] = exp(y[k]);
        sum += y[k];
      }
      for(unsigned int k = 0; k < y.size(); ++k) {
        y[k] /= sum;
      }
    }
    else if(m_activation_type == "sigmoid") {
      for(unsigned int k = 0; k < y.size(); ++k) {
        y[k] = 1/(1+exp(-y[k]));
      }
    }
    else {
      keras::missing_activation_impl(m_activation_type);
    }

    keras::DataChunk *out = new DataChunkFlat();
    out->set_data(y);
    return out;
  }
  else { throw "data dim not supported"; }

  return dc;
}
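// Element-wise forms applied above, for reference:
//   relu:    y = max(0, x)                      (1-D and 3-D chunks)
//   softmax: y_k = exp(x_k) / sum_j exp(x_j)    (1-D chunks only)
//   sigmoid: y = 1 / (1 + exp(-x))              (1-D chunks only)
// Any other activation aborts via missing_activation_impl().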


// with border mode = valid
std::vector< std::vector<float> > keras::conv_single_depth_valid(
  std::vector< std::vector<float> > const & im,
  std::vector< std::vector<float> > const & k)
{
  size_t k1_size = k.size(), k2_size = k[0].size();
  unsigned int st_x = (k1_size - 1) >> 1;
  unsigned int st_y = (k2_size - 1) >> 1;

  std::vector< std::vector<float> > y(im.size() - 2*st_x,
                                      std::vector<float>(im[0].size() - 2*st_y, 0));

  for(unsigned int i = st_x; i < im.size()-st_x; ++i) {
    for(unsigned int j = st_y; j < im[0].size()-st_y; ++j) {

      float sum = 0;
      for(unsigned int k1 = 0; k1 < k.size(); ++k1) {
        const float * k_data = k[k1_size-k1-1].data();
        const float * im_data = im[i-st_x+k1].data();
        for(unsigned int k2 = 0; k2 < k[0].size(); ++k2) {
          sum += k_data[k2_size-k2-1] * im_data[j-st_y+k2];
        }
      }
      y[i-st_x][j-st_y] = sum;
    }
  }
  return y;
}
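// For an R x C image and a K1 x K2 kernel (odd sizes assumed, so
// st_x = (K1-1)/2, st_y = (K2-1)/2), the "valid" output is
// (R - 2*st_x) x (C - 2*st_y). The kernel is indexed back to front
// (k_data[k2_size-k2-1]), so this is a true convolution with a flipped
// kernel rather than a cross-correlation.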


// with border mode = same
std::vector< std::vector<float> > keras::conv_single_depth_same(
  std::vector< std::vector<float> > const & im,
  std::vector< std::vector<float> > const & k)
{
  size_t k1_size = k.size(), k2_size = k[0].size();
  unsigned int st_x = (k1_size - 1) >> 1;
  unsigned int st_y = (k2_size - 1) >> 1;

  size_t max_imc = im.size() - 1;
  size_t max_imr = im[0].size() - 1;
  std::vector< std::vector<float> > y(im.size(),
                                      std::vector<float>(im[0].size(), 0));

  for(unsigned int i = 0; i < im.size(); ++i) {
    for(unsigned int j = 0; j < im[0].size(); ++j) {

      float sum = 0;
      for(unsigned int k1 = 0; k1 < k.size(); ++k1) {
        // the row checks must come before indexing im: with unsigned
        // arithmetic, i-st_x+k1 wraps around for rows above the image
        if(i+k1 < st_x) continue;
        if(i+k1 > st_x + max_imc) continue;
        const float * k_data = k[k1_size-k1-1].data();
        const float * im_data = im[i-st_x+k1].data();
        for(unsigned int k2 = 0; k2 < k[0].size(); ++k2) {
          if(j+k2 < st_y) continue;
          if(j+k2 > st_y + max_imr) continue;

          sum += k_data[k2_size-k2-1] * im_data[j-st_y+k2];
        }
      }
      y[i][j] = sum;
    }
  }
  return y;
}
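// "Same" mode keeps the input size: positions where the flipped kernel
// would stick out of the image are skipped by the four range checks, which
// is equivalent to zero-padding the image with st_x rows and st_y columns
// on each side.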


keras::DataChunk* keras::LayerConv2D::compute_output(keras::DataChunk* dc) {

  unsigned int st_x = (m_kernels[0][0].size()-1) >> 1;
  unsigned int st_y = (m_kernels[0][0][0].size()-1) >> 1;
  std::vector< std::vector< std::vector<float> > > y_ret;
  auto const & im = dc->get_3d();

  size_t size_x = (m_border_mode == "valid") ? im[0].size() - 2 * st_x : im[0].size();
  size_t size_y = (m_border_mode == "valid") ? im[0][0].size() - 2 * st_y : im[0][0].size();
  for(unsigned int i = 0; i < m_kernels.size(); ++i) { // depth
    std::vector< std::vector<float> > tmp;
    tmp.reserve(size_x);
    for(unsigned int j = 0; j < size_x; ++j) { // rows
      tmp.emplace_back(std::vector<float>(size_y, 0.0));
    }
    y_ret.push_back(tmp);
  }

  for(unsigned int j = 0; j < m_kernels.size(); ++j) { // loop over kernels
    for(unsigned int m = 0; m < im.size(); ++m) { // loop over image depth

      std::vector< std::vector<float> > tmp_w = (m_border_mode == "valid") ?
        keras::conv_single_depth_valid(im[m], m_kernels[j][m]) :
        keras::conv_single_depth_same(im[m], m_kernels[j][m]);

      for(unsigned int x = 0; x < tmp_w.size(); ++x) {
        for(unsigned int y = 0; y < tmp_w[0].size(); ++y) {
          y_ret[j][x][y] += tmp_w[x][y];
        }
      }
    }

    for(unsigned int x = 0; x < y_ret[0].size(); ++x) {
      for(unsigned int y = 0; y < y_ret[0][0].size(); ++y) {
        y_ret[j][x][y] += m_bias[j];
      }
    }
  }

  keras::DataChunk *out = new keras::DataChunk2D();
  out->set_data(y_ret);
  return out;
}
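// Standard multi-channel forward pass: output feature map j is the sum
// over input depths m of conv(im[m], m_kernels[j][m]), plus the scalar
// bias m_bias[j] added to every pixel.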

keras::DataChunk* keras::LayerDense::compute_output(keras::DataChunk* dc)
{
  //std::cout << "weights: input size " << m_weights.size() << std::endl;
  //std::cout << "weights: neurons size " << m_weights[0].size() << std::endl;
  //std::cout << "bias " << m_bias.size() << std::endl;
  size_t size = m_weights[0].size();
  size_t size8 = size >> 3;
  keras::DataChunkFlat *out = new DataChunkFlat(size, 0);
  float * y_ret = out->get_1d_rw().data();

  auto const & im = dc->get_1d();

  for(size_t j = 0; j < m_weights.size(); ++j) { // iter over input
    const float * w = m_weights[j].data();
    float p = im[j];
    size_t k = 0;
    for(size_t i = 0; i < size8; ++i) { // iter over neurons
      y_ret[k]   += w[k]   * p; // vectorize if you can
      y_ret[k+1] += w[k+1] * p;
      y_ret[k+2] += w[k+2] * p;
      y_ret[k+3] += w[k+3] * p;
      y_ret[k+4] += w[k+4] * p;
      y_ret[k+5] += w[k+5] * p;
      y_ret[k+6] += w[k+6] * p;
      y_ret[k+7] += w[k+7] * p;
      k += 8;
    }
    while (k < size) { y_ret[k] += w[k] * p; ++k; }
  }
  for(size_t i = 0; i < size; ++i) { // add biases
    y_ret[i] += m_bias[i];
  }

  return out;
}
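// The inner loop is unrolled by hand eight ways (size8 = size >> 3) as a
// vectorization hint; the trailing while loop handles the size % 8
// remainder, so the result is identical to a plain y_ret[k] += w[k] * p
// loop over all neurons.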


std::vector<float> keras::KerasModel::compute_output(keras::DataChunk *dc)
{
  //std::cout << std::endl << "KerasModel compute output" << std::endl;
  //std::cout << "Input data size:" << std::endl;
  //dc->show_name();

  keras::DataChunk *inp = dc;
  keras::DataChunk *out = 0;
  for(int l = 0; l < (int)m_layers.size(); ++l) {
    //std::cout << "Processing layer " << m_layers[l]->get_name() << std::endl;
    out = m_layers[l]->compute_output(inp);

    //std::cout << "Input" << std::endl;
    //inp->show_name();
    //std::cout << "Output" << std::endl;
    //out->show_name();
    if(inp != dc) delete inp;
    inp = out;
  }

  std::vector<float> flat_out = out->get_1d();
  delete out;

  return flat_out;
}
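// Minimal usage sketch (hypothetical file names; assumes dumps in the
// formats parsed in this file). Note that compute_output deletes the
// intermediate chunks but never the caller's input chunk:
//
//   keras::KerasModel model("network.dump", /*verbose=*/false);
//   keras::DataChunk2D sample;
//   sample.read_from_file("sample.dat");
//   std::vector<float> probs = model.compute_output(&sample);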

void keras::KerasModel::load_weights(const std::string &input_fname)
{
  if(m_verbose) std::cout << "Reading model from " << input_fname << std::endl;
  std::ifstream fin(input_fname.c_str());
  std::string layer_type = "";
  std::string tmp_str = "";
  int tmp_int = 0;

  fin >> tmp_str >> m_layers_cnt;
  if(m_verbose) std::cout << "Layers " << m_layers_cnt << std::endl;

  for(int layer = 0; layer < m_layers_cnt; ++layer) { // iterate over layers
    fin >> tmp_str >> tmp_int >> layer_type;
    if(m_verbose) std::cout << "Layer " << tmp_int << " " << layer_type << std::endl;

    Layer *l = 0L;
    if(layer_type == "Convolution2D") {
      l = new LayerConv2D();
    }
    else if(layer_type == "Activation") {
      l = new LayerActivation();
    }
    else if(layer_type == "MaxPooling2D") {
      l = new LayerMaxPooling();
    }
    else if(layer_type == "Flatten") {
      l = new LayerFlatten();
    }
    else if(layer_type == "Dense") {
      l = new LayerDense();
    }
    else if(layer_type == "Dropout") {
      continue; // we don't need dropout layers in prediction mode
    }
    if(l == 0L) {
      std::cout << "Layer is empty, maybe it is not defined? Cannot define network." << std::endl;
      return;
    }
    l->load_weights(fin);
    m_layers.push_back(l);
  }

  fin.close();
}
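// Network-file header sketch implied by the reads above (illustrative;
// the leading words are read into tmp_str and ignored, only their
// positions matter). Each "layer" line is followed by that layer's own
// weight block in the per-layer formats sketched earlier:
//
//   layers 3
//   layer 0 Convolution2D
//   ...
//   layer 1 Activation
//   relu
//   layer 2 Flatten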

keras::KerasModel::~KerasModel()
{
  for(int i = 0; i < (int)m_layers.size(); ++i) {
    delete m_layers[i];
  }
}

int keras::KerasModel::get_output_length() const
{
  int i = m_layers.size() - 1;
  while ((i > 0) && (m_layers[i]->get_output_units() == 0)) --i;
  return m_layers[i]->get_output_units();
}