// read_1d_array (excerpt): read a row of floats from the weight stream
std::vector<float> arr;
arr.push_back(tmp_float);

// read_from_file (excerpt): load a 3-D data chunk from a text file
std::ifstream fin(fname.c_str());
std::vector< std::vector<float> > tmp_single_depth;
tmp_single_depth.push_back(tmp_row);
data.push_back(tmp_single_depth);
// 2-D convolution layer, load_weights (excerpt): kernel count and shape, border
// mode, the kernel weights row by row, and finally one bias per kernel
fin >> m_kernels_cnt >> m_depth >> m_rows >> m_cols >> m_border_mode;
if(m_border_mode == "[") {
  // the border mode was absent from the file, so the token just read is already
  // the '[' opening the first kernel row: default to "valid" and skip one read
  m_border_mode = "valid";
  skip = true;
}

for(int k = 0; k < m_kernels_cnt; ++k) {
  std::vector< std::vector< std::vector<float> > > tmp_depths;
  for(int d = 0; d < m_depth; ++d) {
    std::vector< std::vector<float> > tmp_single_depth;
    for(int r = 0; r < m_rows; ++r) {
      if (!skip) { fin >> tmp_char; }   // consume the row's leading '['
      else { skip = false; }
      std::vector<float> tmp_row;
      for(int c = 0; c < m_cols; ++c) {
        fin >> tmp_float;
        tmp_row.push_back(tmp_float);
      }
      tmp_single_depth.push_back(tmp_row);
    }
    tmp_depths.push_back(tmp_single_depth);
  }
  m_kernels.push_back(tmp_depths);
}

// one bias per kernel
for(int k = 0; k < m_kernels_cnt; ++k) {
  fin >> tmp_float;
  m_bias.push_back(tmp_float);
}
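The nesting produced by this loader is kernel-major. A minimal standalone sketch of the resulting layout (the struct and accessor names are illustrative, not from the source):

#include <cstddef>
#include <vector>

// Illustrative only: mirrors the push order above, i.e. kernels[k][d][r][c]
// (kernel, input depth, row, column), with one bias per kernel k.
struct ConvWeights {
  std::vector<std::vector<std::vector<std::vector<float>>>> kernels;
  std::vector<float> bias;
};

float kernel_at(const ConvWeights &w, std::size_t k, std::size_t d,
                std::size_t r, std::size_t c) {
  return w.kernels[k][d][r][c];
}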
// activation layer, load_weights (excerpt): just the activation name, e.g. "relu"
fin >> m_activation_type;

// max-pooling layer, load_weights (excerpt): the pool size along each spatial axis
fin >> m_pool_x >> m_pool_y;
// dense (fully connected) layer, load_weights (excerpt):
// m_weights ends up with m_input_cnt rows of m_neurons values each
fin >> m_input_cnt >> m_neurons;
for(int i = 0; i < m_input_cnt; ++i) {
  std::vector<float> tmp_n;
  for(int n = 0; n < m_neurons; ++n) {
    fin >> tmp_float;
    tmp_n.push_back(tmp_float);
  }
  m_weights.push_back(tmp_n);
}
for(int n = 0; n < m_neurons; ++n) {
  fin >> tmp_float;
  m_bias.push_back(tmp_float);
}
// flatten layer, compute_output (excerpt): copy the 3-D chunk into a flat buffer
std::vector< std::vector< std::vector<float> > > im = dc->get_3d();
size_t csize = im[0].size();
size_t rsize = im[0][0].size();
size_t size = im.size() * csize * rsize;
// y_ret points at the flat output buffer of `size` floats
for(size_t i = 0, dst = 0; i < im.size(); ++i) {
  for(size_t j = 0; j < csize; ++j) {
    float * row = im[i][j].data();
    for(size_t k = 0; k < rsize; ++k) {
      y_ret[dst++] = row[k];
    }
  }
}
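This is the usual row-major flattening of a depth x rows x cols volume. A self-contained sketch of the same ordering (the helper name is illustrative, not from the source):

#include <cstddef>
#include <vector>

// Flattens volume[d][r][c] in the same depth-major, then row-major order
// as the loop above: element (i, j, k) lands at (i*rows + j)*cols + k.
std::vector<float> flatten_3d(
    const std::vector<std::vector<std::vector<float>>> &volume) {
  const std::size_t depth = volume.size();
  const std::size_t rows  = volume[0].size();
  const std::size_t cols  = volume[0][0].size();
  std::vector<float> flat(depth * rows * cols);
  for (std::size_t i = 0; i < depth; ++i)
    for (std::size_t j = 0; j < rows; ++j)
      for (std::size_t k = 0; k < cols; ++k)
        flat[(i * rows + j) * cols + k] = volume[i][j][k];
  return flat;
}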
// max-pooling layer, compute_output (excerpt)
std::vector< std::vector< std::vector<float> > > im = dc->get_3d();
std::vector< std::vector< std::vector<float> > > y_ret;
// the output keeps the depth; the spatial size shrinks by the pool factors
for(unsigned int i = 0; i < im.size(); ++i) {
  std::vector< std::vector<float> > tmp_y;
  for(unsigned int j = 0; j < (unsigned int)(im[0].size()/m_pool_x); ++j) {
    tmp_y.push_back(std::vector<float>((int)(im[0][0].size()/m_pool_y), 0.0));
  }
  y_ret.push_back(tmp_y);
}
for(unsigned int d = 0; d < y_ret.size(); ++d) {             // depth
  for(unsigned int x = 0; x < y_ret[0].size(); ++x) {        // output row
    unsigned int start_x = x*m_pool_x;
    unsigned int end_x = start_x + m_pool_x;
    for(unsigned int y = 0; y < y_ret[0][0].size(); ++y) {   // output column
      unsigned int start_y = y*m_pool_y;
      unsigned int end_y = start_y + m_pool_y;
      // take the maximum over the m_pool_x by m_pool_y window
      std::vector<float> values;
      for(unsigned int i = start_x; i < end_x; ++i) {
        for(unsigned int j = start_y; j < end_y; ++j) {
          values.push_back(im[d][i][j]);
        }
      }
      y_ret[d][x][y] = *max_element(values.begin(), values.end());
    }
  }
}
out->set_data(y_ret);
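Note that the output is sized by plain integer division, so trailing rows or columns that do not fill a whole pooling window are dropped. A quick illustrative check (the values are hypothetical):

#include <cstdio>

int main() {
  unsigned int rows = 7, cols = 7, pool_x = 2, pool_y = 2;
  // 7/2 == 3: the last row and column of the 7x7 map never reach the output
  std::printf("pooled output: %u x %u\n", rows / pool_x, cols / pool_y);
  return 0;
}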
// activation layer, compute_output (excerpt): dispatch on the chunk dimensionality
if (dc->get_data_dim() == 3) {
  // 3-D chunk: apply ReLU in place
  std::vector< std::vector< std::vector<float> > > y = dc->get_3d();
  if(m_activation_type == "relu") {
    for(unsigned int i = 0; i < y.size(); ++i) {
      for(unsigned int j = 0; j < y[0].size(); ++j) {
        for(unsigned int k = 0; k < y[0][0].size(); ++k) {
          if(y[i][j][k] < 0) y[i][j][k] = 0;
        }
      }
    }
  }
}
else if (dc->get_data_dim() == 1) {
  // flat chunk: ReLU, softmax or sigmoid
  std::vector<float> y = dc->get_1d();
  if(m_activation_type == "relu") {
    for(unsigned int k = 0; k < y.size(); ++k) {
      if(y[k] < 0) y[k] = 0;
    }
  }
  else if(m_activation_type == "softmax") {
    for(unsigned int k = 0; k < y.size(); ++k) {
      // exponentiate y[k] and accumulate the normalizing sum (body not shown)
    }
    for(unsigned int k = 0; k < y.size(); ++k) {
      // divide y[k] by that sum (body not shown)
    }
  }
  else if(m_activation_type == "sigmoid") {
    for(unsigned int k = 0; k < y.size(); ++k) {
      y[k] = 1/(1+exp(-y[k]));
    }
  }
}
else { throw "data dim not supported"; }
// 2-D convolution of a single depth slice, "valid" border mode:
// the output shrinks by (kernel size - 1) in each direction
std::vector< std::vector<float> > conv_single_depth_valid(
      std::vector< std::vector<float> > const & im,
      std::vector< std::vector<float> > const & k)
{
  size_t k1_size = k.size(), k2_size = k[0].size();
  unsigned int st_x = (k1_size - 1) >> 1;
  unsigned int st_y = (k2_size - 1) >> 1;

  std::vector< std::vector<float> > y(im.size() - 2*st_x,
      std::vector<float>(im[0].size() - 2*st_y, 0));

  for(unsigned int i = st_x; i < im.size()-st_x; ++i) {
    for(unsigned int j = st_y; j < im[0].size()-st_y; ++j) {
      float sum = 0;
      // the kernel is indexed back to front in both directions,
      // i.e. a true convolution rather than a cross-correlation
      for(unsigned int k1 = 0; k1 < k.size(); ++k1) {
        const float * k_data = k[k1_size-k1-1].data();
        const float * im_data = im[i-st_x+k1].data();
        for(unsigned int k2 = 0; k2 < k[0].size(); ++k2) {
          sum += k_data[k2_size-k2-1] * im_data[j-st_y+k2];
        }
      }
      y[i-st_x][j-st_y] = sum;
    }
  }
  return y;
}
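Since st_x = (kH - 1)/2 and st_y = (kW - 1)/2, an odd-sized kernel gives the familiar "valid" output size of (H - kH + 1) x (W - kW + 1). A quick numeric check (the sizes are illustrative):

#include <cstdio>

int main() {
  unsigned int H = 28, W = 28, kH = 3, kW = 3;
  unsigned int st_x = (kH - 1) / 2, st_y = (kW - 1) / 2;
  // prints 26 x 26, i.e. (28 - 3 + 1) in each direction
  std::printf("valid output: %u x %u\n", H - 2 * st_x, W - 2 * st_y);
  return 0;
}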
// 2-D convolution of a single depth slice, "same" border mode: the output keeps
// the input size; taps falling outside the image are skipped, i.e. zero padding
std::vector< std::vector<float> > conv_single_depth_same(
      std::vector< std::vector<float> > const & im,
      std::vector< std::vector<float> > const & k)
{
  size_t k1_size = k.size(), k2_size = k[0].size();
  unsigned int st_x = (k1_size - 1) >> 1;
  unsigned int st_y = (k2_size - 1) >> 1;

  size_t max_imc = im.size() - 1;
  size_t max_imr = im[0].size() - 1;
  std::vector< std::vector<float> > y(im.size(),
      std::vector<float>(im[0].size(), 0));

  for(unsigned int i = 0; i < im.size(); ++i) {
    for(unsigned int j = 0; j < im[0].size(); ++j) {
      float sum = 0;
      for(unsigned int k1 = 0; k1 < k.size(); ++k1) {
        // check the row bounds before taking the row pointer, so that
        // im[i-st_x+k1] is never an out-of-range index
        if(i+k1 < st_x) continue;
        if(i+k1 > st_x + max_imc) continue;
        const float * k_data = k[k1_size-k1-1].data();
        const float * im_data = im[i-st_x+k1].data();
        for(unsigned int k2 = 0; k2 < k[0].size(); ++k2) {
          // skip columns that fall outside the image
          if(j+k2 < st_y) continue;
          if(j+k2 > st_y + max_imr) continue;
          sum += k_data[k2_size-k2-1] * im_data[j-st_y+k2];
        }
      }
      y[i][j] = sum;
    }
  }
  return y;
}
// 2-D convolution layer, compute_output (excerpt):
// one output map per kernel, accumulated over the input depth, plus a bias
unsigned int st_x = (m_kernels[0][0].size()-1) >> 1;
unsigned int st_y = (m_kernels[0][0][0].size()-1) >> 1;
std::vector< std::vector< std::vector<float> > > y_ret;
auto const & im = dc->get_3d();

size_t size_x = (m_border_mode == "valid")? im[0].size() - 2 * st_x : im[0].size();
size_t size_y = (m_border_mode == "valid")? im[0][0].size() - 2 * st_y : im[0][0].size();
for(unsigned int i = 0; i < m_kernels.size(); ++i) {   // allocate the output maps
  std::vector< std::vector<float> > tmp;
  for(unsigned int j = 0; j < size_x; ++j) {
    tmp.emplace_back(std::vector<float>(size_y, 0.0));
  }
  y_ret.push_back(tmp);
}

for(unsigned int j = 0; j < m_kernels.size(); ++j) {   // kernel index
  for(unsigned int m = 0; m < im.size(); ++m) {        // input depth
    // convolve one depth slice with the matching kernel slice
    std::vector< std::vector<float> > tmp_w = (m_border_mode == "valid") ?
        conv_single_depth_valid(im[m], m_kernels[j][m]) :
        conv_single_depth_same(im[m], m_kernels[j][m]);

    for(unsigned int x = 0; x < tmp_w.size(); ++x) {
      for(unsigned int y = 0; y < tmp_w[0].size(); ++y) {
        y_ret[j][x][y] += tmp_w[x][y];
      }
    }
  }

  // add the kernel's bias to every position of its output map
  for(unsigned int x = 0; x < y_ret[0].size(); ++x) {
    for(unsigned int y = 0; y < y_ret[0][0].size(); ++y) {
      y_ret[j][x][y] += m_bias[j];
    }
  }
}
// dense layer, compute_output (excerpt): y_ret = im * m_weights + m_bias,
// with the inner loop over output neurons unrolled by a factor of 8
size_t size = m_weights[0].size();   // number of neurons
size_t size8 = size >> 3;
// y_ret points at the flat output buffer of `size` zero-initialized floats

auto const & im = dc->get_1d();

for(size_t j = 0; j < m_weights.size(); ++j) {   // iterate over the inputs
  const float * w = m_weights[j].data();
  float p = im[j];
  size_t k = 0;
  for(size_t i = 0; i < size8; ++i) {            // unrolled accumulation
    y_ret[k]   += w[k]   * p;
    y_ret[k+1] += w[k+1] * p;
    y_ret[k+2] += w[k+2] * p;
    y_ret[k+3] += w[k+3] * p;
    y_ret[k+4] += w[k+4] * p;
    y_ret[k+5] += w[k+5] * p;
    y_ret[k+6] += w[k+6] * p;
    y_ret[k+7] += w[k+7] * p;
    k += 8;
  }
  while (k < size) { y_ret[k] += w[k] * p; ++k; }   // remainder
}
for(size_t i = 0; i < size; ++i) {
  y_ret[i] += m_bias[i];
}
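Unrolling aside, this is a plain affine map, out[k] = sum_j in[j] * W[j][k] + b[k], with W stored input-major as the dense load_weights excerpt above suggests. A straightforward reference version (the function name is illustrative, not from the source):

#include <cstddef>
#include <vector>

// out = x * W + b, with W indexed as W[input][neuron].
std::vector<float> dense_forward(const std::vector<float> &x,
                                 const std::vector<std::vector<float>> &W,
                                 const std::vector<float> &b) {
  std::vector<float> out(b);                        // start from the biases
  for (std::size_t j = 0; j < W.size(); ++j)        // inputs
    for (std::size_t k = 0; k < W[j].size(); ++k)   // neurons
      out[k] += x[j] * W[j][k];
  return out;
}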
// KerasModel::compute_output (excerpt): each layer's output becomes the next
// layer's input; intermediate chunks are freed, but never the caller's input
if(inp != dc) delete inp;
// ...
std::vector<float> flat_out = out->get_1d();
// KerasModel::load_weights (excerpt): read the layer list from the network text
// file and dispatch on each layer's type string
std::ifstream fin(input_fname.c_str());
// for every layer: a label, an index and the layer type name
fin >> tmp_str >> tmp_int >> layer_type;
if(layer_type == "Convolution2D")     { /* create the convolution layer */ }
else if(layer_type == "Activation")   { /* create the activation layer */ }
else if(layer_type == "MaxPooling2D") { /* create the max-pooling layer */ }
else if(layer_type == "Flatten")      { /* create the flatten layer */ }
else if(layer_type == "Dense")        { /* create the dense layer */ }
else if(layer_type == "Dropout")      { /* dropout is skipped at inference time */ }
// if no layer could be created:
std::cout << "Layer is empty, maybe it is not defined? Cannot define network." << std::endl;

// KerasModel::get_output_length (excerpt): walk back to the last layer that
// reports a nonzero number of output units
while ((i > 0) && (m_layers[i]->get_output_units() == 0)) --i;
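Typical use of the model, put together from the constructor, read_from_file and compute_output signatures in this file (a minimal sketch; the header name, the keras::DataChunk2D type and both file names are assumptions):

#include <iostream>
#include <vector>
#include "keras_model.h"

int main() {
  keras::KerasModel model("dumped_cnn.nnet");   // network dumped from Keras

  keras::DataChunk2D sample;                    // one input volume
  sample.read_from_file("sample_0.dat");

  std::vector<float> scores = model.compute_output(&sample);
  for (float s : scores) std::cout << s << ' ';
  std::cout << std::endl;
  return 0;
}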