Functions | Variables
regression Namespace Reference

Functions

def regression (outdim=1, hparams={}, input_dims=(151, 141))
 
def validate (model, generator, steps)
 
def callbacks (model_name, group, tensorboard=True)
 
def parse_args ()
 
def generate_name (basename, args)
 

Variables

string DATA_FORMAT = 'channels_last'
 

Function Documentation

def regression.callbacks (   model_name,
  group,
  tensorboard = True 
)
Creates callbacks to be used during training.

# Arguments
    model_name (str): name for the model that's being trained
    group (str): group of experiments that this model is part of
    tensorboard (bool): whether to run tensorboard or not

# Returns
    (list) callbacks

Definition at line 123 of file regression.py.

References novadaq::HexUtils.format().

def callbacks(model_name, group, tensorboard=True):
    """
    Creates callbacks to be used during training.

    # Arguments
        model_name (str): name for the model that's being trained
        group (str): group of experiments that this model is part of
        tensorboard (bool): whether to run tensorboard or not

    # Returns
        (list) callbacks
    """
    weights_path = os.path.join(WEIGHTS_DIR, model_name)
    logs_path = os.path.join(LOG_DIR, '{}.log'.format(model_name))
    tensorboard_path = os.path.join(TENSORBOARD_DIR, group)
    try:
        # BUG FIX: the original called os.makedir(), which does not exist
        # (the stdlib function is os.mkdir), so an AttributeError escaped
        # the `except OSError` guard. os.makedirs also creates any missing
        # parent directories.
        os.makedirs(tensorboard_path)
    except OSError:
        # Directory already exists -- nothing to do.
        pass

    tensorboard_path = os.path.join(tensorboard_path, model_name)

    # Stop when val_loss has not improved by min_delta for `patience` epochs.
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.1,
                                   patience=5, verbose=0, mode='auto')
    # Persist only the best (lowest val_loss) weights seen so far.
    model_saver = ModelCheckpoint(weights_path,
                                  monitor='val_loss',
                                  verbose=0,
                                  save_best_only=True,
                                  mode='auto')
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=4,
                                  min_lr=0.000001, epsilon=0.4)
    csv_logger = CSVLogger(logs_path)
    model_callbacks = [early_stopping, model_saver, reduce_lr, csv_logger]

    # TensorBoard callback only makes sense on the tensorflow backend.
    if K.backend() == 'tensorflow' and tensorboard:
        model_callbacks.append(TensorBoard(log_dir=tensorboard_path,
                                           write_graph=False,
                                           write_images=False))

    return model_callbacks
165 
def callbacks(model_name, group, tensorboard=True)
Definition: regression.py:123
std::string format(const int32_t &value, const int &ndigits=8)
Definition: HexUtils.cpp:14
def regression.generate_name (   basename,
  args 
)
Generates a unique file name for a model that includes argument settings.

# Returns
    (str) file name

Definition at line 192 of file regression.py.

References novadaq::HexUtils.format(), and runNovaSAM.str.

def generate_name(basename, args):
    """
    Generates a unique file name for a model that includes argument settings.

    # Returns
        (str) file name
    """
    # One "_key_value" fragment per argument, in the dict's iteration order.
    settings = ['_{}_{}'.format(key, args.get(key)) for key in args]
    # Timestamp suffix (no separator) makes the name unique per run.
    stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    return basename + ''.join(settings) + str(stamp)
def generate_name(basename, args)
Definition: regression.py:192
std::string format(const int32_t &value, const int &ndigits=8)
Definition: HexUtils.cpp:14
def regression.parse_args ( )
Parse training arguments.

# Returns:
    l2 (float): l2 norm multiplier
    fc_l2 (float): l2 norm multiplier for fully connected layers
    dropout (float): dropout applied to fully connected layers
    optimizer (str): 'adam' or 'sgd'
    lr (float): learning rate
    batch_size (int): training batch size

Definition at line 166 of file regression.py.

References vars.

def parse_args():
    """
    Parse training arguments.

    # Returns:
        l2 (float): l2 norm multiplier
        fc_l2 (float): l2 norm multiplier for fully connected layers
        dropout (float): dropout applied to fully connected layers
        optimizer (str): 'adam' or 'sgd'
        lr (float): learning rate
        batch_size (int): training batch size
    """
    # NOTE: the rendered listing had lost the `def parse_args():` header
    # line; it is restored here to match the documented signature.
    parser = argparse.ArgumentParser()
    parser.add_argument("--l2", default=0., type=float)
    parser.add_argument("--fc_l2", default=0., type=float)
    parser.add_argument("--dropout", default=0., type=float)
    parser.add_argument("--optimizer", default='adam', type=str)
    parser.add_argument("--lr", default=0.001, type=float)
    parser.add_argument("--batch_size", default=64, type=int)
    parser.add_argument("--filter_number", default=32, type=int,
                        help="Number of Conv filters for each layer.")
    args = parser.parse_args()
    # Return a plain dict so callers can use .get() / iteration.
    return vars(args)
190 
191 
const std::map< std::pair< std::string, std::string >, Variable > vars
def parse_args()
Definition: regression.py:166
def regression.regression (   outdim = 1,
  hparams = {},
  input_dims = (151, 141) 
)
Creates the regression CNN as a compiled Keras model.

# Arguments
    outdim (int): the dimensionality of the regression output
    hparams (dict): dictionary of hyperparameters, can be empty
    input_dims (tuple(int)): the dimensionality of the input

# Returns
    (keras.Model)

Definition at line 17 of file regression.py.

References novadaq::HexUtils.format(), and print.

def regression(outdim=1, hparams=None, input_dims=(151, 141)):
    """
    Creates the regression CNN as a compiled Keras model.

    # Arguments
        outdim (int): the dimensionality of the regression output
        hparams (dict): dictionary of hyperparameters, can be empty or None
        input_dims (tuple(int)): the dimensionality of the input

    # Returns
        (keras.Model)
    """
    # Avoid the shared mutable-default-argument pitfall; behavior for
    # existing callers (passing a dict or nothing) is unchanged.
    hparams = {} if hparams is None else hparams
    optimizer = hparams.get('optimizer', 'adam')
    lr = hparams.get('lr', 0.001)
    dropout = hparams.get('dropout', None)
    filter_number = hparams.get('filter_number', 32)
    # Fresh kwargs dicts per layer so Keras never shares regularizer objects.
    conv_args = lambda: dict(kernel_regularizer=l2(hparams.get('l2', 0.)),
                             bias_regularizer=l2(hparams.get('l2', 0.)),
                             data_format=DATA_FORMAT)
    pool_args = lambda: dict(data_format=DATA_FORMAT)

    input_x = Input(shape=input_dims)
    input_y = Input(shape=input_dims)

    # Channel axis position depends on the backend's image ordering.
    img_shape = input_dims + (1,) if K.backend() == 'tensorflow' else (1,) + input_dims
    input_image_x = Reshape(target_shape=img_shape)(input_x)
    input_image_y = Reshape(target_shape=img_shape)(input_y)

    def inception(x, name="inception"):
        # Four parallel branches: 1x1->5x5, 1x1->3x3, maxpool->1x1, and 1x1.
        branch11_x = Conv2D(filter_number, (1, 1), activation='relu', padding='same', name='branch11_{}'.format(name), **conv_args())(x)
        branch11_x = Conv2D(filter_number, (5, 5), activation='relu', padding='same', name='branch12_{}'.format(name), **conv_args())(branch11_x)
        branch12_x = Conv2D(filter_number, (1, 1), activation='relu', padding='same', name='branch21_{}'.format(name), **conv_args())(x)
        branch12_x = Conv2D(filter_number, (3, 3), activation='relu', padding='same', name='branch22_{}'.format(name), **conv_args())(branch12_x)
        branch13_x = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='same', name='branch3mp_{}'.format(name), **pool_args())(x)
        branch13_x = Conv2D(filter_number, (1, 1), padding='same', activation='relu', name='branch31_{}'.format(name), **conv_args())(branch13_x)
        branch14_x = Conv2D(filter_number, (1, 1), padding='same', activation='relu', name='branch32_{}'.format(name), **conv_args())(x)

        # NOTE(review): axis=1 here is the height axis under channels_last,
        # while the `top` Concatenate below chooses the axis per backend.
        # This looks inconsistent -- confirm the intended axis before
        # changing, since it alters the architecture.
        x = Concatenate(axis=1, name='Concatenate_{}'.format(name))([branch11_x, branch12_x, branch13_x, branch14_x])
        return x

    def subnet(x, name):
        # Stem: conv/pool stack applied before the inception modules.
        def input(x, name='Input'):
            x = Conv2D(filter_number, (7, 7), activation='relu', strides=(2, 2), name=name + 'Conv1', **conv_args())(x)
            x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name=name + 'Pool1', **pool_args())(x)
            x = Conv2D(filter_number, (1, 1), activation='relu', name=name + 'Conv2', **conv_args())(x)
            x = Conv2D(filter_number, (3, 3), activation='relu', name=name + 'Conv3', **conv_args())(x)
            x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name=name + 'Pool2', **pool_args())(x)
            return x

        x = input(x, name=name + 'Input')
        x = inception(x, name=name + 'Inception1')
        x = inception(x, name=name + 'Inception2')
        x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name=name + 'MaxPool', **pool_args())(x)
        return x

    # One identical subnet per input view.
    x = subnet(input_image_x, name='x')
    y = subnet(input_image_y, name='y')

    # Merge the two views along the channel axis (backend dependent).
    top = Concatenate(axis=-1 if K.backend() == 'tensorflow' else 1)([x, y])

    top = inception(top, name='topInception')
    top = AveragePooling2D(pool_size=(7, 7))(top)

    top = Flatten()(top)
    # Auxiliary 2-component vertex input joined to the flattened features.
    input_vtx = Input(shape=(2,))
    out = Concatenate(axis=1)([top, input_vtx])
    if dropout:
        out = Dropout(dropout)(out)
    out = Dense(outdim)(out)

    model = Model([input_x, input_y, input_vtx], out)

    if optimizer == 'adam':
        opt = Adam(lr=lr)
    else:
        opt = SGD(lr=lr, momentum=0.9)
    model.compile(loss='mean_absolute_percentage_error',
                  optimizer=opt)
    print(model.summary())

    return model
98 
99 
def regression(outdim=1, hparams={}, input_dims=(151, 141))
Definition: regression.py:17
rosenbrock_model_namespace::rosenbrock_model Model
bool print
std::string format(const int32_t &value, const int &ndigits=8)
Definition: HexUtils.cpp:14
def regression.validate (   model,
  generator,
  steps 
)
Run validation pipeline.

# Arguments
    model (keras.model): model to make predictions from
    generator (generator): yields (input, output) pairs
    steps (int): number of times that generator will be called for

# Returns
    (tuple(numpy.array)) targets and predictions

Definition at line 100 of file regression.py.

References next(), and PandAna.Demos.demo1.range.

Referenced by TEST().

def validate(model, generator, steps):
    """
    Run validation pipeline.

    # Arguments
        model (keras.model): model to make predictions from
        generator (generator): yields (input, output) pairs
        steps (int): number of times that generator will be called for

    # Returns
        (tuple(numpy.array)) targets and predictions
    """
    target_batches = []
    prediction_batches = []
    for _ in range(steps):
        inputs, batch_targets = next(generator)
        target_batches.append(batch_targets)
        prediction_batches.append(model.predict(inputs))
    # Stack the per-batch arrays into one array each.
    return np.concatenate(target_batches), np.concatenate(prediction_batches)
121 
122 
void next()
Definition: show_event.C:84
def validate(model, generator, steps)
Definition: regression.py:100

Variable Documentation

string regression.DATA_FORMAT = 'channels_last'

Definition at line 14 of file regression.py.