from __future__ import print_function

import os
import pickle

import h5py
import numpy as np

from keras.models import model_from_json, load_model
from keras.optimizers import SGD
from keras.utils import np_utils

from multi_gpu import make_parallel
# Open the HDF5 dataset read-only at module level.
# NOTE(review): the line defining `n1` (presumably a dataset/group taken
# from `hf`) was lost in extraction — recover it from the original script.
hf = h5py.File(dataset, 'r')
total_count = n1.shape[0]  # total number of samples available
def train_model(model, dataset, validation_ratio=0.2, batch_size=64,
                nb_epoch=10, directory='./trained_model/'):
    """Train ``model`` on the HDF5 file ``dataset`` and persist the results.

    Sample indices are shuffled and split into train/validation sets by
    ``validation_ratio``; training runs through ``fit_generator`` fed by
    ``produce_seq`` generators over the open HDF5 handle.  Afterwards the
    per-epoch history (pickle), architecture (JSON), weights (HDF5) and the
    full recoverable model are written under ``directory``.

    NOTE(review): the extraction dropped the original epoch count and output
    directory; both are restored here as backward-compatible keyword
    arguments with guessed defaults — confirm against the original script.

    Parameters
    ----------
    model : compiled Keras model to train.
    dataset : path to the HDF5 file holding samples and labels.
    validation_ratio : fraction of samples held out for validation.
    batch_size : samples per generated batch.
    nb_epoch : number of training epochs (Keras 1.x keyword).
    directory : output directory for history/architecture/weights/model.
    """
    with h5py.File(dataset, "r") as data:
        # Shuffle all sample ids, then split train / validation.
        # `total_count` is a module-level global derived from the dataset.
        total_ids = np.random.permutation(range(0, total_count))
        split = int((1 - validation_ratio) * total_count)
        train_total_ids = total_ids[0:split]
        test_total_ids = total_ids[split:]

        # Batch generators over the open HDF5 handle.
        # assumes produce_seq yields (inputs, targets) batches — TODO confirm
        training_sequence_generator = produce_seq(
            batch_size=batch_size, data=data, sample_ids=train_total_ids)
        validation_sequence_generator = produce_seq(
            batch_size=batch_size, data=data, sample_ids=test_total_ids)

        history = model.fit_generator(
            generator=training_sequence_generator,
            validation_data=validation_sequence_generator,
            samples_per_epoch=len(train_total_ids),
            nb_val_samples=len(test_total_ids),
            nb_epoch=nb_epoch)

    if not os.path.exists(directory):
        os.makedirs(directory)

    # Per-epoch metrics, for later plotting/analysis.
    # protocol=2 keeps the pickle readable from Python 2.
    with open(directory + 'history.pickle', 'wb') as handle:
        pickle.dump(history.history, handle, protocol=2)
    print("The training/testing logs saved")

    # Architecture only (JSON) ...
    model_json = model.to_json()
    with open(directory + "model.json", "w") as json_file:
        json_file.write(model_json)
    print("The arch saved")

    # ... plus weights alone, and the full model for crash recovery.
    model.save_weights(directory + "model_weights.h5")
    model.save(directory + 'model_4recover.h5')
    print("The weights and model saved")
# Configure the optimizer and compile the model.
# `learning_rate`, `momentum` and `decay_rate` are hyper-parameters
# assigned elsewhere in the file (outside this chunk).
opt = SGD(lr=learning_rate, momentum=momentum, decay=decay_rate,
          nesterov=False)
model.compile(
    loss='categorical_crossentropy',
    optimizer=opt,
    metrics=['acc', 'top_k_categorical_accuracy'],
)
# --- extraction residue: index of definitions appearing in the full file ---
#   def train_model(model, dataset, validation_ratio=0.2, batch_size=64)
#   def produce_seq(batch_size, data, sample_ids)
#   procfile = open("FD_BRL_v0.txt")