@jiumem
Last active May 8, 2018 22:45
keras for deep learning
import numpy as np
import theano

from keras.callbacks import Callback


class LossHistory(Callback):
    def __init__(self, X_train, y_train, layer_index):
        super(LossHistory, self).__init__()
        self.layer_index = layer_index
        if X_train.shape[0] >= 1000:
            # sample 1000 examples (with replacement) to keep per-epoch evaluation cheap
            mask = np.random.choice(X_train.shape[0], 1000)
            self.X_train_subset = X_train[mask]
            self.y_train_subset = y_train[mask]
        else:
            self.X_train_subset = X_train
            self.y_train_subset = y_train

    def on_train_begin(self, logs={}):
        self.train_batch_loss = []
        self.train_acc = []
        self.val_acc = []
        self.relu_out = []

    def on_batch_end(self, batch, logs={}):
        self.train_batch_loss.append(logs.get('loss'))

    def on_epoch_end(self, epoch, logs={}):
        self.relu_out.append(self.get_layer_out())
        val_epoch_acc = logs.get('val_acc')
        self.val_acc.append(val_epoch_acc)
        train_epoch_acc = self.model.evaluate(self.X_train_subset, self.y_train_subset,
                                              show_accuracy=True, verbose=0)[1]
        self.train_acc.append(train_epoch_acc)
        print('(train accuracy, val accuracy): (%.4f, %.4f)' % (train_epoch_acc, val_epoch_acc))

    def get_layer_out(self):
        layer_index = self.layer_index
        get_activation = theano.function([self.model.layers[0].input],
                                         self.model.layers[layer_index].get_output(train=False),
                                         allow_input_downcast=True)
        return get_activation(self.X_train_subset)
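
A minimal usage sketch (not part of the gist), assuming the Keras 0.x Sequential API this code targets; `model`, `X_train` and `y_train` are placeholder names. Note that `show_accuracy=True` plus validation data are needed for 'val_acc' to appear in `logs`:

history = LossHistory(X_train, y_train, layer_index=1)
model.fit(X_train, y_train, batch_size=128, nb_epoch=10,
          validation_split=0.1, show_accuracy=True,
          callbacks=[history])
# afterwards history.train_batch_loss, history.train_acc,
# history.val_acc and history.relu_out hold the recorded series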
class LossHistory(Callback):
    def __init__(self, X_train, y_train, layer_index):
        super(LossHistory, self).__init__()
        self.layer_index = layer_index
        self.previous_model_params_ = None
        if X_train.shape[0] >= 1000:
            mask = np.random.choice(X_train.shape[0], 1000)
            self.X_train_subset = X_train[mask]
            self.y_train_subset = y_train[mask]
        else:
            self.X_train_subset = X_train
            self.y_train_subset = y_train

    def on_train_begin(self, logs={}):
        # self.model_params will collect [layer1_W, layer1_b, layer2_W, layer2_b, ...] each epoch
        self.train_batch_loss = []
        self.train_acc = []
        self.val_acc = []
        self.relu_out = []
        self.model_params = []
        self.gradients = []

    def on_batch_end(self, batch, logs={}):
        self.train_batch_loss.append(logs.get('loss'))

    def on_epoch_end(self, epoch, logs={}):
        current_model_params = self.get_model_params()
        if self.previous_model_params_ is None:
            self.previous_model_params_ = current_model_params
        else:
            gradients = [(param - prev_param) for (param, prev_param) in
                         zip(current_model_params, self.previous_model_params_)]
            self.gradients.append(gradients)
            self.previous_model_params_ = current_model_params
        self.model_params.append(current_model_params)
        self.relu_out.append(self.get_layer_out())
        val_epoch_acc = logs.get('val_acc')
        self.val_acc.append(val_epoch_acc)
        train_epoch_acc = self.model.evaluate(self.X_train_subset, self.y_train_subset,
                                              show_accuracy=True, verbose=0)[1]
        self.train_acc.append(train_epoch_acc)
        print('(train accuracy, val accuracy): (%.4f, %.4f)' % (train_epoch_acc, val_epoch_acc))

    def get_layer_out(self):
        layer_index = self.layer_index
        get_activation = theano.function([self.model.layers[0].input],
                                         self.model.layers[layer_index].get_output(train=False),
                                         allow_input_downcast=True)
        return get_activation(self.X_train_subset)

    def get_model_params(self):
        return [param.get_value() for param in self.model.params]
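
One caveat worth spelling out: the recorded `gradients` are per-epoch parameter deltas W' - W, i.e. the accumulated updates the optimizer actually applied, not instantaneous gradients; under plain SGD each delta is the sum of -lr * gradient over that epoch's batches. A hypothetical way to inspect the update magnitudes per layer (`history` is a placeholder instance of the callback):

# print the L2 norm of each layer's per-epoch update
for epoch_idx, grads in enumerate(history.gradients):
    norms = [np.linalg.norm(g) for g in grads]
    print('epoch %d update norms: %s' % (epoch_idx, norms))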
jiumem commented Sep 10, 2015

class LossHistory(Callback):
    def __init__(self, X_train, y_train, layer_index):
        super(LossHistory, self).__init__()
        self.layer_index = layer_index
        self.previous_model_params_ = None
        if X_train.shape[0] >= 1000:
            mask = np.random.choice(X_train.shape[0], 1000)
            self.X_train_subset = X_train[mask]
            self.y_train_subset = y_train[mask]
        else:
            self.X_train_subset = X_train
            self.y_train_subset = y_train

    def on_train_begin(self, logs={}):
        # self.model_params will collect [layer1_W, layer1_b, layer2_W, layer2_b, ...] each epoch
        self.train_batch_loss = []
        self.train_acc = []
        self.val_acc = []
        self.layer_out = []
        self.model_params = []
        self.gradients = []
        self.init_params = self.get_model_params()
        self.previous_model_params_ = self.init_params

    def on_batch_end(self, batch, logs={}):
        self.train_batch_loss.append(logs.get('loss'))

    def on_epoch_end(self, epoch, logs={}):
        # use W' = W + dW: the per-epoch delta dW serves as a proxy for the applied gradient updates
        current_model_params = self.get_model_params()
        gradients = [(param - prev_param) for (param, prev_param) in zip(current_model_params,
                                                                         self.previous_model_params_)]
        self.gradients.append(gradients)
        self.previous_model_params_ = current_model_params
        self.model_params.append(current_model_params)

        self.layer_out.append(self.get_layer_out())

        val_epoch_acc = logs.get('val_acc')
        self.val_acc.append(val_epoch_acc)
        train_epoch_acc = self.model.evaluate(self.X_train_subset, self.y_train_subset,
                                              show_accuracy=True, verbose=0)[1]
        self.train_acc.append(train_epoch_acc)
        print('(train accuracy, val accuracy): (%.4f, %.4f)' % (train_epoch_acc, val_epoch_acc))

    def get_layer_out(self):
        layer_index = self.layer_index
        get_activation = theano.function([self.model.layers[0].input],
                                         self.model.layers[layer_index].get_output(train=False),
                                         allow_input_downcast=True)
        return get_activation(self.X_train_subset)

    def get_model_params(self):
        return [param.get_value() for param in self.model.params]

Monitor a training NN through: init_params, gradients, model_params, train_batch_loss, train_acc, val_acc
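
A sketch of how these monitored series could be plotted (matplotlib assumed, not part of the gist; `history` is a placeholder instance):

import matplotlib.pyplot as plt

# per-batch training loss
plt.figure()
plt.plot(history.train_batch_loss)
plt.xlabel('batch')
plt.ylabel('training loss')

# per-epoch train vs. validation accuracy
plt.figure()
plt.plot(history.train_acc, label='train acc')
plt.plot(history.val_acc, label='val acc')
plt.xlabel('epoch')
plt.legend()
plt.show()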

jiumem commented Sep 23, 2015

from keras.utils import np_utils


class EarlyStop(Callback):
    def __init__(self, patience=0, verbose=1, nb_classes=2, people_test=None, robots_test=[]):
        super(EarlyStop, self).__init__()

        self.patience = patience
        self.wait = 0
        self.best_score = -1.
        self.best_model = None
        self.verbose = verbose
        self.nb_classes = nb_classes
        self.people_test = people_test
        self.robots_test = [robot_test for robot_test in robots_test]
        self.people_acc = 0.
        self.robots_acc = []

    def on_epoch_end(self, epoch, logs={}):
        score = self.score(self.model)
        current = score[0]
        if current > self.best_score:
            self.best_score = current
            # note: this stores a reference to the live model, not a weight snapshot,
            # so best_model's weights keep changing as training continues
            self.best_model = self.model
            self.people_acc = score[1]
            self.robots_acc = score[2]
            self.wait = 0
            if self.verbose > 0:
                print('---current best score: %.3f' % current)
        else:
            if self.wait >= self.patience:
                if self.verbose > 0:
                    print("Epoch %d: early stopping" % (epoch))
                self.model.stop_training = True
            self.wait += 1

    def score(self, model):
        # accuracy on the positive (people) set, labeled all 1
        people_acc = model.evaluate(self.people_test,
                                    np_utils.to_categorical([1 for i in xrange(len(self.people_test))],
                                                            self.nb_classes),
                                    show_accuracy=True, verbose=0)[1]
        # accuracy on each negative (robot) set, labeled all 0
        robots_acc = []
        for robot_test in self.robots_test:
            robot_acc = model.evaluate(robot_test,
                                       np_utils.to_categorical([0 for i in xrange(len(robot_test))],
                                                               self.nb_classes),
                                       show_accuracy=True, verbose=0)[1]
            robots_acc.append(robot_acc)
        # reject any model whose people accuracy falls below the 0.93 floor;
        # otherwise score by mean robot accuracy
        if people_acc <= 0.93:
            return [0., people_acc, robots_acc]
        return [sum(robots_acc) / len(robots_acc), people_acc, robots_acc]
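
A minimal sketch of wiring this in (Keras 0.x API assumed; the data names are placeholders, not part of the gist):

early_stop = EarlyStop(patience=3, verbose=1, nb_classes=2,
                       people_test=X_people_test,
                       robots_test=[X_robots_a, X_robots_b])
model.fit(X_train, Y_train, batch_size=128, nb_epoch=50,
          show_accuracy=True, callbacks=[early_stop])
print('best score: %.3f (people acc %.3f)' % (early_stop.best_score, early_stop.people_acc))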
