@cosmincatalin · Created March 20, 2018 14:52
The first part of a SageMaker script for building an MXNet model that counts shapes in an image.
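For context, a script exposing this train/save interface was meant to be launched through the SageMaker Python SDK of the time (v1). The snippet below is not part of the gist; it sketches what such a launch could look like, and the file name train.py, the IAM role ARN, the instance type, and the S3 prefix are all placeholders.

# Hypothetical launcher, assuming the gist is saved as train.py and the
# 2018-era SageMaker Python SDK (v1) is installed; role and bucket are placeholders.
from sagemaker.mxnet import MXNet

estimator = MXNet(
    entry_point="train.py",
    role="arn:aws:iam::123456789012:role/SageMakerRole",
    train_instance_count=1,
    train_instance_type="ml.p2.xlarge",
    hyperparameters={"batch_size": 64, "epochs": 3},
)
# The "training" channel is what train() receives as channel_input_dirs["training"].
estimator.fit({"training": "s3://my-bucket/shapes-data"})

The script itself follows.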
import logging
from pickle import load

import mxnet as mx
import numpy as np
from mxnet import autograd, nd
from mxnet.gluon import Trainer
from mxnet.gluon.data import DataLoader
from mxnet.gluon.loss import L2Loss
from mxnet.gluon.nn import Conv2D, MaxPool2D, Dropout, Flatten, Dense, HybridSequential
from mxnet.initializer import Xavier

logging.basicConfig(level=logging.INFO)


def train(hyperparameters, channel_input_dirs, num_gpus):
    # Entry point invoked by the (legacy) SageMaker MXNet container.
    batch_size = hyperparameters.get("batch_size", 64)
    epochs = hyperparameters.get("epochs", 3)
    mx.random.seed(42)
    training_dir = channel_input_dirs["training"]

    logging.info("Loading data from {}".format(training_dir))
    with open("{}/train/data.p".format(training_dir), "rb") as pickle:
        train_nd = load(pickle)
    with open("{}/validation/data.p".format(training_dir), "rb") as pickle:
        validation_nd = load(pickle)

    train_data = DataLoader(train_nd, batch_size, shuffle=True)
    validation_data = DataLoader(validation_nd, batch_size, shuffle=True)
    # HybridSequential (rather than plain Sequential) so the trained network
    # can be exported symbolically in save() below.
    net = HybridSequential()
    with net.name_scope():
        net.add(Conv2D(channels=32, kernel_size=(3, 3),
                       padding=0, activation="relu"))
        net.add(Conv2D(channels=32, kernel_size=(3, 3),
                       padding=0, activation="relu"))
        net.add(MaxPool2D(pool_size=(2, 2)))
        net.add(Dropout(0.25))
        net.add(Flatten())
        # Single linear output: the shape count is predicted as a regression target.
        net.add(Dense(1))

    ctx = mx.gpu() if num_gpus > 0 else mx.cpu()
    net.collect_params().initialize(Xavier(magnitude=2.24), ctx=ctx)

    loss = L2Loss()
    trainer = Trainer(net.collect_params(), optimizer="adam")
    smoothing_constant = 0.01

    for e in range(epochs):
        moving_loss = 0
        for i, (data, label) in enumerate(train_data):
            data = data.as_in_context(ctx)
            label = label.as_in_context(ctx)
            with autograd.record():
                output = net(data)
                loss_result = loss(output, label)
            loss_result.backward()
            trainer.step(batch_size)
            # Track an exponentially weighted moving average of the batch loss.
            curr_loss = nd.mean(loss_result).asscalar()
            if (i == 0) and (e == 0):
                moving_loss = curr_loss
            else:
                moving_loss = (1 - smoothing_constant) * moving_loss + \
                              smoothing_constant * curr_loss
        trn_total, trn_detected = calc_perf(net, ctx, train_data)
        val_total, val_detected = calc_perf(net, ctx, validation_data)
        log = "Epoch: {} loss: {:0.4f} perf_train: {:0.2f} perf_val: {:0.2f}" \
            .format(e, moving_loss,
                    trn_detected / trn_total,
                    val_detected / val_total)
        logging.info(log)
    return net


def calc_perf(model, ctx, data_iter):
    # A prediction counts as correct when, rounded to the nearest integer,
    # it equals the actual number of shapes in the image.
    rounded_predictions = np.array([])
    actual_labels = np.array([])
    for data, label in data_iter:
        data = data.as_in_context(ctx)
        label = label.as_in_context(ctx)
        output = model(data)
        rounded_predictions = np.append(rounded_predictions,
                                        nd.round(output).asnumpy().squeeze())
        actual_labels = np.append(actual_labels,
                                  label.asnumpy().squeeze())
    total = actual_labels.size
    detected = int((rounded_predictions == actual_labels).sum())
    return total, detected


def save(net, model_dir):
    # Export the network graph as JSON plus the trained weights so the model
    # can be reloaded for inference; this requires a hybridizable network.
    y = net(mx.sym.var("data"))
    y.save("{}/model.json".format(model_dir))
    net.collect_params().save("{}/model.params".format(model_dir))
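
Outside of SageMaker, the two entry points can also be exercised directly as a quick sanity check. A minimal sketch, assuming pickled datasets already sit under /tmp/shapes/train/data.p and /tmp/shapes/validation/data.p (all paths are placeholders):

# Hypothetical local smoke test, not part of the gist; all paths are placeholders.
# Assumes each data.p unpickles to something DataLoader accepts, e.g. a gluon
# ArrayDataset of (image, count) pairs.
if __name__ == "__main__":
    model = train(
        hyperparameters={"batch_size": 64, "epochs": 1},
        channel_input_dirs={"training": "/tmp/shapes"},
        num_gpus=0,
    )
    save(model, "/tmp/shapes-model")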