Gluon hello world
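A minimal Gluon example: fit a small feed-forward network that maps one five-element input vector onto five continuous target values.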
	import mxnet as mx
	import numpy as np
	from mxnet import gluon
	# A single training example: five inputs and five continuous targets.
	x_input = mx.nd.array([[1, 2, 3, 4, 5]], ctx=mx.cpu(), dtype=np.float32)
	y_input = mx.nd.array([[10, 15, 20, 22.5, 25]], ctx=mx.cpu(), dtype=np.float32)


	# A two-layer feed-forward network: one hidden layer of 16 units and
	# one output per target value. Note y_input.shape[1] == 5; len(y_input)
	# is the batch dimension and equals 1 here.
	net = gluon.nn.Sequential()
	with net.name_scope():
	    net.add(gluon.nn.Dense(16, activation="relu"))
	    net.add(gluon.nn.Dense(y_input.shape[1]))


	# Initialize all weights from a normal distribution on the CPU.
	net.collect_params().initialize(mx.init.Normal(), ctx=mx.cpu())


	# The targets are continuous values, so this is a regression problem:
	# use an L2 (squared-error) loss rather than softmax cross-entropy.
	l2_loss = gluon.loss.L2Loss()
	trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': .1})


	n_epochs = 10

	for e in range(n_epochs):
	    for i in range(len(x_input)):
	        # Slice with i:i + 1 to keep the batch dimension, giving the
	        # (1, 5) shape Dense expects; "data" also avoids shadowing the
	        # built-in name "input".
	        data = x_input[i:i + 1]
	        target = y_input[i:i + 1]
	        with mx.autograd.record():
	            output = net(data)
	            loss = l2_loss(output, target)
	        # backward() belongs outside the record() scope.
	        loss.backward()
	        trainer.step(data.shape[0])
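
As a quick sanity check (not part of the original snippet), you can run the trained network on the training input and compare its prediction to the targets; the printed values should move toward y_input over the 10 epochs:

	# Forward pass without gradient recording; compare prediction to target.
	prediction = net(x_input)
	print("prediction:", prediction.asnumpy())
	print("target:    ", y_input.asnumpy())
	print("final loss:", l2_loss(prediction, y_input).asnumpy())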