Want to expose your machine learning model to the network? Use io.serve:
import * as sm from '@shumai/shumai'
import { model } from './model'

sm.io.serve({
  run_model: (_, input) => {
    return model(input)
  }
})
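Here, ./model can export any function that maps a Tensor to a Tensor. A minimal sketch of what that file might contain (the file contents are an assumption for illustration, not part of shumai):

// model.ts (hypothetical) -- a single linear layer as a plain function
import * as sm from '@shumai/shumai'

const W = sm.randn([128, 128]).requireGrad()

export function model(input: sm.Tensor) {
  // any Tensor -> Tensor function works as a served model
  return input.matmul(W)
}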
A client can use io.tfetch (basically fetch, but for Tensors):
import * as sm from '@shumai/shumai'
const input = sm.randn([128])
const url = 'localhost:3000/run_model'
const output = await sm.io.tfetch(url, input)
Want to train your model over the network? Just add an endpoint for a backward pass to io.serve:
sm.io.serve({
  run_model: (user, input) => {
    const out = model(input)
    // capture a backward pass
    user.opt = (jacobian) => {
      sm.optim.sgd(out.backward(jacobian), 1e-3)
    }
    return out
  },
  optimize_model: (user, jacobian) => {
    // run it when that same user gives us a jacobian
    user.opt(jacobian)
  }
})
And the client can feed the jacobian back with io.tfetch:
const url = 'localhost:3000'
for (let i of sm.range(100)) {
  const [input, ref_output] = get_data()
  const output = await sm.io.tfetch(`${url}/run_model`, input)
  // get the jacobian from a loss function
  output.requires_grad = true
  const loss = sm.loss.mse(output, ref_output)
  loss.backward()
  // send that jacobian back
  await sm.io.tfetch(`${url}/optimize_model`, output.grad)
}
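Here get_data is a stand-in for your data pipeline. A minimal sketch, assuming a synthetic target that just doubles the input (the helper is hypothetical):

// hypothetical data loader: learn y = 2x on random inputs
function get_data() {
  const input = sm.randn([128])
  const ref_output = input.mul(sm.scalar(2))
  return [input, ref_output]
}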
The above setup seems pretty useful, and we can get rid of that boilerplate code. Using io.serve_model will create /forward and /optimize endpoints for you.
import * as sm from '@shumai/shumai'
import { model } from './model'
sm.io.serve_model(model, sm.optim.sgd)
And the client can attach with io.remote_model, which attaches a hook to backward for automatic gradients:
import * as sm from '@shumai/shumai'

const model = sm.io.remote_model('localhost:3000')
for (let i of sm.range(100)) {
  const [input, ref_output] = get_data()
  const output = await model(input)
  const loss = sm.loss.mse(ref_output, output)
  // async now, as it propagates through the network
  await loss.backward()
}
Want to run more than just a trivial remote trainer? Below is a distributed, model-parallel, pipelined server: we invoke multiple remote models and then serve a composite model of our own.
import * as sm from '@shumai/shumai'

const A = sm.io.remote_model('localhost:3001')
const B = sm.io.remote_model('localhost:3002')
const C = sm.io.remote_model('localhost:3003')

// no need to wrap this, autograd knows what's up
const weight = sm.randn([128, 128]).requireGrad()

async function model(input) {
  // this will run in parallel
  const [a, b] = await sm.util.all(
    A(input),
    B(input)
  )
  // automatically pipelined (isn't async great?)
  const c = await C(a)
  return c.mul(b).matmul(weight)
}

sm.io.serve_model(model, sm.optim.sgd)
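Each of those remote models needs to be running somewhere. A minimal sketch of one of the sub-model servers (the sub-model itself is an assumption; repeat for ports 3001 through 3003):

// hypothetical sub-model server, e.g. for localhost:3001
import * as sm from '@shumai/shumai'

const w = sm.randn([128, 128]).requireGrad()
const sub_model = (input) => input.matmul(w)

sm.io.serve_model(sub_model, sm.optim.sgd, { port: 3001 })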
Same client as before :)
All io.serve* methods automatically give us basic /statistics as JSON:
$ curl -s localhost:3000/statistics | jq
{
  "forward": {
    "hits": 1000,
    "seconds": 0.12337932200005891
  },
  "optimize": {
    "hits": 1000,
    "seconds": 0.16975503499999103
  }
}
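(Assuming seconds is cumulative time over all hits, that works out to roughly 123µs per forward call and about 170µs per optimize call.)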
but we can always add more:
sm.io.serve_model(model, sm.optim.sgd, { port: 3000 }, {
  statistics: () => {
    return { weight: weight.mean().toFloat32() }
  }
})
$ curl -s localhost:3000/statistics | jq .weight
0.1128062978386879
including recursively:
sm.io.serve_model(model, sm.optim.sgd, { port: 3000 }, {
  statistics: async () => {
    return { A: await (await fetch('localhost:3001/statistics')).json() }
  }
})
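and the nested entry can then be queried the same way:

$ curl -s localhost:3000/statistics | jq .A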