Created: January 19, 2019, 04:25
Save ChrisRackauckas/89bb4181bb1c82266daeeec68c7f8d0e to your computer and use it in GitHub Desktop.
Example code from the DiffEqFlux.jl blog post
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
using DifferentialEquations, Flux, DiffEqFlux, Plots
####################################################
## Solve an ODE
####################################################
# In-place Lotka-Volterra (predator-prey) right-hand side.
# u = (x, y): prey and predator populations; p = (α, β, δ, γ): rate constants.
# Writes the time derivatives into `du` (mutating first argument, DiffEq convention).
function lotka_volterra(du,u,p,t)
  x, y = u
  α, β, δ, γ = p
  du[1] = α*x - β*x*y   # prey: exponential growth minus predation
  du[2] = -δ*y + γ*x*y  # predator: decay plus conversion of consumed prey
end
# Initial condition, integration window, and true parameters (α, β, δ, γ).
u0 = [1.0,1.0]
tspan = (0.0,10.0)
p = [1.5,1.0,3.0,1.0]
prob = ODEProblem(lotka_volterra,u0,tspan,p)
sol = solve(prob)  # let DifferentialEquations.jl pick the algorithm
using Plots  # NOTE: redundant (already loaded above); kept so this section runs standalone
plot(sol)
savefig("lv_sol.png")
####################################################
## Generate a dataset from an ODE
####################################################
p = [1.5,1.0,3.0,1.0]
prob = ODEProblem(lotka_volterra,u0,tspan,p)
sol = solve(prob,Tsit5(),saveat=0.1)  # save on a uniform grid of step 0.1
A = sol[1,:] # prey trajectory: length-101 vector over t = 0:0.1:10
plot(sol)
t = 0:0.1:10.0
scatter!(t,A,label="Data Points")
savefig("data_points.png")
####################################################
## Put an ODE in a neural network
####################################################
# Extract the first state (prey) time series from a solution object.
reduction(sol) = sol[1,:]
# Warm-up call through the reverse-mode differentiable solver interface.
diffeq_rd(p,reduction,prob,Tsit5(),saveat=0.1)
p = param([2.2, 1.0, 2.0, 0.4]) # Initial Parameter Vector (tracked by Flux)
params = Flux.Params([p])
# Our "1-layer neural network": the ODE solve itself, differentiable w.r.t. p
# via reverse-mode AD (diffeq_rd from DiffEqFlux).
function predict_rd()
  diffeq_rd(p,reduction,prob,Tsit5(),saveat=0.1)
end
# Loss: squared distance of the predicted prey trajectory from the constant 1.
loss_rd() = sum(abs2,x-1 for x in predict_rd())
data = Iterators.repeated((), 100)  # 100 "empty" batches: the loss takes no input
opt = ADAM(0.1)
cb = function () # callback function to observe training
  display(loss_rd())
  # using `remake` to re-create our `prob` with current parameters `p`
  display(plot(solve(remake(prob,p=Flux.data(p)),Tsit5(),saveat=0.1),ylim=(0,6)))
end
# Display the ODE with the initial parameter values.
cb()
Flux.train!(loss_rd, params, data, opt, cb = cb)
####################################################
## Solve a stiff ODE
####################################################
# Robertson's chemical-kinetics problem, a classic stiff benchmark.
rober = @ode_def Rober begin
  dy₁ = -k₁*y₁+k₃*y₂*y₃
  dy₂ = k₁*y₁-k₂*y₂^2-k₃*y₂*y₃
  dy₃ = k₂*y₂^2
end k₁ k₂ k₃
prob = ODEProblem(rober,[1.0;0.0;0.0],(0.0,1e11),(0.04,3e7,1e4))
sol = solve(prob,KenCarp4())  # implicit solver suited to stiff systems
plot(sol,xscale=:log10,tspan=(0.1,1e11),labels=["y1","y2","y3"])
savefig("ROBER_plot.png")
# Run the following commands to see non-stiff solvers fail
# sol = solve(prob,CVODE_Adams())
# sol = solve(prob,DP5())
# Install ODEInterface + ODEInterfaceDiffEq to use the original
# Fortran dopri5 and see the failure there
# using ODEInterfaceDiffEq
# sol = solve(prob,dopri5())
####################################################
## Add a DDE to a neural network
####################################################
# Delayed Lotka-Volterra: the prey growth term uses the prey value at t - 0.1,
# obtained from the history function h(p, t). Mutates du in place.
function delay_lotka_volterra(du,u,h,p,t)
  x, y = u
  α, β, δ, γ = p
  du[1] = (α - β*y)*h(p,t-0.1)[1]  # delayed prey term
  du[2] = (δ*x - γ)*y
end
# Constant history: both states equal one before t = 0 (eltype follows p so AD types propagate).
h(p,t) = ones(eltype(p),2)
# DDE problem with a single constant lag of 0.1.
prob = DDEProblem(delay_lotka_volterra,[1.0,1.0],h,(0.0,10.0),constant_lags=[0.1])
p = param([2.2, 1.0, 2.0, 0.4])
params = Flux.Params([p])
# First 101 saved values of the first state (fixed output length, as diffeq_fd requires).
reduction2(sol) = sol[1,1:101]
# Forward-mode differentiable DDE solve; 101 is the declared output length of reduction2.
function predict_fd_dde()
  diffeq_fd(p,reduction2,101,prob,MethodOfSteps(Tsit5()),saveat=0.1)
end
# Loss against the constant-1 target, evaluated once here as a smoke test.
loss_fd_dde() = sum(abs2,x-1 for x in predict_fd_dde())
loss_fd_dde()
####################################################
## Add an SDE to a neural network
####################################################
# Diagonal multiplicative noise: each state's noise amplitude is 10% of its value.
function lotka_volterra_noise(du,u,p,t)
  du[1] = 0.1u[1]
  du[2] = 0.1u[2]
end
# SDE: deterministic drift from lotka_volterra, diffusion from lotka_volterra_noise.
prob = SDEProblem(lotka_volterra,lotka_volterra_noise,[1.0,1.0],(0.0,10.0))
p = param([2.2, 1.0, 2.0, 0.4])
params = Flux.Params([p])
# Forward-mode differentiable SDE solve (101 saved points at saveat=0.1).
function predict_fd_sde()
  diffeq_fd(p,reduction,101,prob,SOSRI(),saveat=0.1)
end
# Loss: squared distance of the simulated prey path from the constant 1.
loss_fd_sde() = sum(abs2,x-1 for x in predict_fd_sde())
data = Iterators.repeated((), 100)  # 100 "empty" batches: the loss takes no input
opt = ADAM(0.1)
cb = function () # callback: print the loss and plot the current trajectory
  display(loss_fd_sde())
  display(plot(solve(remake(prob,p=Flux.data(p)),SOSRI(),saveat=0.1),ylim=(0,6)))
end
# Display the ODE with the current parameter values.
cb()
Flux.train!(loss_fd_sde, params, data, opt, cb = cb)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.