George3d6 / Julia_and_Julia_sets_blog_notes.jl
Created May 7, 2018 14:56
Notes for a blog post covering an introduction to Julia using Julia sets
#Pkg.add("Plots")
#Pkg.add("GR")
using Plots
@everywhere function pixel_color(x, y, width, height, c)
max_iter = 255
z = ( ((y/width)*2.7 - 1.3) + ((x/height)*4.5 - 2.5) *im )
for iter_nr = 1:max_iter
z = z^2 + c
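# A minimal usage sketch (not part of the original notes): how pixel_color could be used
# to render a full Julia-set image with Plots' heatmap. The resolution, the constant c
# and the output filename are assumptions made for illustration only.
width, height = 1000, 1000
c = -0.70176 - 0.3842im   # an arbitrary constant that produces a well-known fractal
img = [pixel_color(x, y, width, height, c) for y = 1:height, x = 1:width]
png(heatmap(img), "julia_set.png")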
Pkg.add("Knet")
Pkg.add("Plots")
Pkg.add("GR")
function julia(x, y, width, height, c)
z = ((y/width)*2.7 - 1.3) + ((x/height)*4.5 - 2.5)im
for i = 1:254
z = z^2 + c
if abs(z) >= 4
return Float32(i)
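# get_training_data(6000, size) is called at the bottom of these notes but never shown.
# This is a hedged sketch of what it could look like: each sample is a size×size
# escape-time image for a randomly drawn constant c, scaled to [0, 1]. The sampling
# range for c and the normalisation are assumptions, not the original values.
function get_training_data(n_samples, size)
    data = zeros(Float32, size, size, 1, n_samples) # width × height × channels × samples, as sliced in train_model
    for s = 1:n_samples
        c = (rand()*1.2 - 0.6) + (rand()*1.2 - 0.6)im # a random constant per sample
        for x = 1:size, y = 1:size
            data[y, x, 1, s] = julia(x, y, size, size, c) / 254
        end
    end
    return data
end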
using Knet, Plots
gr()
global atype = gpu() >= 0 ? KnetArray{Float32} : Array{Float32} # use GPU arrays when a GPU is available
# Initialize weights and biases, original code: https://github.com/ekinakyurek/GAN-70-Lines-of-Julia/blob/66a60a6ea4532841ee647f08759ae9b1ace0c892/gan.jl#L25
function initmodel(hidden, input, output)
    𝗪 = []
    x = input
    for h in [hidden... output]
        push!(𝗪, atype(xavier(h,x)), atype(zeros(h, 1))) # FC layer weights and bias
        x = h
    end
    𝗪
end
leakyrelu(x;α=Float32(0.2)) = max(0,x) + α*min(0,x) # LeakyRelu activation
# A generic MLP forward prop function, original code: https://github.com/ekinakyurek/GAN-70-Lines-of-Julia/blob/66a60a6ea4532841ee647f08759ae9b1ace0c892/gan.jl#L6
function forward_prop(W, X; dropout_p=0.0)
    for i = 1:2:length(W)
        X = W[i]*dropout(mat(X), dropout_p) .+ W[i+1] # mat(X) flattens X to a 2-D matrix
        i < length(W)-1 && (X = leakyrelu.(X))        # activation on all but the last layer
    end
    sigm.(X)
end
# Forward prop for the discriminator and generator respectively
D(𝗪d, x) = forward_prop(𝗪d, x; dropout_p=0.5) # Discriminator (the 0.5 dropout is an assumed value)
G(𝗪g, z) = forward_prop(𝗪g, z)                # Generator
global const 𝜀 = Float32(1e-8) # a small number to prevent NaN in the logs
𝑱d(𝗪d,x,Gz) = -mean(log.(D(𝗪d,x)+𝜀)+log.(1-D(𝗪d,Gz)+𝜀))/2 # Discriminator Loss
𝑱g(𝗪g, 𝗪d, z) = -mean(log.(D(𝗪d,G(𝗪g,z))+𝜀)) # Generator Loss
∇d = grad(𝑱d) # Discriminator gradient
∇g = grad(𝑱g) # Generator gradient
𝒩(input, batch) = atype(randn(Float32, input, batch)) # SampleNoise
function generate_and_save(𝗪, number, 𝞗, gen; fldr="save_image_directory/")
    Gz = G(𝗪[1], 𝒩(𝞗[:ginp], number)) #.> 0.5
    Gz = permutedims(reshape(Gz, (𝞗[:size], 𝞗[:size], number)), (2,1,3))
    [png(heatmap(Gz[:,:,i]), "$(fldr)$(gen)-$(i).png") for i=1:number]
end
#original code: https://github.com/ekinakyurek/GAN-70-Lines-of-Julia/blob/66a60a6ea4532841ee647f08759ae9b1ace0c892/gan.jl#L42
function train_model(𝗪, data, 𝞗, optim)
    gloss = dloss = counter = 0.0
    B = 𝞗[:batchsize] # assumed value: the right-hand side of this assignment was truncated in the notes
    for generation = 1:𝞗[:epochs]
        for n = 1:32:(length(data[1,1,1,:]) - 33)
            x = data[:,:,:,n:n+31]                                        # a mini-batch of real Julia-set images
            Gz = G(𝗪[1], 𝒩(𝞗[:ginp], 𝞗[:batchsize]))                      # a mini-batch of generated (fake) images
            update!(𝗪[2], ∇d(𝗪[2], x, Gz), optim[2])                       # discriminator step
            update!(𝗪[1], ∇g(𝗪[1], 𝗪[2], 𝒩(𝞗[:ginp], 𝞗[:batchsize])), optim[1]) # generator step
        end
        log_model(𝗪, data, 𝞗, generation)
    end
end
function log_model(𝗪, data, 𝞗, generation)
    println("Running logging function for generation $(generation)")
    println("-----------------------------------------------------")
    gloss = dloss = counter = 0.0
    for n = 1:32:(length(data[1,1,1,:]) - 33)
        x = data[:,:,:,n:n+31]
        Gz = G(𝗪[1], 𝒩(𝞗[:ginp], 𝞗[:batchsize]))
        dloss += 2*𝞗[:batchsize]*𝑱d(𝗪[2], x, Gz)                          # accumulate discriminator loss
        gloss += 2*𝞗[:batchsize]*𝑱g(𝗪[1], 𝗪[2], 𝒩(𝞗[:ginp], 𝞗[:batchsize])) # accumulate generator loss
        counter += 2*𝞗[:batchsize]
    end
    println("Discriminator loss: $(dloss/counter) | Generator loss: $(gloss/counter)")
    generate_and_save(𝗪, 4, 𝞗, generation)
end
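# main(size, X) is called below but not defined in these notes. This is a minimal sketch
# of how it could wire everything together, assuming a hyper-parameter dict 𝞗, one hidden
# layer of 512 units per network and Adam optimizers; all of these values are assumptions.
function main(size, X)
    𝞗 = Dict(:ginp => 256, :batchsize => 32, :epochs => 20, :size => size)
    𝗪 = (initmodel([512], 𝞗[:ginp], size*size),   # generator: noise -> image pixels
         initmodel([512], size*size, 1))           # discriminator: image pixels -> real/fake score
    optim = (map(wi -> Adam(lr=0.0002), 𝗪[1]),     # one Adam state per weight array
             map(wi -> Adam(lr=0.0002), 𝗪[2]))
    train_model(𝗪, X, 𝞗, optim)
end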
# Build a training set of 6000 Julia-set images of 58×58 pixels and train the GAN on it
size = 58
X = get_training_data(6000, size)
main(size, X)