Skip to content

Instantly share code, notes, and snippets.

@russelljjarvis
Last active September 11, 2023 04:47
Show Gist options
  • Save russelljjarvis/af436c10a7559569f64b3cdbe1389236 to your computer and use it in GitHub Desktop.
Save russelljjarvis/af436c10a7559569f64b3cdbe1389236 to your computer and use it in GitHub Desktop.
Hi @yeshwanthravitheja this is where I made the julia native representation of NMNIST (no pycall). If you wanted to make it really efficient, I think polarity only needs 8bits, or just a booltype. Int32 could be unsigned (UInt32). Another thing that could help is the related concepts of streaming/circular-buffer and lazy evalutaion. Tables.jl ha…
# Hi @yeshwanthravitheja this is where I made the julia native representation of NMNIST (no pycall).
# If you wanted to make it really efficient, I think polarity only needs 8bits, or just a booltype. Int32 could be unsigned (UInt32).
# Another thing that could help is the related concepts of streaming/circular-buffer and lazy evaluation.
# Tables.jl has a syntax for lazy loading big data sets (don't store all of the NMNIST in memory, just store in the currently accessed samples of NMNIST), this can be done with lazy loading.
# Similar magic is implemented by CircBuff type defined in OnlineStats.jl here https://github.com/joshday/OnlineStatsBase.jl/blob/master/src/stats.jl#L17-L60
# https://github.com/russelljjarvis/SpikeTime.jl/blob/restructure/examples2Vec/train_nmnist_performance_bm.jl
# https://github.com/russelljjarvis/SpikeTime.jl/blob/restructure/examples2Vec/train_nmnist.jl
## The entry point to call it, and iteratively save as it is made, is here:
# messy spaghetti code, but it worked.
# https://github.com/russelljjarvis/SpikeTime.jl/blob/restructure/examples2Vec/read_all_mnmist.jl
using PyCall
using Revise
using Odesa
using Random
using ProgressMeter
using JLD
using NumPyArrays
using Plots
"""
    build_data_set_native(events, storage, cnt, input_shape, l_change_cnt, l_old)

Convert an iterable of NMNIST `events` — tuples `(x, y, ts, p, l)` — into a
columnar, natively typed representation: x/y pixel coordinates (`Int32`),
spike times (`Float32`, divided by 1000), polarities (`Int8`), labels
(`Int32`), and the flattened linear population index for each `(x, y)` pair.

Returns `(cnt, did_it_exec, l_change_cnt, l_old)` where `did_it_exec` is the
tuple `(xx, yy, tts, polarity, label, pop_stimulation)`. `cnt` is incremented
once per event; `storage`, `input_shape`, `l_change_cnt`, and `l_old` are
accepted for call-site compatibility but are passed through unmodified.
"""
function build_data_set_native(events, storage, cnt, input_shape, l_change_cnt, l_old)
    xx = Int32[]
    yy = Int32[]
    tts = Float32[]
    polarity = Int8[]
    label = Int32[]
    # Fixed 35x35 grid used to flatten (x, y) coordinates to a single linear
    # population index. NOTE(review): `input_shape` is ignored here and the
    # grid size is hard-coded — confirm the sensor dimensions really fit 35x35.
    linear = LinearIndices(zeros(35, 35))
    pop_stimulation = Int32[]
    @inbounds for ev in events
        cnt += 1
        (x, y, ts, p, l) = ev
        push!(pop_stimulation, Int32(linear[CartesianIndex(convert(Int32, x), convert(Int32, y))]))
        push!(xx, convert(Int32, x))
        push!(yy, convert(Int32, y))
        # Timestamps are scaled down by 1000 (presumably µs -> ms — TODO confirm);
        # single conversion replaces the original's redundant double Float32 cast.
        push!(tts, convert(Float32, ts) / Float32(1000.0))
        push!(polarity, convert(Int8, p))
        push!(label, convert(Int32, l))
    end
    # Concretely typed tuple: the 6th slot was declared Vector{Any} in the
    # original even though pop_stimulation is Vector{Int32}, forcing a lossy
    # re-wrap and hurting downstream type stability.
    did_it_exec::Tuple{Vector{Int32},Vector{Int32},Vector{Float32},Vector{Int8},Vector{Int32},Vector{Int32}} =
        (xx, yy, tts, polarity, label, pop_stimulation)
    return (cnt, did_it_exec, l_change_cnt, l_old)
end
"""
    bds!()

Stream the NMNIST dataset through the Python `batch_nmnist_motions` helper
(via PyCall), convert each sampled pair to the native Julia columnar
representation with `build_data_set_native`, and persist every converted chunk
incrementally to `part_mnmist_<cnt>.jld` so the whole dataset never has to be
held in memory at once.

Side effects: prepends "" to the Python `sys.path`, reads the dataset from
`./`, and writes one JLD file per processed batch.
"""
function bds!()
    # Make the current directory importable so the local Python module resolves.
    pushfirst!(PyVector(pyimport("sys")."path"), "")
    nmnist_module = pyimport("batch_nmnist_motions")
    dataset::PyObject = nmnist_module.NMNIST("./")
    training_order = 0:dataset.get_count()-1
    input_shape = dataset.get_element_dimensions()
    cnt = 0
    l_change_cnt = 0
    l_old = 4
    # Every 200th position converts a pair of consecutive samples. Clamp the
    # upper index so the last iteration cannot run past the end of the range —
    # the original `batch:batch+1` could throw a BoundsError on the final batch.
    @inbounds @showprogress for batch in 1:200:length(training_order)
        hi = min(batch + 1, lastindex(training_order))
        events = dataset.get_dataset_item(training_order[batch:hi])
        cnt, did_it_exec, l_change_cnt, l_old =
            build_data_set_native(events, [], cnt, input_shape, l_change_cnt, l_old)
        # Save each chunk as soon as it is produced.
        @save "part_mnmist_$cnt.jld" did_it_exec
    end
    return nothing
end
# Entry point: convert and save the NMNIST chunks, then inspect labels from a
# previously assembled `storage` collection.
bds!()

# NOTE(review): bds! writes files named "part_mnmist_<cnt>.jld" containing
# `did_it_exec`; this load expects "all_mnmist.jld" holding `storage`, which
# must come from a separate aggregation step not shown in this file.
@load "all_mnmist.jld" storage

# Peek at the columns of the first stored sample.
(x, y, times, p, l, nodes) = (storage[1][1], storage[1][2], storage[1][3],
                              storage[1][4], storage[1][5], storage[1][6])

# Print the first unique label of every stored sample. The original indexed
# `storage[s]` with the *element* `s` yielded by enumerate — a bug for any
# non-integer element — so destructure the element directly instead.
for (ind, s) in enumerate(storage)
    (x, y, times, p, l, nodes) = (s[1], s[2], s[3], s[4], s[5], s[6])
    @show(unique(l)[1])
end
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment