LoopVectorization timings

Three variants of the same script, which fits a two-component Gaussian mixture to rolling windows of data via EM: (1) regularization and mixture structs with abstract field types, (2) the same structs made fully parametric, and (3) everything annotated with concrete Float64 types, global constants for the problem size, and the @tturbo/@turbo loops replaced by plain loops. The first two variants pin LoopVectorization to a specific release so timings can be compared between versions.
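# ========== Variant 1: structs with abstract field types ==========
# Baseline: InvGamma stores α, β as ::Real and GaussianMixture's ELBO_hist is
# a Vector{<:Real}, so some struct fields are not concretely typed.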
import Pkg
Pkg.activate(temp=true, io=devnull)

V_OLD, V_NEW = "0.12.99", "0.12.102"
# Swap `version=V_NEW` for `version=V_OLD` to time the other release
Pkg.add(name="LoopVectorization", version=V_NEW, io=devnull)
Pkg.add(["Distributions", "ProgressMeter"], io=devnull)
Pkg.status()

module EM

import Distributions: Dirichlet
using LoopVectorization

const AV = AbstractVector{T} where T
const AM = AbstractMatrix{T} where T

abstract type AbstractRegularization end
const MaybeReg = Union{AbstractRegularization, Nothing}
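# Inverse-Gamma(α, β) prior on the component variances. It only affects
# finalize_variances!, which shrinks each variance towards β/α.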
struct InvGamma <: AbstractRegularization
    α::Real
    β::Real
end

# Initialize posteriors by drawing each column from a symmetric Dirichlet(a),
# so every column already sums to one
function init_G(K, N, a::Real)
    distr = Dirichlet(K, a)
    rand(distr, N)
end

# Gaussian pdf; step_E! inlines this formula directly inside its @tturbo loop
normal(x::Real, μ::Real, var::Real) = exp(-(x-μ)^2 / (2var)) / sqrt(2π * var)
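# Evidence lower bound under the current posteriors q = G[k, n]:
#   ELBO = Σ_{n,k} q * (log p[k] + log N(data[n]; mu[k], var[k]) - log q),
# where log N(x; μ, σ²) = -(log(2π) + log σ² + (x - μ)²/σ²) / 2.
# The 1e-100 offset guards against log(0) when a responsibility underflows.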
function ELBO(G, p, mu, var, data, reg::Nothing)::Real
    K, N = size(G)
    ret = 0.0
    @tturbo for n ∈ 1:N, k ∈ 1:K
        q = G[k, n]
        ret += q * (
            log(p[k]) - (log(2π) + log(var[k]) + (data[n] - mu[k])^2 / var[k]) / 2
            - log(q + 1e-100)
        )
    end
    ret
end

@inline ELBO(G, p, mu, var, data, reg::InvGamma) = ELBO(G, p, mu, var, data, nothing)

# ===== E step =====
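# The E step fills G with posterior responsibilities:
#   G[k, n] ∝ p[k] * N(data[n]; mu[k], var[k]),
# normalized so that each column of G sums to one.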
function finalize_posteriors!(G, norm::AV, reg::Nothing)
    K, N = size(G)
    @tturbo for n ∈ 1:N, k ∈ 1:K
        G[k, n] /= norm[n]
    end
end

@inline finalize_posteriors!(G, norm::AV, reg::InvGamma) =
    finalize_posteriors!(G, norm, nothing)

function step_E!(G, norm::AV{<:Real}, p, mu, var, data, reg::MaybeReg)
    K, N = size(G)
    @assert length(norm) == N

    norm .= 0
    @tturbo for n ∈ 1:N, k ∈ 1:K
        G[k, n] = p[k] * exp(-(data[n] - mu[k])^2 / (2var[k])) / sqrt(2π * var[k])
        norm[n] += G[k, n]
    end
    finalize_posteriors!(G, norm, reg)
end

# ===== M step =====
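# Closed-form parameter updates given responsibilities G and the
# per-component evidences N_k = Σ_n G[k, n]:
#   p[k]   = N_k / N
#   mu[k]  = Σ_n G[k, n] * data[n] / N_k
#   var[k] = Σ_n G[k, n] * (data[n] - mu[k])^2 / N_k   (plus regularization)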
M_weights!(p, G, norm, reg::Nothing) = p .= norm ./ sum(norm)
@inline M_weights!(p, G, norm, reg::InvGamma) = M_weights!(p, G, norm, nothing)

function M_means!(mu, G, norm, data, reg::Nothing)
    K, N = size(G)
    mu .= 0
    @tturbo for n ∈ 1:N, k ∈ 1:K
        mu[k] += G[k, n] / norm[k] * data[n]
    end
end

@inline M_means!(mu, G, norm, data, reg::InvGamma) =
    M_means!(mu, G, norm, data, nothing)

finalize_variances!(var::AV, norm::AV, reg::Nothing) = @turbo var .= var ./ norm .+ 1e-6

function finalize_variances!(var::AV, norm::AV, reg::InvGamma)
    α, β = reg.α, reg.β
    @turbo @. var = (2β + var) / (2α + norm)
    nothing
end

function M_variances!(var, G, norm, data, mu, reg::MaybeReg)
    K, N = size(G)
    var .= 0
    @tturbo for n ∈ 1:N, k ∈ 1:K
        var[k] += G[k, n] * (data[n] - mu[k])^2
    end
    finalize_variances!(var, norm, reg)
end

function step_M!(G, norm, p, mu, var, data, reg::MaybeReg)
    K, N = size(G)
    @assert K < N
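    # Reuse the first K entries of the length-N `norm` buffer to accumulate
    # the per-component evidences Σ_n G[k, n]; this is why K < N is required.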
    evidences = @view norm[1:K]
    evidences .= 0
    @tturbo for n ∈ 1:N, k ∈ 1:K
        evidences[k] += G[k, n]
    end

    M_weights!(p, G, evidences, reg)
    M_means!(mu, G, evidences, data, reg)
    M_variances!(var, G, evidences, data, mu, reg)
end

end # module

using ProgressMeter
import Distributions: UnivariateGMM, Categorical

const AV = AbstractVector{T} where T
const AM = AbstractMatrix{T} where T

function sample_GMM(p::AV, mu::AV, var::AV; N::Integer)
    distr = UnivariateGMM(mu, sqrt.(var), Categorical(p))
    rand(distr, N)
end

mutable struct GaussianMixture{T<:Real}
    K::Integer
    G::Union{Matrix{T}, Nothing}
    norm::Union{Vector{T}, Nothing}
    p::Vector{T}
    mu::Vector{T}
    var::Vector{T}
    ELBO_hist::Vector{<:Real}
end

function GaussianMixture{T}(K::Integer) where T<:Real
    @assert K > 0
    GaussianMixture{T}(
        K, nothing, nothing,
        zeros(T, K), zeros(T, K), zeros(T, K),
        Real[]
    )
end

@inline GaussianMixture(K::Integer) = GaussianMixture{Float64}(K)
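# Run EM for `maxiter` iterations. The posteriors G are (re)initialized from a
# symmetric Dirichlet(a) only when the buffers are missing, the data size
# changed, or `reinit=true`; otherwise the previous parameters are kept, which
# lets consecutive calls warm-start from each other.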
function fit!(
    gmm::GaussianMixture{T}, data::AV{<:Real};
    maxiter::Integer=10_000,
    reg::EM.MaybeReg=nothing, a::Real=100, reinit::Bool=false
) where T<:Real
    K, N = gmm.K, length(data)

    has_reinit = false
    if gmm.G === nothing || size(gmm.G) ≠ (K, N) || reinit
        gmm.G = EM.init_G(K, N, a)
        gmm.norm = zeros(T, N)
        has_reinit = true
    end
    @assert size(gmm.G) == (K, N)

    if has_reinit
        EM.step_M!(gmm.G, gmm.norm, gmm.p, gmm.mu, gmm.var, data, reg)
    else
        # Keep old parameters
        EM.step_E!(gmm.G, gmm.norm, gmm.p, gmm.mu, gmm.var, data, reg)
    end

    gmm.ELBO_hist = zeros(T, maxiter)
    for i ∈ 1:maxiter
        EM.step_E!(gmm.G, gmm.norm, gmm.p, gmm.mu, gmm.var, data, reg)
        EM.step_M!(gmm.G, gmm.norm, gmm.p, gmm.mu, gmm.var, data, reg)
        gmm.ELBO_hist[i] = EM.ELBO(gmm.G, gmm.p, gmm.mu, gmm.var, data, reg)
    end

    (; p=gmm.p, mu=gmm.mu, var=gmm.var)
end
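# Fit the mixture to each rolling window of `win_size` observations. The same
# `gmm` is reused across windows, so each window warm-starts from the previous
# window's parameters (see `fit!` above).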
function fit_rolling!(
    gmm::GaussianMixture{T}, data::AV{<:Real}, win_size::Integer;
    kwargs...
) where T<:Real
    the_range = win_size:length(data)
    P = Matrix{T}(undef, gmm.K, length(the_range))
    M, V = similar(P), similar(P)

    @showprogress for (i, off) ∈ enumerate(the_range)
        win = @view data[off-the_range[1]+1:off]
        p, mu, var = fit!(gmm, win; kwargs...)
        P[:, i] .= p
        M[:, i] .= mu
        V[:, i] .= var
    end

    P, M, V
end

@info "Generating data..."
data = sample_GMM([.6, .4], [0, 0], [.3, .2]; N=500)

K, WIN = 2, 300
@info "Parameters" K WIN

@info "Fitting... (no regularization)"
mix = GaussianMixture(K)
fit_rolling!(mix, data, WIN)

@info "Fitting... (with regularization)"
mix = GaussianMixture(K)
reg = EM.InvGamma(.1, .1)
fit_rolling!(mix, data, WIN; reg=reg)
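The scripts above only report timings through the elapsed time shown by @showprogress. To compare the two pinned LoopVectorization releases head-to-head, a sketch like the following could be appended to this variant (a hypothetical addition, not part of the original gist; the V_OLD measurement requires rerunning the whole script with version=V_OLD in the Pkg.add call at the top):

# Hypothetical timing harness (assumes the script above has already run)
mix = GaussianMixture(K)
fit_rolling!(mix, data, WIN)        # warm-up run: compile everything first
@time fit_rolling!(mix, data, WIN)  # second run measures steady-state time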
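# ========== Variant 2: fully parametric structs ==========
# Identical to variant 1, except InvGamma{T1, T2} and GaussianMixture{T, E}
# now carry type parameters, so the α, β and ELBO_hist fields are concretely
# typed.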
import Pkg
Pkg.activate(temp=true, io=devnull)

V_OLD, V_NEW = "0.12.99", "0.12.102"
Pkg.add(name="LoopVectorization", version=V_NEW, io=devnull)
Pkg.add(["Distributions", "ProgressMeter"], io=devnull)
Pkg.status()

module EM

import Distributions: Dirichlet
using LoopVectorization

const AV = AbstractVector{T} where T
const AM = AbstractMatrix{T} where T

abstract type AbstractRegularization end
const MaybeReg = Union{AbstractRegularization, Nothing}

struct InvGamma{T1<:Real, T2<:Real} <: AbstractRegularization
    α::T1
    β::T2
end

function init_G(K, N, a::T) where T<:Real
    distr = Dirichlet(K, a)
    rand(distr, N)
end

function ELBO(G, p, mu, var, data, reg::Nothing)
    K, N = size(G)
    ret = 0.0
    @tturbo for n ∈ 1:N, k ∈ 1:K
        q = G[k, n]
        ret += q * (
            log(p[k]) - (log(2π) + log(var[k]) + (data[n] - mu[k])^2 / var[k]) / 2
            - log(q + 1e-100)
        )
    end
    ret
end

@inline ELBO(G, p, mu, var, data, reg::InvGamma) = ELBO(G, p, mu, var, data, nothing)

# ===== E step =====
function finalize_posteriors!(G, norm::AV, reg::Nothing)
    K, N = size(G)
    @tturbo for n ∈ 1:N, k ∈ 1:K
        G[k, n] /= norm[n]
    end
end

@inline finalize_posteriors!(G, norm::AV, reg::InvGamma) =
    finalize_posteriors!(G, norm, nothing)

function step_E!(G, norm::AV{<:Real}, p, mu, var, data, reg::MaybeReg)
    K, N = size(G)
    @assert length(norm) == N

    norm .= 0
    @tturbo for n ∈ 1:N, k ∈ 1:K
        G[k, n] = p[k] * exp(-(data[n] - mu[k])^2 / (2var[k])) / sqrt(2π * var[k])
        norm[n] += G[k, n]
    end
    finalize_posteriors!(G, norm, reg)
end

# ===== M step =====
M_weights!(p, G, norm, reg::Nothing) = p .= norm ./ sum(norm)
@inline M_weights!(p, G, norm, reg::InvGamma) = M_weights!(p, G, norm, nothing)

function M_means!(mu, G, norm, data, reg::Nothing)
    K, N = size(G)
    mu .= 0
    @tturbo for n ∈ 1:N, k ∈ 1:K
        mu[k] += G[k, n] / norm[k] * data[n]
    end
end

@inline M_means!(mu, G, norm, data, reg::InvGamma) =
    M_means!(mu, G, norm, data, nothing)

finalize_variances!(var::AV, norm::AV, reg::Nothing) = @turbo var .= var ./ norm .+ 1e-6

function finalize_variances!(var::AV, norm::AV, reg::InvGamma)
    α, β = reg.α, reg.β
    @turbo @. var = (2β + var) / (2α + norm)
    nothing
end

function M_variances!(var, G, norm, data, mu, reg::MaybeReg)
    K, N = size(G)
    var .= 0
    @tturbo for n ∈ 1:N, k ∈ 1:K
        var[k] += G[k, n] * (data[n] - mu[k])^2
    end
    finalize_variances!(var, norm, reg)
end

function step_M!(G, norm, p, mu, var, data, reg::MaybeReg)
    K, N = size(G)
    @assert K < N

    evidences = @view norm[1:K]
    evidences .= 0
    @tturbo for n ∈ 1:N, k ∈ 1:K
        evidences[k] += G[k, n]
    end

    M_weights!(p, G, evidences, reg)
    M_means!(mu, G, evidences, data, reg)
    M_variances!(var, G, evidences, data, mu, reg)
end

end # module

using ProgressMeter
import Distributions: UnivariateGMM, Categorical

const AV = AbstractVector{T} where T
const AM = AbstractMatrix{T} where T

function sample_GMM(p::AV, mu::AV, var::AV; N::Integer)
    distr = UnivariateGMM(mu, sqrt.(var), Categorical(p))
    rand(distr, N)
end

mutable struct GaussianMixture{T<:Real, E<:Real}
    K::Integer
    G::Union{Matrix{T}, Nothing}
    norm::Union{Vector{T}, Nothing}
    p::Vector{T}
    mu::Vector{T}
    var::Vector{T}
    ELBO_hist::Vector{E}
end

function GaussianMixture{T, E}(K::Integer) where {T<:Real, E<:Real}
    @assert K > 0
    GaussianMixture{T, E}(
        K, nothing, nothing,
        zeros(T, K), zeros(T, K), zeros(T, K),
        E[]
    )
end

@inline GaussianMixture(K::Integer) = GaussianMixture{Float64, Float64}(K)

function fit!(
    gmm::GaussianMixture{T}, data::AV{<:Real};
    maxiter::Integer=10_000,
    reg::EM.MaybeReg=nothing, a::T2=100, reinit::Bool=false
) where {T<:Real, T2<:Real}
    K, N = gmm.K, length(data)

    has_reinit = false
    if gmm.G === nothing || size(gmm.G) ≠ (K, N) || reinit
        gmm.G = EM.init_G(K, N, a)
        gmm.norm = zeros(T, N)
        has_reinit = true
    end
    @assert size(gmm.G) == (K, N)

    if has_reinit
        EM.step_M!(gmm.G, gmm.norm, gmm.p, gmm.mu, gmm.var, data, reg)
    else
        # Keep old parameters
        EM.step_E!(gmm.G, gmm.norm, gmm.p, gmm.mu, gmm.var, data, reg)
    end

    gmm.ELBO_hist = zeros(T, maxiter)
    for i ∈ 1:maxiter
        EM.step_E!(gmm.G, gmm.norm, gmm.p, gmm.mu, gmm.var, data, reg)
        EM.step_M!(gmm.G, gmm.norm, gmm.p, gmm.mu, gmm.var, data, reg)
        gmm.ELBO_hist[i] = EM.ELBO(gmm.G, gmm.p, gmm.mu, gmm.var, data, reg)
    end

    (; p=gmm.p, mu=gmm.mu, var=gmm.var)
end

function fit_rolling!(
    gmm::GaussianMixture{T}, data::AV{<:Real}, win_size::Integer;
    kwargs...
) where T<:Real
    the_range = win_size:length(data)
    P = Matrix{T}(undef, gmm.K, length(the_range))
    M, V = similar(P), similar(P)

    @showprogress for (i, off) ∈ enumerate(the_range)
        win = @view data[off-the_range[1]+1:off]
        p, mu, var = fit!(gmm, win; kwargs...)
        P[:, i] .= p
        M[:, i] .= mu
        V[:, i] .= var
    end

    P, M, V
end

# Actually fit the model
@info "Generating data..."
data = sample_GMM([.6, .4], [0, 0], [.3, .2]; N=500)

K, WIN = 2, 300
@info "Parameters" K WIN

@info "Fitting... (no regularization)"
mix = GaussianMixture(K)
fit_rolling!(mix, data, WIN)

@info "Fitting... (with regularization)"
mix = GaussianMixture(K)
reg = EM.InvGamma(.1, .1)
fit_rolling!(mix, data, WIN; reg=reg)
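# ========== Variant 3: concrete types, no LoopVectorization ==========
# Same algorithm, but K and WIN_SIZE are global constants, every signature is
# annotated with concrete Float64 types, the @tturbo/@turbo loops are plain
# loops, sizes are checked with @assert throughout, and the RNG is seeded.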
import Pkg
Pkg.activate(temp=true, io=devnull)

# V_OLD, V_NEW = "0.12.99", "0.12.102"
# Pkg.add(name="LoopVectorization", version=V_NEW, io=devnull)
Pkg.add(["Distributions", "ProgressMeter", "ProfileView"], io=devnull)
Pkg.status()

const K = 2
const WIN_SIZE = 300

module EM

import ..K, ..WIN_SIZE
import Distributions: Dirichlet
# using LoopVectorization

const AV = AbstractVector{T} where T
const AM = AbstractMatrix{T} where T

abstract type AbstractRegularization end
const MaybeReg = Union{AbstractRegularization, Nothing}

struct InvGamma <: AbstractRegularization
    α::Float64
    β::Float64
end

function init_G(_K, N, a::T) where T<:Real
    @assert _K == K
    @assert N == WIN_SIZE

    distr = Dirichlet(_K, a)
    rand(distr, N)
end

function ELBO(G::AM{Float64}, p::AV{Float64}, mu::AV{Float64}, var::AV{Float64}, data::AV{Float64}, reg::Nothing)
    _K, N = size(G)
    @assert _K == K
    @assert N == WIN_SIZE
    @assert length(p) == K
    @assert length(mu) == K
    @assert length(var) == K
    @assert length(data) == WIN_SIZE

    ret = 0.0
    for n ∈ 1:N, k ∈ 1:_K
        q = G[k, n]
        ret += q * (
            log(p[k]) - (log(2π) + log(var[k]) + (data[n] - mu[k])^2 / var[k]) / 2
            - log(q + 1e-100)
        )
    end
    ret
end

@inline ELBO(G::AM{Float64}, p::AV{Float64}, mu::AV{Float64}, var::AV{Float64}, data::AV{Float64}, reg::InvGamma) = ELBO(G, p, mu, var, data, nothing)

# ===== E step =====
function finalize_posteriors!(G::AM{Float64}, norm::AV{Float64}, reg::Nothing)
    _K, N = size(G)
    @assert _K == K
    @assert N == WIN_SIZE
    @assert length(norm) == WIN_SIZE

    for n ∈ 1:N, k ∈ 1:_K
        G[k, n] /= norm[n]
    end
end

@inline finalize_posteriors!(G::AM{Float64}, norm::AV{Float64}, reg::InvGamma) =
    finalize_posteriors!(G, norm, nothing)

function step_E!(G::AM{Float64}, norm::AV{Float64}, p::AV{Float64}, mu::AV{Float64}, var::AV{Float64}, data::AV{Float64}, reg::MaybeReg)
    _K, N = size(G)
    @assert _K == K
    @assert N == WIN_SIZE
    @assert length(norm) == WIN_SIZE
    @assert length(p) == K
    @assert length(mu) == K
    @assert length(var) == K
    @assert length(data) == WIN_SIZE

    norm .= 0
    for n ∈ 1:N, k ∈ 1:_K
        G[k, n] = p[k] * exp(-(data[n] - mu[k])^2 / (2var[k])) / sqrt(2π * var[k])
        norm[n] += G[k, n]
    end
    finalize_posteriors!(G, norm, reg)
end

# ===== M step =====
M_weights!(p::AV{Float64}, G::AM{Float64}, norm::AV{Float64}, reg::Nothing) = p .= norm ./ sum(norm)
@inline M_weights!(p::AV{Float64}, G::AM{Float64}, norm::AV{Float64}, reg::InvGamma) = M_weights!(p, G, norm, nothing)

function M_means!(mu::AV{Float64}, G::AM{Float64}, norm::AV{Float64}, data::AV{Float64}, reg::Nothing)
    _K, N = size(G)
    @assert _K == K
    @assert N == WIN_SIZE
    @assert length(mu) == K
    @assert length(norm) == K
    @assert length(data) == WIN_SIZE

    mu .= 0
    for n ∈ 1:N, k ∈ 1:_K
        mu[k] += G[k, n] / norm[k] * data[n]
    end
end

@inline M_means!(mu::AV{Float64}, G::AM{Float64}, norm::AV{Float64}, data::AV{Float64}, reg::InvGamma) =
    M_means!(mu, G, norm, data, nothing)

finalize_variances!(var::AV{Float64}, norm::AV{Float64}, reg::Nothing) = var .= var ./ norm .+ 1e-6

function finalize_variances!(var::AV{Float64}, norm::AV{Float64}, reg::InvGamma)
    @assert length(var) == K
    @assert length(norm) == K

    α, β = reg.α, reg.β
    @. var = (2β + var) / (2α + norm)
    nothing
end

function M_variances!(var::AV{Float64}, G::AM{Float64}, norm::AV{Float64}, data::AV{Float64}, mu::AV{Float64}, reg::MaybeReg)
    _K, N = size(G)
    @assert _K == K
    @assert N == WIN_SIZE
    @assert length(var) == K
    @assert length(norm) == K
    @assert length(data) == WIN_SIZE
    @assert length(mu) == K

    var .= 0
    for n ∈ 1:N, k ∈ 1:K
        var[k] += G[k, n] * (data[n] - mu[k])^2
    end
    finalize_variances!(var, norm, reg)
end

function step_M!(G::AM{Float64}, norm::AV{Float64}, p::AV{Float64}, mu::AV{Float64}, var::AV{Float64}, data::AV{Float64}, reg::MaybeReg)
    _K, N = size(G)
    @assert _K < N
    @assert _K == K
    @assert N == WIN_SIZE
    @assert length(norm) == N
    @assert length(p) == K
    @assert length(mu) == K
    @assert length(var) == K
    @assert length(data) == WIN_SIZE

    evidences = @view norm[1:_K]
    @assert length(evidences) == K
    evidences .= 0
    for k ∈ 1:_K
        for n ∈ 1:N
            evidences[k] += G[k, n]
        end
    end

    M_weights!(p, G, evidences, reg)
    M_means!(mu, G, evidences, data, reg)
    M_variances!(var, G, evidences, data, mu, reg)
end

end # module

using ProgressMeter
import Distributions: UnivariateGMM, Categorical

const AV = AbstractVector{T} where T
const AM = AbstractMatrix{T} where T

function sample_GMM(p::AV, mu::AV, var::AV; N::Integer)
    distr = UnivariateGMM(mu, sqrt.(var), Categorical(p))
    rand(distr, N)
end

mutable struct GaussianMixture
    K::Integer
    G::Union{Matrix{Float64}, Nothing}
    norm::Union{Vector{Float64}, Nothing}
    p::Vector{Float64}
    mu::Vector{Float64}
    var::Vector{Float64}
    ELBO_hist::Vector{Float64}
end

function GaussianMixture(K::Integer)
    @assert K > 0
    GaussianMixture(
        K, nothing, nothing,
        zeros(Float64, K), zeros(Float64, K), zeros(Float64, K),
        Float64[]
    )
end

function fit!(
    gmm::GaussianMixture, data::AV{Float64};
    maxiter::Integer=10_000,
    reg::EM.MaybeReg=nothing, a::Float64=100.0, reinit::Bool=false
)
    K, N = gmm.K, length(data)

    has_reinit = false
    if gmm.G === nothing || size(gmm.G) ≠ (K, N) || reinit
        gmm.G = EM.init_G(K, N, a)
        gmm.norm = zeros(Float64, N)
        has_reinit = true
    end
    @assert size(gmm.G) == (K, N)
    @assert eltype(gmm.G) <: AbstractFloat

    if has_reinit
        EM.step_M!(gmm.G, gmm.norm, gmm.p, gmm.mu, gmm.var, data, reg)
    else
        # Keep old parameters
        EM.step_E!(gmm.G, gmm.norm, gmm.p, gmm.mu, gmm.var, data, reg)
    end

    gmm.ELBO_hist = zeros(Float64, maxiter)
    for i ∈ 1:maxiter
        EM.step_E!(gmm.G, gmm.norm, gmm.p, gmm.mu, gmm.var, data, reg)
        EM.step_M!(gmm.G, gmm.norm, gmm.p, gmm.mu, gmm.var, data, reg)
        gmm.ELBO_hist[i] = EM.ELBO(gmm.G, gmm.p, gmm.mu, gmm.var, data, reg)
    end

    (; p=gmm.p, mu=gmm.mu, var=gmm.var)
end

function fit_rolling!(
    gmm::GaussianMixture, data::AV{Float64}, win_size::Integer;
    kwargs...
)
    the_range = win_size:length(data)
    P = Matrix{Float64}(undef, gmm.K, length(the_range))
    M, V = similar(P), similar(P)

    @showprogress for (i, off) ∈ enumerate(the_range)
        win = @view data[off-the_range[1]+1:off]
        @assert length(win) == win_size
        p, mu, var = fit!(gmm, win; kwargs...)
        P[:, i] .= p
        M[:, i] .= mu
        V[:, i] .= var
    end

    P, M, V
end

import Random
Random.seed!(1)

@info "Generating data..."
data = sample_GMM([.6, .4], [0, 0], [.3, .2]; N=500)

@info "Parameters" K WIN_SIZE

@info "Fitting... (no regularization)"
mix = GaussianMixture(K)
fit_rolling!(mix, data, WIN_SIZE)

@info "Fitting... (with regularization)"
mix = GaussianMixture(K)
reg = EM.InvGamma(.1, .1)
fit_rolling!(mix, data, WIN_SIZE; reg=reg)