Jan Weidner (jw3126)

  • Freiburg, Germany
@jw3126
jw3126 / so3.jl
Created October 5, 2018 12:51
Haar measure of an angle-distance ball in SO(3)
# https://math.stackexchange.com/questions/1049788/haar-measure-of-an-angle-distance-ball-in-so3
using StatsBase
using LinearAlgebra
using StatPlots
using Plots
using Rotations
function sample_angles(N)
    map(1:N) do _
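A minimal sketch of the idea (not the gist's full code): sample rotation angles of Haar-uniform rotations and compare the empirical ball measure with the analytic result (θ - sin θ)/π from the linked question. The sample size and test angle are illustrative.
using Rotations, Statistics
# assumes rand(RotMatrix{3}) draws Haar-uniform rotations, as documented in Rotations.jl
sample_angle() = rotation_angle(rand(RotMatrix{3}))
haar_ball_measure(θ) = (θ - sin(θ)) / π   # Haar measure of {R : angle(R) <= θ}
angles = [sample_angle() for _ in 1:10^5]
θ = 1.0
empirical = mean(angles .<= θ)
@show empirical haar_ball_measure(θ)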
@jw3126
jw3126 / README.md
Created November 5, 2021 21:48
Evolution

(embedded image)

@jw3126
jw3126 / klein_nishina.jl
Last active December 12, 2020 09:04
Klein-Nishina formula
using Unitful
using Unitful: MeV, NoUnits, cm
using UnitfulRecipes
const h = 6.626_070_040e-34*u"J*s"
const h_bar = h / (2pi)
const m_e = 9.10938356e-31 * u"kg"
const c = 299_792_458.0 * u"m/s"
const r_c = h_bar / (c*m_e) # reduced Compton wavelength of the electron
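A hedged sketch of how these constants combine into the Klein-Nishina differential cross section, building on the imports and constants above; the names α, P, and dσdΩ are introduced here for illustration, not taken from the gist.
const α = 7.297_352_5693e-3   # fine-structure constant (dimensionless)
# E'/E, the Compton energy ratio for incident photon energy E and scattering angle θ
P(E, θ) = 1 / (1 + uconvert(NoUnits, E / (m_e*c^2)) * (1 - cos(θ)))
# dσ/dΩ = α^2 * r_c^2 / 2 * P^2 * (P + 1/P - sin(θ)^2)
dσdΩ(E, θ) = α^2 * r_c^2 / 2 * P(E, θ)^2 * (P(E, θ) + 1/P(E, θ) - sin(θ)^2)
uconvert(cm^2, dσdΩ(1.0MeV, π/3))   # on the order of 1e-26 cm^2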
@jw3126
jw3126 / mwe.py
Last active November 20, 2020 13:58
pytorch_lightning_ddp_gradient_checkpointing_bug
# This reproduces a pytorch_lightning issue
# where gradient checkpointing + ddp results in nan loss
#
# * Run with gpus=1 and it works fine.
# * Run with gpus=4 and the loss becomes nan quickly.
#
# See also https://forums.pytorchlightning.ai/t/gradient-checkpointing-ddp-nan/398
import torch
from torch import nn
from torch.nn import functional as F
@jw3126
jw3126 / flux_vs_keras.jl
Created October 2, 2020 09:08
Benchmark a 1D convnet: Keras vs Flux
using PyCall
using Flux
function doit_keras(cfg)
    keras = pyimport("tensorflow.keras")
    inp = keras.layers.Input((nothing, 1))
    x = inp
    x = keras.layers.Conv1D(kernel_size=51, filters=50)(x)
    x = keras.layers.Conv1D(kernel_size=1, filters=1)(x)
    out = x
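A hedged sketch of the Flux side of the comparison (the preview above only shows the Keras model); the layer sizes mirror the Keras layers, while the input length and batch size are illustrative assumptions.
using Flux, BenchmarkTools
model = Chain(
    Conv((51,), 1 => 50),   # kernel_size=51, filters=50
    Conv((1,), 50 => 1),    # kernel_size=1,  filters=1
)
x = randn(Float32, 1000, 1, 32)   # Flux 1D conv layout: (width, channels, batch)
@btime $model($x)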
using Test
using Convex1D
using BenchmarkTools
function minimize_scaled_L1_diff(xs, ys)
    # find t::Number that minimizes f(t) = sum(abs, t*xs - ys)
    # f is convex with piecewise constant derivative given by
    # f'(t) = Σ xi * sign(t*xi - yi)
    # One of the points ti := yi/xi must be a minimizer (for some xi != 0;
    # if all xi == 0 then f is constant anyway).
    # Based on remarks of Mathieu Tanneau in slack
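A hedged sketch of the approach described in the comments, written against Base only (no Convex1D dependency): evaluate f at every breakpoint ti = yi/xi and keep the best candidate. The function name is mine.
function minimize_scaled_L1_diff_bruteforce(xs, ys)
    f(t) = sum(abs, t .* xs .- ys)
    candidates = [y/x for (x, y) in zip(xs, ys) if x != 0]
    isempty(candidates) && return 0.0   # f is constant if all xs are zero
    candidates[argmin(f.(candidates))]  # best breakpoint minimizes f
end
xs, ys = randn(100), randn(100)
minimize_scaled_L1_diff_bruteforce(xs, ys)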
@jw3126
jw3126 / conv1d.jl
Created April 27, 2020 13:48
Julia CUDA 1D NN-style large-batch convolution
using CUDAnative, CuArrays
macro cushow(ex)
    val = gensym("val")
    s = string(ex)
    quote
        $val = $(esc(ex))
        CUDAnative.@cuprintln($(Expr(:string, s, " = ", val)))
        $val
    end
end
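A hedged usage sketch for the macro, assuming the CUDAnative/CuArrays API the gist was written against (today's CUDA.jl ships its own @cushow); the kernel name and sizes are illustrative.
function debug_kernel(xs)
    i = threadIdx().x
    @cushow xs[i]   # prints "xs[i] = <value>" from the device
    return nothing
end
xs = CuArrays.cu(rand(Float32, 4))
@cuda threads=length(xs) debug_kernel(xs)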
@jw3126
jw3126 / layered_array.jl
Created December 30, 2019 12:39
layered array
using Revise
using ArgCheck
struct LayeredArray{T, N, L} <: AbstractArray{T,N}
    layers::L
    function LayeredArray(layers)
        @argcheck !isempty(layers)
        @argcheck first(layers) isa AbstractArray
        l = first(layers)
        L = typeof(layers)
using Makie
using JuAFEM, SparseArrays
using LinearAlgebra
#
# Poisson example from JuAFEM docs
#
grid = generate_grid(Triangle, (20, 20));
dim = 2
@jw3126
jw3126 / advection.jl
Created August 5, 2019 19:28
advection problems
# ]add AbstractPlotting
# ]add Makie
using Makie
function step!(u_new, u, o)
    # u_new[i] = u[i] + dt*v*(u[i] - u[i-1])/dx
    #
    for i in reverse(eachindex(u))
        s = o.v*o.dt/o.dx
        u_new[i] = u[i] + s * (u[i] - get(u, i-1, zero(eltype(u))))
    end
end
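A hedged driver sketch for step!; the parameter object o (a NamedTuple here), the grid, and the initial condition are illustrative, not from the gist. Note that for v > 0 and u_t + v*u_x = 0 the standard stable upwind update subtracts s*(u[i] - u[i-1]) instead of adding it.
o = (v=1.0, dt=0.01, dx=0.05)   # stand-in parameter object with the fields step! reads
x = LinRange(-3, 3, 121)        # grid with spacing dx = 0.05
u = exp.(-(x .^ 2))             # Gaussian bump initial condition
u_new = similar(u)
for _ in 1:100
    step!(u_new, u, o)
    copyto!(u, u_new)           # reuse the two buffers between steps
end
extrema(u)                      # inspect how the bump grew or decayed after 100 steps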