@Vindaar
Vindaar / multiBool.nim
Created June 24, 2021 14:27
Old multi-bool macro implementation I had lying around
import macros
proc processBool(n: NimNode): NimNode =
  result = n
  case n.kind
  of nnkInfix:
    if n[1].kind == nnkInfix:
      # process first infix arg
      let a = processBool(n[1])
      var tmp = nnkInfix.newTree(ident("and"), a)
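As a self-contained illustration of the same technique (not the gist's actual macro), a minimal macro that joins two boolean expressions with `and` via `nnkInfix.newTree` could look like this:

import macros

# Minimal illustration (not the gist's macro): build the infix `and` node directly.
macro allTrue(a, b: untyped): untyped =
  result = nnkInfix.newTree(ident"and", a, b)

doAssert allTrue(1 < 2, 2 < 3)
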
@Vindaar
Vindaar / seq_tensor_concept.nim
Created June 19, 2021 23:40
Combine `seq` and `Tensor` in a concept
import arraymancer
import arraymancer/laser/strided_iteration/foreach
import sequtils
import macros
import math
func len[T](t: Tensor[T]): int = t.size
iterator items[T](t: Tensor[T]): T =
  doAssert t.rank == 1
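A hedged sketch of what such a concept might look like (an assumption, not necessarily the gist's definition): both `seq[T]` and a rank-1 `Tensor[T]` with the `len`/`items` overloads above satisfy a concept that only requires those two operations.

type
  LinearContainer[T] = concept c
    c.len is int
    items(c) is T

proc totalInt(c: LinearContainer[int]): int =
  for x in c:
    result += x

doAssert totalInt(@[1, 2, 3]) == 6 # a rank-1 Tensor[int] would match as well
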
@Vindaar
Vindaar / dplyr_pandas_comparison_to_nim.org
Last active October 10, 2023 17:32
A comparison of dplyr, Pandas and data frames in Nim (using ggplotnim)

Dplyr (R), Pandas (Python) & Nim data frame comparison

This comparison is inspired by the one here: https://gist.github.com/conormm/fd8b1980c28dd21cfaf6975c86c74d07

The Nim data frame implementation we use here is the Datamancer data frame.

Note that due to UFCS in Nim we can write the commands we present either similar to the Python notation, as if the function were a method of the data frame, or as a plain procedure call.
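For example, with the Datamancer API (`toDf`, `filter` and the `f{}` formula macro) the following two spellings are the same call (a short sketch):

import datamancer

let df = toDf({"x": @[1, 2, 3], "y": @[4, 5, 6]})
let a = df.filter(f{`x` > 1})  # method-like, Pandas-style notation
let b = filter(df, f{`x` > 1}) # plain call, closer to the dplyr notation
doAssert $a == $b              # identical results, only the notation differs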

Units

  • when multiplying or dividing units, the resulting unit must always use the same order, e.g.
    let m: Meter
    let s: Second
    doAssert typeof(m * s) == typeof(s * m) # both must be the unit m•s

    We can do this by having a specific order of all units, like an
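    A minimal sketch of the canonical-ordering idea (an illustration only, not a real unit library): if a compound unit is stored as a sorted list of its base-unit names, `Meter * Second` and `Second * Meter` normalize to the same representation.

    import algorithm

    # Illustration: normalize a compound unit by sorting its base-unit names.
    proc normalizeUnit(parts: seq[string]): seq[string] =
      result = parts
      result.sort()

    doAssert normalizeUnit(@["Meter", "Second"]) == normalizeUnit(@["Second", "Meter"])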

@Vindaar
Vindaar / rSimpleVegaLite.json
Created December 31, 2020 11:12
Simple vega-lite example from ggplotnim
{
  "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
  "description": "Vega-lite plot created by ggplotnim",
  "title": "ggplotnim in Vega-Lite!",
  "width": 640,
  "height": 480,
  "data": {
    "values": [
      {
        "manufacturer": "chevrolet",
@Vindaar
Vindaar / pretty_printing_ND_tensor.nim
Last active December 29, 2020 09:58
Pretty printing of ND tensors supporting N > 3
import sequtils
type
  Tensor[T] = object
    size: int
    shape: seq[int]
    strides: seq[int]
    data: seq[T]
proc parseStrides(shape: seq[int]): seq[int] =
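The preview cuts off at `parseStrides`; as a hedged sketch (an assumption, not necessarily the gist's exact body), row-major strides for this `Tensor` layout are typically computed as the product of the sizes of all later axes:

proc rowMajorStrides(shape: seq[int]): seq[int] =
  # stride of axis i = product of shape[i+1 .. ^1]
  result = newSeq[int](shape.len)
  var acc = 1
  for i in countdown(shape.high, 0):
    result[i] = acc
    acc *= shape[i]

doAssert rowMajorStrides(@[2, 3, 4]) == @[12, 4, 1]
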
@Vindaar
Vindaar / mpg_example.json
Created December 18, 2020 17:22
Just a vega plot
{
  "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
  "description": "Vega-lite plot created by ggplotnim",
  "title": "",
  "width": 640,
  "height": 480,
  "data": {
    "values": [
      {
        "manufacturer": "audi",
import ggplotnim, numericalnim, arraymancer, sequtils
import nimpyArraymancer
proc genRand(): Tensor[float] =
  result = randomTensor([10, 10], 75).asType(float)
  # [[43.0, 10.0],
  #  [22.0, 75.0]].toTensor()
proc grad(tIn: Tensor[float], xdiff: float = 1.0): Tensor[float] =
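A hedged sketch of what a numerical gradient with this signature could look like (an assumption, not the gist's body): central differences along axis 1 in the interior, one-sided differences at the borders.

import arraymancer

proc gradSketch(tIn: Tensor[float], xdiff: float = 1.0): Tensor[float] =
  result = zeros_like(tIn)
  let rows = tIn.shape[0]
  let cols = tIn.shape[1]
  for i in 0 ..< rows:
    result[i, 0] = (tIn[i, 1] - tIn[i, 0]) / xdiff                       # forward difference
    result[i, cols - 1] = (tIn[i, cols - 1] - tIn[i, cols - 2]) / xdiff  # backward difference
    for j in 1 ..< cols - 1:
      result[i, j] = (tIn[i, j + 1] - tIn[i, j - 1]) / (2.0 * xdiff)     # central difference
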
@Vindaar
Vindaar / micrograd.nim
Created December 17, 2020 10:20
Direct Nim port of @karpathy's micrograd
import hashes, sets, math, strformat, algorithm
type
  BackFn = proc(r: Value)
  Value = ref object
    data: float
    grad: float
    backFn: BackFn
    prev: HashSet[Value]
    op: string
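A minimal sketch of how one operation could be defined on this `Value` type (an assumption, not the gist's exact code): the op stores its inputs in `prev` and a closure in `backFn` that pushes the output gradient back onto them.

# Identity-based hash so `Value` refs can live in a HashSet (assumed helper).
proc hash(v: Value): Hash = hash(cast[pointer](v))

proc `+`(a, b: Value): Value =
  result = Value(data: a.data + b.data, prev: [a, b].toHashSet, op: "+")
  result.backFn = proc(v: Value) =
    # d(a+b)/da = d(a+b)/db = 1: the output gradient flows through unchanged
    a.grad += v.grad
    b.grad += v.grad
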
# compile without (!) `--gc:arc` to get a SIGSEGV
import typetraits
type
  RawMutableView*[T] = distinct ptr UncheckedArray[T]
  CpuStorage*[T] = ref CpuStorageObj[T]
  CpuStorageObj[T] = object
    raw_buffer*: seq[T]
  Tensor[T] = object
    buf*: CpuStorage[T] # if tensor stores a `seq` itself it works fine