(draft; work in progress)
See also:
- Compilers
- Program analysis:
  - Dynamic analysis — instrumentation, translation, sanitizers
# Make sure you have `tac` [1] (if on macOS) and `atuin` [2] installed,
# then drop the below in your ~/.zshrc.
#
# [1]: https://unix.stackexchange.com/questions/114041/how-can-i-get-the-tac-command-on-os-x
# [2]: https://github.com/ellie/atuin
atuin-setup() {
    # Bail out quietly if atuin is not installed.
    ! hash atuin && return

    # Suppress atuin's default keybindings; we bind the widget ourselves.
    export ATUIN_NOBIND="true"

    # NOTE(review): the pasted snippet was truncated here. Per the atuin
    # docs, ATUIN_NOBIND is read by `atuin init`, and `_atuin_search_widget`
    # only exists after init runs — so init must precede the bindkey below.
    # Confirm this matches the original intent.
    eval "$(atuin init zsh)"

    # Ctrl-E opens atuin's interactive history search.
    bindkey '^E' _atuin_search_widget
}
import subprocess


def read_gpu_info(cmd=("nvidia-smi",)):
    """Return the stdout of ``cmd`` (default: ``nvidia-smi``), or None.

    None is returned when the binary is missing, exits nonzero, or its
    output contains "failed" (Colab's stub nvidia-smi prints
    "NVIDIA-SMI has failed ..." when no GPU is attached).

    The original snippet used IPython shell-magic (``!nvidia-smi``), which
    is invalid outside a notebook; subprocess makes it plain Python.
    """
    try:
        proc = subprocess.run(list(cmd), capture_output=True, text=True)
    except OSError:
        # Binary not found / not executable.
        return None
    if proc.returncode != 0 or "failed" in proc.stdout:
        return None
    return proc.stdout


gpu_info = read_gpu_info()
if gpu_info is None:
    print('Not connected to a GPU')
else:
    print(gpu_info)
# Bridge JAX -> PyTorch using the DLPack array-exchange protocol.
import torch
import torch.utils.dlpack

import jax
import jax.dlpack


def j2t(x_jax):
    """Convert a JAX array into a PyTorch tensor via DLPack.

    The array is exported from JAX as a DLPack capsule and imported by
    PyTorch, so no data copy is required.
    """
    capsule = jax.dlpack.to_dlpack(x_jax)
    return torch.utils.dlpack.from_dlpack(capsule)
# Timed load of nflfastR play-by-play data from a local DuckDB database.
library(tidyverse)
library(tictoc)
library(arrow)

tic()
db_path <- "data/pbp_db.duckdb"
con <- DBI::dbConnect(duckdb::duckdb(), db_path)
# Lazy table reference: rows stay in DuckDB until collect()ed,
# so the connection must remain open while `nfl_pbp` is in use.
nfl_pbp <- tbl(con, "nflfastR_pbp")
toc()
(draft; work in progress)
See also:
# Dependencies for the minimc Hamiltonian Monte Carlo examples.
import autograd.numpy as np
import scipy.stats as st

import matplotlib as mpl
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import seaborn as sns

from minimc.minimc import mixture, neg_log_normal
from minimc.minimc.minimc_slow import hamiltonian_monte_carlo as hmc_slow