
Marek Kolodziej mkolod

  • San Francisco Bay Area, CA
#!/usr/bin/python3
# Python 3 randomizes hash seeds at each interpreter start.
# https://docs.python.org/3/reference/datamodel.html#object.__hash__
#
# This was to fix the following vulnerability:
# http://ocert.org/advisories/ocert-2011-003.html
#
# For non-web apps, the hash non-determinism between interpreter runs
# can be fixed by setting an env var:
#
#   export PYTHONHASHSEED=0
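A quick demonstration of the effect (not part of the gist; child interpreters are spawned so each gets a fresh seed):

import os
import subprocess
import sys

cmd = [sys.executable, "-c", "print(hash('hello'))"]
# Two fresh interpreters: the hashes typically differ between runs.
print(subprocess.run(cmd, capture_output=True, text=True).stdout.strip())
print(subprocess.run(cmd, capture_output=True, text=True).stdout.strip())
# With the seed pinned, the hash is reproducible across runs.
env = dict(os.environ, PYTHONHASHSEED="0")
print(subprocess.run(cmd, env=env, capture_output=True, text=True).stdout.strip())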
@mkolod
mkolod / dft_idft.ipynb
Created July 7, 2021 02:31
DFT / IDFT
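The notebook itself fails to render, so here is a minimal sketch of the transforms its title refers to, assuming the standard definitions (illustrative code, not the notebook's contents):

import numpy as np

def dft(x):
    # Naive O(N^2) DFT: X[k] = sum_n x[n] * exp(-2j*pi*k*n/N)
    N = len(x)
    n = np.arange(N)
    return np.exp(-2j * np.pi * np.outer(n, n) / N) @ x

def idft(X):
    # Inverse: x[n] = (1/N) * sum_k X[k] * exp(2j*pi*k*n/N)
    N = len(X)
    n = np.arange(N)
    return np.exp(2j * np.pi * np.outer(n, n) / N) @ X / N

x = np.random.randn(8)
assert np.allclose(idft(dft(x)), x)  # round trip recovers the input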
@mkolod
mkolod / parallel_reservoir_sampling_equivalent.ipynb
Created June 9, 2021 16:59
Parallel Reservoir Sampling Equivalent
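This notebook does not render either. For context, a sketch of classic single-stream reservoir sampling (Algorithm R), the baseline any parallel-equivalent scheme has to match in distribution (an assumption about what the notebook compares against):

import random

def reservoir_sample(stream, k, rng=random):
    # Keep the first k items; thereafter item i replaces a random
    # slot with probability k/(i+1), giving every item probability k/n.
    reservoir = []
    for i, item in enumerate(stream):
        if i < k:
            reservoir.append(item)
        else:
            j = rng.randrange(i + 1)
            if j < k:
                reservoir[j] = item
    return reservoir

print(reservoir_sample(range(1000), 5))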
/* Floating Point 4x4 Matrix Multiplication */
.global _start
_start:
    LDR R0, =matrix0    @ R0 = address of the first input matrix
    LDR R1, =matrix1    @ R1 = address of the second input matrix
    LDR R2, =matrix2    @ R2 = address of the third matrix (presumably the result)
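The assembly preview cuts off after loading the matrix addresses. For reference, the computation it implements is the standard triple loop, sketched here in Python (names and layout are illustrative):

def matmul4x4(a, b):
    # c[i][j] = sum over k of a[i][k] * b[k][j]
    c = [[0.0] * 4 for _ in range(4)]
    for i in range(4):
        for j in range(4):
            for k in range(4):
                c[i][j] += a[i][k] * b[k][j]
    return c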
@mkolod
mkolod / disjoint_set_forest.py
Last active January 7, 2021 05:24
Disjoint Set Forest
class DisjointForest:
    class Subset:
        def __init__(self, elem, parent=None, rank=0):
            self.elem = elem        # the value this node wraps
            self.parent = parent    # None means this node is a root
            self.rank = rank        # upper bound on subtree height

        def __repr__(self):
            # Preview is truncated here; a plausible completion.
            return f"Subset(elem={self.elem!r}, rank={self.rank})"
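The preview stops at __repr__. The operations that make the structure useful are find and union; a sketch consistent with the fields above (a hypothetical continuation, not the gist's own code):

def find(subset):
    # Follow parent pointers to the root, compressing the path
    # so later lookups are nearly O(1) amortized.
    if subset.parent is not None:
        subset.parent = find(subset.parent)
        return subset.parent
    return subset

def union(a, b):
    ra, rb = find(a), find(b)
    if ra is rb:
        return ra
    if ra.rank < rb.rank:  # union by rank: attach the shorter tree
        ra, rb = rb, ra
    rb.parent = ra
    if ra.rank == rb.rank:
        ra.rank += 1
    return ra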
#include <chrono>
#include <cmath>
#include <future>
#include <iostream>
#include <memory>
#include <mutex>
#include <thread>
template<typename Ret, typename Fun, typename Arg>
class ReusableWorkerThreadWithFuture {
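The C++ listing is cut off before the class body. Assuming the class dispatches work items to a single long-lived thread and hands back futures, the Python standard library expresses the same pattern in a few lines:

from concurrent.futures import ThreadPoolExecutor

worker = ThreadPoolExecutor(max_workers=1)  # one reusable worker thread
future = worker.submit(pow, 2, 10)          # enqueue work, get a future
print(future.result())                      # blocks until done: 1024
worker.shutdown()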
@mkolod
mkolod / fake_gpu_utilization.cu
Created August 6, 2020 16:05
fake_gpu_utilization
#include <cstdio>     // fprintf (used by the macro below)
#include <cstdlib>    // exit
#include <string>
#include <unistd.h>
#include <iostream>
#include <nvml.h>

#define cudaSafeCall(call) \
do { \
    cudaError_t err = call; \
    if (cudaSuccess != err) { \
        fprintf (stderr, "Cuda error in file '%s' in line %i : %s.", \
                 __FILE__, __LINE__, cudaGetErrorString(err)); \
        exit(EXIT_FAILURE); \
    } \
} while (0)
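Judging by the name, the gist keeps the device busy so monitoring tools report utilization. A rough PyTorch analogue of that idea (a sketch of the concept, not the gist's NVML-based code):

import torch

x = torch.randn(4096, 4096, device="cuda")
while True:
    y = x @ x                 # back-to-back matmuls keep the SMs busy
    torch.cuda.synchronize()  # drain the queue so the loop paces itself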
# NOTE: The network here is not meant to make any sense. It's just for measuring perf impact.
import torch
import torch.nn.functional as F
from time import time
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        fcs = [torch.nn.Linear(10, 100)] + [torch.nn.Linear(100, 100) for _ in range(20)]
        self.fcs = torch.nn.Sequential(*fcs)

    def forward(self, x):
        # The preview cuts off above; a plausible forward pass.
        return self.fcs(x)
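The preview ends before the measurement loop; a hypothetical harness in the spirit of the imports above (the time import suggests wall-clock timing):

net = Net()
x = torch.randn(32, 10)
start = time()
for _ in range(100):
    out = net(x)
print(f"100 forward passes took {time() - start:.3f}s")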
#include <stdio.h>
#include <thread>
#include <chrono>
#include <iostream>
const int N = 1 << 20;
__global__ void kernel(float *x, int n)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    // The preview cuts off here; a plausible per-element body.
    if (tid < n) x[tid] = sqrtf(x[tid]);
}