Bart van Merriënboer (bartvm)
import array
import collections
import bisect


class MKAverage:
    def __init__(self, m: int, k: int):
        # self.q = collections.deque()
        self.q = array.array("L")
        self.p = None
class MKAverage:
    def __init__(self, m: int, k: int):
        self.q = SkipList(6, 0.5)  # ordered container for the current window
        self.m = m                 # number of most recent elements to keep
        self.k = k                 # count of smallest/largest elements to drop
        self.counter = 0
        self.sum_ = 0
import math
import random


class Node:
    __slots__ = ["key", "forward", "backward", "next"]

    def __init__(self, key, level):
        self.key = key
        self.forward = [None] * (level + 1)   # next node at each level
        self.backward = [None] * (level + 1)  # previous node at each level
bartvm / anonymous_ballet.md
Last active April 1, 2019 21:03

Anonymous ballot

The goal is to allow a user to anonymously share sensitive information which can be used in a selection process. The user must be able to prove they were admitted without revealing their sensitive information. A two-step ring signature process meets these requirements.

Ring signature

Consider a group of n users, where each user i has a public key, Pi, and a secret (private) key, Si. A ring signature allows a user to sign a message m using their private key, s = sign(m, Si, P1, ..., Pn), proving that they are a member of the group without revealing their identity.
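
As an illustration of this interface only (a minimal sketch; `sign`, `verify`, and `KeyPair` are hypothetical placeholders, not a real cryptographic scheme), the two calls might look like this in Python:

# Hypothetical interface sketch, not a secure implementation.
from typing import List, NamedTuple

class KeyPair(NamedTuple):
    public: bytes   # P_i, shared with the group
    secret: bytes   # S_i, known only to user i

def sign(m: bytes, secret_key: bytes, public_keys: List[bytes]) -> bytes:
    """Return a ring signature s = sign(m, S_i, P_1, ..., P_n).

    A real scheme would derive s from the signer's secret key and every
    member's public key; only the shape of the call is shown here.
    """
    raise NotImplementedError

def verify(m: bytes, s: bytes, public_keys: List[bytes]) -> bool:
    """Check that s was produced by some member of the ring, without
    revealing which member signed it."""
    raise NotImplementedError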

Approach

def f(x, y):
    z = x * y
    return z


# Operator overloading: map each primitive to a function returning the
# adjoints (gradients) of its output with respect to each of its inputs.
adjoints = {
    'mul': lambda dz, x, y: (dz * y, dz * x),
}


def f(x, y):
diff --git a/torch/csrc/autograd/engine.cpp b/torch/csrc/autograd/engine.cpp
index f1e09b0e..c50f507e 100644
--- a/torch/csrc/autograd/engine.cpp
+++ b/torch/csrc/autograd/engine.cpp
@@ -21,6 +21,10 @@
 #include <THC/THC.h>
 #endif
+void tid() {
+  printf("%d ", (int)std::hash<std::thread::id>()(std::this_thread::get_id()));
diff --git a/torch/csrc/autograd/engine.cpp b/torch/csrc/autograd/engine.cpp
index f1e09b0e..d78c03e8 100644
--- a/torch/csrc/autograd/engine.cpp
+++ b/torch/csrc/autograd/engine.cpp
@@ -110,7 +110,7 @@ Engine::~Engine() = default;
 auto Engine::thread_main(std::shared_ptr<ReadyQueue> queue, int device) -> void {
   THInferNumThreads();
   AutoGPU guard(device);
-  while (1) {
+  while (!exit.back().load()) {
from torch import Tensor
from torch.autograd import Function, Variable


class Foo(Function):
    def forward(self, x):
        # Identity: pass the input through unchanged.
        return x

    def backward(self, dz_star):
        dphidz = Variable(Tensor(1), requires_grad=True)
        dphidz.backward()
import ast
import collections
import inspect
import numbers
import textwrap

import numpy

# AST fragment for the attribute access `_stack.push`.
PUSH = ast.Attribute(value=ast.Name(id='_stack', ctx=ast.Load()),
                     attr='push', ctx=ast.Load())
class Linear:
    def __init__(self, in_dim, out_dim):
        # Assumes `import numpy as np` and a `sigmoid` helper defined elsewhere in the gist.
        self.params = {
            'W': np.random.rand(in_dim, out_dim),
            'b': np.random.rand(out_dim),
        }

    def run(self, x):
        return sigmoid(self.params['W'] @ x + self.params['b'])