mocobt j20232

😾 • Tokyo
j20232 / Algorithm
Last active December 18, 2019 14:42
Basic algorithms
For vacation 🏝
j20232 / cpp.sh
Last active January 19, 2020 14:44
Code-runner for AtCoder
#!/bin/bash
# Compile the given source file with g++ 9
/usr/local/bin/g++-9 "$1" -o a.out
# Split the filename into contest, round index, and problem letter (e.g. abc_042_a.cpp)
contest=$(cut -d '_' -f 1 <<< "$1")
index=$(cut -d '_' -f 2 <<< "$1")
type=$(cut -d '.' -f 1 <<< "$(cut -d '_' -f 3 <<< "$1")")
url="https://${contest}${index}.contest.atcoder.jp/tasks/${contest}${index}_${type}"
# Download the task's sample cases with online-judge-tools and run them against a.out
rm -rf test/*
oj dl $url
oj test
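A hedged usage sketch, not part of the gist: assuming the source file is named <contest>_<round>_<problem>.cpp (for example abc_042_a.cpp) and online-judge-tools (oj) is installed, the script would be invoked as

bash cpp.sh abc_042_a.cpp

which compiles the file, derives the AtCoder task URL from the filename, downloads its sample cases, and tests a.out against them.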
j20232 / pixelart.py
Created May 3, 2020 20:09
pixelart.py
import numpy as np
import cv2
from sklearn import preprocessing
def convert_to_dot(img, color_num=16, dot_size=10):
    # Cluster pixel colors in Lab space with k-means to reduce the palette to color_num colors
    lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB) / 255.0
    flat_lab = lab.reshape((img.shape[0] * img.shape[1], 3))
    Z = preprocessing.StandardScaler().fit_transform(flat_lab).astype(np.float32)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 10.0)
    _, label, _ = cv2.kmeans(Z, color_num, None, criteria, 20, cv2.KMEANS_PP_CENTERS)
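    # Assumed continuation (the listing truncates the function here; the lines below are a guess, not the gist's code):
    # recolor each pixel with its cluster's mean RGB value, then shrink and re-enlarge with
    # nearest-neighbor interpolation so every dot_size x dot_size block becomes one flat "dot"
    flat_rgb = img.reshape((img.shape[0] * img.shape[1], 3)).astype(np.float32)
    palette = np.array([flat_rgb[label.ravel() == k].mean(axis=0) for k in range(color_num)])
    quantized = palette[label.ravel()].reshape(img.shape).astype(np.uint8)
    small = cv2.resize(quantized, (img.shape[1] // dot_size, img.shape[0] // dot_size),
                       interpolation=cv2.INTER_NEAREST)
    return cv2.resize(small, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_NEAREST)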
import numpy as np
def first_order_cp(X, rank, la=0.01, eta=0.01, eps=1e-5):
    """L2-regularized CP decomposition with first-order alternating gradient descent
    Args:
        X (np.ndarray): IxJxK tensor
        rank (int): rank of the decomposition (factor matrices of shape [I, rank], [J, rank], [K, rank])
        la (float, optional): coefficient for L2 regularization. Defaults to 0.01.
        eta (float, optional): learning rate. Defaults to 0.01.
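The preview stops inside the docstring. As an illustration of the technique it names, here is a minimal, self-contained sketch of L2-regularized CP decomposition fitted by first-order alternating gradient steps; the initialization, update order, and stopping rule below are assumptions, not the gist's code.

import numpy as np

def cp_gd_sketch(X, rank, la=0.01, eta=0.01, eps=1e-5, max_iter=1000):
    # Fit X ≈ sum_r a_r ∘ b_r ∘ c_r with factor matrices A (I x rank), B (J x rank), C (K x rank)
    rng = np.random.default_rng(0)
    factors = [rng.random((dim, rank)) for dim in X.shape]
    grad_specs = ['ijk,jr,kr->ir', 'ijk,ir,kr->jr', 'ijk,ir,jr->kr']
    for _ in range(max_iter):
        grads = []
        for mode in range(3):
            A, B, C = factors
            R = X - np.einsum('ir,jr,kr->ijk', A, B, C)        # reconstruction residual
            others = [factors[m] for m in range(3) if m != mode]
            grad = -np.einsum(grad_specs[mode], R, *others) + la * factors[mode]
            factors[mode] = factors[mode] - eta * grad          # one gradient step on this factor only
            grads.append(grad)
        if max(np.abs(g).max() for g in grads) < eps:           # stop when every gradient is small
            break
    return factors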
j20232 / mesh_smoothing.py
Last active June 23, 2021 12:24
Mesh smoothing based on Laplace-Beltrami operator
import numpy as np
import scipy
import trimesh
import polyscope as ps
from pathlib import Path
def double_area(geom):
    i = geom.faces[:, 0]  # [num_faces]
    j = geom.faces[:, 1]  # [num_faces]
import numpy as np
import scipy
import trimesh
import polyscope as ps
from pathlib import Path
from tqdm import tqdm
def double_area(geom):
    i = geom.faces[:, 0]  # [num_faces]
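Both previews stop inside double_area. Assuming geom is a trimesh.Trimesh (as the imports suggest), such a helper typically returns twice the area of every triangle via a cross product; a minimal sketch, not the gist's exact code:

import numpy as np

def double_area_sketch(geom):
    # Twice the area of each triangle: |(vj - vi) x (vk - vi)| per face
    i, j, k = geom.faces[:, 0], geom.faces[:, 1], geom.faces[:, 2]
    vi, vj, vk = geom.vertices[i], geom.vertices[j], geom.vertices[k]
    return np.linalg.norm(np.cross(vj - vi, vk - vi), axis=1)  # [num_faces]

These per-face areas are the usual ingredient for the lumped mass matrix that pairs with the cotangent Laplacian in Laplace-Beltrami smoothing.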
j20232 / tiny_laf.py
Created July 7, 2021 14:53
Tiny snippets of Learning Aggregation Functions [Pellegrini et al. IJCAI2021] https://arxiv.org/abs/2012.08482
import os
import random
from tqdm import tqdm
import numpy as np
import torch
def seed_everything(seed=1116):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
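    # Assumed continuation: the rest of the usual seeding boilerplate
    # (the preview cuts off here, so these exact lines are a guess, not the gist's code)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False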
j20232 / convergence_test_optimized_with_torch_and_np.py
Created August 3, 2021 11:11
convergence_test_optimized_with_torch_and_np.py
import numpy as np
import matplotlib.pyplot as plt
import torch
def func(x):
    return 1e-4 * ((x - 6) ** 3) + 1e-4 * ((x - 5) ** 4) + 1e-2 * ((np.sin(x * 0.1) - 3) ** 2)
def torch_func(x):
    return 1e-4 * ((x - 6) ** 3) + 1e-4 * ((x - 5) ** 4) + 1e-2 * ((torch.sin(x * 0.1) - 3) ** 2)
class FiniteDiff(torch.autograd.Function):
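The preview ends at the class declaration. A hedged guess at how a finite-difference autograd Function could be completed here, reusing torch_func from above and approximating its derivative with central differences (the class name FiniteDiffSketch and the step size are assumptions, not the gist's code):

class FiniteDiffSketch(torch.autograd.Function):
    EPS = 1e-3  # finite-difference step size (assumed value)

    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return torch_func(x)

    @staticmethod
    def backward(ctx, grad_output):
        x, = ctx.saved_tensors
        h = FiniteDiffSketch.EPS
        # Central-difference approximation of d torch_func / dx
        return grad_output * (torch_func(x + h) - torch_func(x - h)) / (2 * h)

Calling FiniteDiffSketch.apply(x).backward() on a scalar tensor with requires_grad=True fills x.grad with the finite-difference estimate, which can then be compared against the analytic gradient of torch_func.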
j20232 / taylor_approximation.py
Created October 12, 2021 05:22
taylor_approximation.py
import numpy as np
import argparse
def f(x, func_id):
    if func_id == 0:
        return x[0] * x[1] + np.log(x[0])
    elif func_id == 1:
        return np.sin(x[0]) + np.cos(x[1])
    else:
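The preview cuts off at the final branch. As an illustration of what the file name suggests, a minimal first-order Taylor approximation of f around a point x0, using a central-difference gradient (the helper name, arguments, and evaluation points are assumptions, not the gist's code):

import numpy as np

def taylor_first_order(f, x0, x, func_id, h=1e-5):
    # Approximate f(x) by f(x0) + grad_f(x0) . (x - x0), with the gradient taken numerically
    x0 = np.asarray(x0, dtype=float)
    x = np.asarray(x, dtype=float)
    grad = np.zeros_like(x0)
    for i in range(x0.size):
        e = np.zeros_like(x0)
        e[i] = h
        grad[i] = (f(x0 + e, func_id) - f(x0 - e, func_id)) / (2 * h)
    return f(x0, func_id) + grad @ (x - x0)

For example, taylor_first_order(f, [1.0, 2.0], [1.1, 1.9], func_id=0) approximates x[0] * x[1] + np.log(x[0]) near the point (1.0, 2.0).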