Liam Hinzman (LiamHz): GitHub Gists
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>Label</key>
    <string>com.liamhinzman.arbtt</string>
    <key>Program</key>
    <string>/Users/liamhinzman/screenshot.sh</string>
    <key>RunAtLoad</key>
    <true/>
</dict>
</plist>
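To use the agent, this plist is saved under ~/Library/LaunchAgents (conventionally named after its Label, so com.liamhinzman.arbtt.plist here) and activated with launchctl load ~/Library/LaunchAgents/com.liamhinzman.arbtt.plist. RunAtLoad tells launchd to run the script as soon as the agent is loaded, rather than waiting for a trigger.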
LiamHz / backup_ppm.cpp
Created February 2, 2020 08:08
Store intermediate snapshots of a ppm file during rendering
// Store a backup ppm after rendering every 100 rows
if (j % 100 == 0) {
    ofs.close();
    temp_filename = "./out_" + std::to_string(ny-j) + ".ppm";
    // Copy everything written to out.ppm so far into the snapshot file
    std::ifstream src("out.ppm", std::ios::binary);
    std::ofstream dst(temp_filename, std::ios::binary);
    dst << src.rdbuf();
    // Reopen the main output in append mode to keep rendering
    // (assumed continuation; the gist is truncated here)
    ofs.open("out.ppm", std::ios::app);
}
LiamHz / karabiner.json
Created July 16, 2019 04:28
Map caps lock to left_control when held as a modifier and to escape when pressed alone
{
  "profiles": [{
    "name": "Default profile",
    "selected": true,
    "complex_modifications": {
      "rules": [{
        "description": "A Modern Space Cadet (Steve Losh)",
        "manipulators": [{
          "type": "basic",
          "from": { "key_code": "caps_lock", "modifiers": { "optional": ["any"] } },
          "to": [{ "key_code": "left_control" }],
          "to_if_alone": [{ "key_code": "escape" }]
        }]
      }]
    }
  }]
}
# Strip the first three characters of each file name, keeping the extension.
# BaseName already excludes the extension, so take its substring and re-add '.png'
# (e.g. 'IMGcat.png' becomes 'cat.png')
Dir | Rename-Item -NewName { $_.BaseName.Substring(3) + '.png' }
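The same rename as a Python sketch, assuming the intent above (drop a three-character prefix, keep the .png extension; the example names are hypothetical):

from pathlib import Path

# Rename e.g. 'IMGcat.png' to 'cat.png' by dropping the first three characters
for p in Path('.').glob('*.png'):
    p.rename(p.with_name(p.stem[3:] + '.png'))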
from google.colab.output import eval_js
from base64 import b64decode

# HTML/JS that streams the user's webcam into a <video> element
VIDEO_HTML = """
<video autoplay
 width=800 height=600></video>
<script>
var video = document.querySelector('video')
navigator.mediaDevices.getUserMedia({ video: true })
  .then(stream => video.srcObject = stream)
</script>
"""
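The imports suggest the rest of the gist pulls a frame back out of the page as base64. A hedged sketch of that half, where takePhoto() is an assumed JS helper (not shown in the excerpt) that returns a data URL:

# Hypothetical continuation: eval_js runs JS in the notebook frontend and
# returns its result; we assume a takePhoto() helper yielding a base64
# data URL like "data:image/jpeg;base64,..."
data_url = eval_js('takePhoto()')
header, encoded = data_url.split(',', 1)
with open('frame.jpg', 'wb') as f:
    f.write(b64decode(encoded))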
import torch
import torch.nn as nn

class DQN(nn.Module):
    def __init__(self, input_shape, n_actions):
        super(DQN, self).__init__()
        # Convolutional feature extractor (the classic DQN architecture)
        self.conv = nn.Sequential(
            nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1),
            nn.ReLU()
        )
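        # Assumed continuation (the gist is truncated here): the standard DQN
        # head flattens the conv features and maps them to one Q-value per
        # action through a small MLP
        conv_out_size = self._get_conv_out(input_shape)
        self.fc = nn.Sequential(
            nn.Linear(conv_out_size, 512),
            nn.ReLU(),
            nn.Linear(512, n_actions)
        )

    def _get_conv_out(self, shape):
        # Pass a dummy tensor through the conv stack to get its flattened size
        return self.conv(torch.zeros(1, *shape)).numel()

    def forward(self, x):
        # Flatten the conv features per batch element, then compute Q-values
        return self.fc(self.conv(x).view(x.size()[0], -1))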
from __future__ import division

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import cv2

def predict_transform(prediction, inp_dim, anchors, num_classes, CUDA=True):
    """Flatten a YOLO detection feature map into a 2-D tensor with one row
    of bounding-box attributes per anchor box."""
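The body isn't shown in the excerpt, but the core box transform YOLOv3 applies is well documented: centre offsets pass through a sigmoid and are added to the grid cell's corner, while width and height scale the anchor dimensions exponentially. A minimal standalone sketch (the function name and arguments are illustrative, not the gist's):

import torch

def transform_box(tx, ty, tw, th, cx, cy, pw, ph):
    # (cx, cy): top-left corner of the grid cell that predicts the box
    # (pw, ph): width and height of the anchor assigned to that prediction
    bx = torch.sigmoid(tx) + cx   # box centre x, offset into the cell
    by = torch.sigmoid(ty) + cy   # box centre y
    bw = pw * torch.exp(tw)       # box width as a scaled anchor
    bh = ph * torch.exp(th)       # box height
    return bx, by, bw, bh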
from torchvision import models

# Get the "features" portion of VGG19
# The classifier portion is not needed for style transfer
vgg = models.vgg19(pretrained=True).features

# Freeze all VGG parameters: only activations are measured at each conv
# layer, and the network weights are never modified
for param in vgg.parameters():
    param.requires_grad_(False)
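The training loop below calls a get_features helper that this excerpt doesn't define. A sketch of what it presumably looks like, following the standard PyTorch style-transfer recipe (the index-to-name mapping for vgg19.features is an assumption based on that recipe):

def get_features(image, model, layers=None):
    # Map module indices in vgg19.features to the conv-layer names used below
    if layers is None:
        layers = {'0': 'conv1_1', '5': 'conv2_1', '10': 'conv3_1',
                  '19': 'conv4_1', '21': 'conv4_2', '28': 'conv5_1'}
    features = {}
    x = image
    # Run the image through the network layer by layer, recording activations
    for name, layer in model._modules.items():
        x = layer(x)
        if name in layers:
            features[layers[name]] = x
    return features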
import torch.optim as optim

show_every = 400   # Show the target image every x steps
optimizer = optim.Adam([target], lr=0.003)   # Optimize the image itself, not the network
steps = 2000       # How many iterations to update the target image

# Training loop
for ii in range(steps):
    # Calculate the content loss against the fixed content activations
    target_features = get_features(target, vgg)
    content_loss = torch.mean((target_features['conv4_2'] - content_features['conv4_2'])**2)
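    # Assumed remainder of the loop (the excerpt stops here): build the style
    # loss from gram matrices (gram_matrix is defined below), combine it with
    # the content loss, and take an optimizer step on the target image.
    # style_weights, style_grams, content_weight, and style_weight are
    # assumed names from the standard recipe, not shown in the excerpt.
    style_loss = 0
    for layer in style_weights:
        target_gram = gram_matrix(target_features[layer])
        _, d, h, w = target_features[layer].shape
        layer_loss = style_weights[layer] * torch.mean((target_gram - style_grams[layer])**2)
        style_loss += layer_loss / (d * h * w)

    total_loss = content_weight * content_loss + style_weight * style_loss
    optimizer.zero_grad()
    total_loss.backward()
    optimizer.step()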
def gram_matrix(tensor):
    # Get the depth, height, and width of the tensor (batch size is discarded)
    _, d, h, w = tensor.size()
    # Reshape so each row holds one channel's flattened activations
    tensor = tensor.view(d, h*w)
    # The gram matrix is the product of the features with their transpose:
    # entry (i, j) measures how strongly channels i and j co-activate
    gram = torch.mm(tensor, tensor.t())
    return gram
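As a quick usage example (the tensor shape here is hypothetical):

features = torch.randn(1, 64, 128, 128)   # one image with 64 feature channels
gram = gram_matrix(features)              # result shape: [64, 64]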