Skip to content

Instantly share code, notes, and snippets.

View matpalm's full-sized avatar
🎯
Focusing

mat kelcey matpalm

🎯
Focusing
View GitHub Profile
@matpalm
matpalm / gpu_stat.py
Last active October 10, 2023 10:31
JSON formatting of nvidia-settings output
#!/usr/bin/env python
# gpu_stat.py [DELAY [COUNT]]
# dump some gpu stats as a line of json
# {"util":{"PCIe":"0", "memory":"11", "video":"0", "graphics":"13"}, "used_mem":"161"}
import json, socket, subprocess, sys, time

# Polling delay in seconds; taken from the first CLI argument, default 1.
# Catch only the expected failures (no argument given, or a non-integer
# argument) -- a bare `except:` would also swallow KeyboardInterrupt/SystemExit.
try:
    delay = int(sys.argv[1])
except (IndexError, ValueError):
    delay = 1
import numpy as np
def slerp(p0, p1, n):
# https://en.wikipedia.org/wiki/Slerp
norm = np.linalg.norm(p0) * np.linalg.norm(p1)
dot = np.sum(p0 * p1 / norm)
theta_0 = np.arccos(dot)
sin_theta_0 = np.sin(theta_0)
interp = []
for t in np.linspace(0.0, 1.0, n):
#!/usr/bin/env python
# gpu_stat.py [DELAY [COUNT]]
# dump gpu stats as a line of json
# {"time": 1474168378.146957, "pci_tx": 146000, "pci_rx": 1508000,
# "gpu_util": 42, "mem_util": 24, "mem_used": 11710,
# "temp": 76, "fan_speed": 44, "power": 65 }
#!/usr/bin/env python3
# Set up for interpolating between two random latent points (endpoints + step count).
from PIL import Image
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
# smooth values from point a to point b.
# Number of interpolation steps between the two endpoints.
STEPS = 100
# First endpoint: 512-dim latent vector drawn from a standard normal.
# NOTE(review): size=(512) is just the int 512 (not a tuple); numpy treats it
# as shape (512,), so the result is the same either way.
pt_a = np.random.normal(size=(512))
def triplet_loss(anchor_embeddings,
                 positive_embeddings,
                 negative_embeddings,
                 margin=0.0):
    """Mean triplet hinge loss over a batch of embedding triples.

    Each argument is a (B, D) batch of embeddings. For every row the loss is
    max(0, ||a - p|| - ||a - n|| + margin): zero once the anchor is closer to
    its positive than to its negative by at least `margin`.
    """
    # Per-example Euclidean distances, shape (B,).
    pos_dist = tf.norm(anchor_embeddings - positive_embeddings, axis=1)
    neg_dist = tf.norm(anchor_embeddings - negative_embeddings, axis=1)
    # Hinge on the margin constraint, then average over the batch.
    violations = pos_dist - neg_dist + margin
    hinge = tf.maximum(0.0, violations)
    return tf.reduce_mean(hinge)
class EmbeddingModel(keras.Model):
def train_step(self, data):
anchors, positives = data
print("a,p", anchors.shape, positives.shape)
with tf.GradientTape() as tape:
# Run both anchors and positives through model.
anchor_embeddings = self(anchors, training=True)
positive_embeddings = self(positives, training=True)
import numpy as np
def rnd(*args):
    """Return uniform [0, 1) samples in an array whose shape is `args`."""
    # random_sample is the function that np.random.random aliases, so the
    # draw (and the RNG stream under a fixed seed) is identical.
    shape = tuple(args)
    return np.random.random_sample(shape)
#!/usr/bin/env python3
import argparse
import model as m
from tensorflow.keras.callbacks import *
import data as d
import tensorflow as tf
import os
from lr_finder import LearningRateFinder
1/Unknown - 0s 14ms/step
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-2-231b4cfe798d> in <module>()
19 dataset = dataset.batch(4).prefetch(1)
20
---> 21 model.fit(dataset)
/usr/local/Cellar/python3/3.6.4_2/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
732 max_queue_size=max_queue_size,
// set one shot reading
register_data = MPL3115A2_CTRL_REG1_OST |
MPL3115A2_CTRL_REG1_BAR;
Wire.beginTransmission(MPL3115A2_ADDRESS);
Wire.write(MPL3115A2_CTRL_REG1);
Wire.write(register_data);
Wire.endTransmission(true);
// wait for one shot bit to auto clear
bool ready = false;