Timothy Liu (tlkh)
import argparse
import sys
import gym
from gym import wrappers, logger

class RandomAgent(object):
    """The world's simplest agent!"""
    def __init__(self, action_space):
        self.action_space = action_space

    def act(self, observation, reward, done):
        # Ignore the observation and sample a uniformly random action.
        return self.action_space.sample()
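The preview stops at the constructor. A minimal driver loop in the spirit of the original gym random-agent example (the environment id and episode count below are assumptions):

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("env_id", nargs="?", default="CartPole-v0")
    args = parser.parse_args()

    env = gym.make(args.env_id)
    agent = RandomAgent(env.action_space)
    for episode in range(10):
        ob = env.reset()
        reward, done = 0, False
        while not done:
            action = agent.act(ob, reward, done)
            ob, reward, done, _ = env.step(action)
    env.close()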
#!/bin/bash
# Note: both jetson_clocks and drop_caches require root privileges.
echo "Set Jetson to Max Clocks"
jetson_clocks
echo "Clear RAM"
sync; echo 3 > /proc/sys/vm/drop_caches
@tlkh
tlkh / tf_run_glue.py
Created October 14, 2019 12:51
tf_transformers
import os
import tensorflow as tf

# Let TensorFlow grow GPU memory on demand instead of reserving it all upfront.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)

import tensorflow_datasets
from transformers import BertTokenizer, TFBertForSequenceClassification, glue_convert_examples_to_features  # , BertForSequenceClassification
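The preview ends at the imports. A sketch of how a GLUE (MRPC) fine-tuning run typically continues from here; the checkpoint name, sequence length, and training hyperparameters are assumptions, not the gist's actual values:

data = tensorflow_datasets.load('glue/mrpc')
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
model = TFBertForSequenceClassification.from_pretrained('bert-base-cased')

# Tokenize and pad the raw examples into model-ready features.
train_dataset = glue_convert_examples_to_features(
    data['train'], tokenizer, max_length=128, task='mrpc')
train_dataset = train_dataset.shuffle(128).batch(32)

model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.fit(train_dataset, epochs=2)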
@tlkh
tlkh / proxy.py
Last active September 22, 2019 14:32
not network answer
# 50.012 network lab 1
# Adapted from K & R's original code
from socket import *
import sys
import _thread as thread
import os
import functools
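The preview cuts off after the imports. A minimal sketch of the threaded relay such a lab proxy typically builds around these imports; the port, buffer size, and hard-coded host are assumptions:

def handle_client(client_conn):
    request = client_conn.recv(4096)
    host = "example.com"  # hypothetical: a real proxy parses this from the request
    server_conn = socket(AF_INET, SOCK_STREAM)
    server_conn.connect((host, 80))
    server_conn.sendall(request)
    # Relay the server's response back to the client until it closes.
    while True:
        chunk = server_conn.recv(4096)
        if not chunk:
            break
        client_conn.sendall(chunk)
    server_conn.close()
    client_conn.close()

listener = socket(AF_INET, SOCK_STREAM)
listener.bind(("", 8888))
listener.listen(5)
while True:
    conn, addr = listener.accept()
    thread.start_new_thread(handle_client, (conn,))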
@tlkh
tlkh / random.py
Created September 6, 2019 05:47
random
def function(arg_1, arg_2, arg_3="default_value"):
    # Generic template: two positional arguments and one keyword argument.
    return (arg_1, arg_2, arg_3)

import time
import numpy as np
import matplotlib.pyplot as plt
import cv2
import tensorflow as tf
tf.enable_eager_execution()
import tensorflow.keras as keras
from tensorflow.keras.preprocessing import image
import tensorflow_datasets as tfds
@tlkh
tlkh / random.md
Last active August 23, 2019 04:02
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras.layers import Input, Lambda

elmo_model = hub.Module("https://tfhub.dev/google/elmo/2", trainable=False)

def ElmoEmbedding(x):
    # Squeeze the (batch, 1) string tensor to (batch,) and take ELMo's
    # mean-pooled 1024-d "default" output.
    return elmo_model(tf.squeeze(tf.cast(x, tf.string)),
                      signature="default", as_dict=True)["default"]

sequence_input = Input(shape=(1,), dtype=tf.string)
embedded_sequences = Lambda(ElmoEmbedding, output_shape=(1024,))(sequence_input)
...
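The snippet trails off; one plausible way to finish the model, with the dense head and class count as assumptions:

from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Model

# Hypothetical classification head on top of the 1024-d ELMo embedding.
x = Dense(256, activation="relu")(embedded_sequences)
preds = Dense(2, activation="softmax")(x)
model = Model(sequence_input, preds)
model.compile(optimizer="adam", loss="categorical_crossentropy",
              metrics=["accuracy"])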
@tlkh
tlkh / doc.md
Last active August 19, 2019 03:40
Docstring for tf.train.experimental.enable_mixed_precision_graph_rewrite

Enable mixed precision via a graph rewrite.

Mixed precision is the use of both float32 and float16 data types when training a model to improve performance. This is achieved via a graph rewrite operation and a loss-scale optimizer.

Performing arithmetic operations in float16 takes advantage of specialized processing units, such as NVIDIA Tensor Cores, which offer much higher arithmetic throughput. However, because float16 has a smaller representable range, performing all of training in float16 can result in gradient underflow, that is, small gradient values becoming zero. The loss-scale optimizer counteracts this by scaling the loss up before backpropagation and scaling gradients back down afterwards.
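For reference, enabling the rewrite is a one-line wrap of an existing optimizer; a minimal sketch (the optimizer choice is arbitrary):

import tensorflow as tf

opt = tf.train.AdamOptimizer(1e-3)
# Wrap the optimizer: float16-eligible ops are rewritten, and dynamic loss
# scaling is applied automatically.
opt = tf.train.experimental.enable_mixed_precision_graph_rewrite(opt)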

@tlkh
tlkh / keras_to_tf.keras.py
Last active August 4, 2018 17:43
Keras/Jupyter Stuff
import tensorflow as tf
from tensorflow import keras

# Hypothetical stand-in model; the gist's preview starts at compile().
model = keras.Sequential([
    keras.layers.Dense(64, activation='relu', input_shape=(32,)),
    keras.layers.Dense(10, activation='softmax')])
# Configure a model for categorical classification.
model.compile(optimizer=tf.train.RMSPropOptimizer(0.01),
              loss=keras.losses.categorical_crossentropy,
              metrics=[keras.metrics.categorical_accuracy])
# Convert the tf.keras model into a tf.estimator.Estimator.
estimator = tf.keras.estimator.model_to_estimator(keras_model=model)
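A possible next step with the returned estimator, using the TF1-era numpy input helper; the dummy data and feature key are assumptions tied to the stand-in model above:

import numpy as np

# The feature key must match the Keras model's input layer name.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={model.input_names[0]: np.random.rand(1000, 32).astype(np.float32)},
    y=np.random.rand(1000, 10).astype(np.float32),
    batch_size=32, num_epochs=None, shuffle=True)
estimator.train(input_fn=train_input_fn, steps=100)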