--- benchmarks.py 2020-03-16 19:45:33.000000000 -0700
+++ benchmarks_new.py 2020-03-16 19:48:08.000000000 -0700
@@ -12,10 +12,15 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from os import getenv
+
import pandas as pd
import requests
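The visible hunk only adds "from os import getenv", which suggests the benchmark script starts picking up configuration from the environment. A minimal sketch of that pattern; the BENCHMARK_HOST variable and its default are assumptions, not taken from the diff:

# Sketch only: BENCHMARK_HOST and its default URL are assumptions, not part of benchmarks.py.
from os import getenv

import requests

BENCHMARK_HOST = getenv("BENCHMARK_HOST", "http://localhost:8000")

def fetch_benchmark_results():
    # Read the target host from the environment instead of hard-coding it.
    response = requests.get(BENCHMARK_HOST + "/results")
    response.raise_for_status()
    return response.json()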
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
                                   cuda::detail::TensorInfo<T, IndexType> src,
                                   cuda::detail::TensorInfo<int64_t, IndexType> indices,
                                   int dstAddDim,
                                   int srcAddDim,
                                   IndexType innerSize,
                                   int64_t dstAddDimSize) {
  // In order to avoid reloading the index that we are copying, load
  // it once to handle all of the points that are being selected, so
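The kernel above appears to be the small-index path behind PyTorch's index_add_; a short Python illustration of the operation it performs (shapes, dim, and values are arbitrary example choices):

import torch

# dst.index_add_(dim, index, src): for each i, add src[i] into dst at position index[i] along dim.
dst = torch.zeros(5, 3)
src = torch.ones(2, 3)
index = torch.tensor([0, 4])
dst.index_add_(0, index, src)   # rows 0 and 4 of dst each receive a row of src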
from nvidia.dali import pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
from nvidia.dali.plugin.pytorch import DALIGenericIterator
from typing import Sequence
def _pipelines_sizes(pipes):
for p in pipes:
p.build()
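These imports wire DALI pipelines into PyTorch; a hedged sketch of how built pipelines are typically wrapped for iteration (the "data"/"label" output names and size value below are placeholders, not values from the gist):

def iterate_dali(pipes):
    # Build each pipeline, then hand them to PyTorch via DALIGenericIterator.
    for p in pipes:
        p.build()
    # Output names and size=50000 are placeholder assumptions.
    train_iterator = DALIGenericIterator(pipes, ["data", "label"], size=50000)
    for batch in train_iterator:
        images, labels = batch[0]["data"], batch[0]["label"]
        yield images, labels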
from ignite.engine import Engine, _prepare_batch
from ignite.engine import Events
from ignite.contrib.handlers import ProgressBar
from ignite.metrics import Accuracy, Loss, RunningAverage
def create_keras_supervised_trainer(model, optimizer, loss_fn, metrics={}, device=None, prepare_batch=None):
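The preview cuts off at the signature; for context, a minimal sketch of what an Ignite supervised update step usually looks like. This mirrors ignite's create_supervised_trainer pattern and is not the gist's actual body:

def create_supervised_trainer_sketch(model, optimizer, loss_fn, device=None):
    # Standard supervised update step wrapped in an Ignite Engine.
    def _update(engine, batch):
        model.train()
        optimizer.zero_grad()
        x, y = _prepare_batch(batch, device=device)
        y_pred = model(x)
        loss = loss_fn(y_pred, y)
        loss.backward()
        optimizer.step()
        return loss.item()
    return Engine(_update)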
def create_attention_layer(features, hidden, skip_sum=False):
embed_current = Dense(10, activation='tanh')
embed_past = Dense(10, activation='tanh')
reduce_to_score = Dense(1)
hidden_with_time_axis = Lambda(lambda x: tf.expand_dims(x, 1), name="this_does_basically_nothing_just_reshapes_inputs_to_match")(hidden)
score = Add()([embed_current(features), embed_past(hidden_with_time_axis)])
attention_weights = Softmax(axis=1, name="this_softmax_gets_a_weight_for_each_timestep")(reduce_to_score(score))
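The preview stops at the attention weights; in a Bahdanau-style layer the next step is to form a context vector from them. A hedged sketch of that step (Multiply is assumed to be imported from keras.layers alongside the other layers; none of this is shown in the gist):

# Sketch: weight each timestep's features and sum over the time axis.
# attention_weights: (batch, timesteps, 1); features: (batch, timesteps, feature_dim).
weighted_features = Multiply()([attention_weights, features])
context_vector = Lambda(lambda x: tf.reduce_sum(x, axis=1),
                        name="sum_weighted_features_over_time")(weighted_features)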
'''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
from __future__ import print_function
import keras
from keras.datasets import mnist
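The preview ends at the imports; the model this well-known Keras example builds is, approximately, the stock mnist_cnn architecture. Sketched here from the upstream example, so treat it as a reconstruction rather than the gist's exact code:

from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense

# Approximate model from the upstream keras mnist_cnn.py example.
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))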
dnola / convert_to_TFTRT_graph.py (created March 6, 2019): TF-TRT Example Workflow
# Import TensorFlow and TensorRT
import tensorflow as tf
import tensorflow.contrib.tensorrt as trt
import numpy as np
def get_frozen_graph(graph_file):
"""Read Frozen Graph file from disk."""
with tf.gfile.FastGFile(graph_file, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
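The frozen graph loaded above is what gets converted; a hedged sketch of the TF 1.x contrib conversion call (the graph file name, output node name, and sizing/precision values are placeholders):

# Sketch: hand the frozen graph to TF-TRT for optimization (TF 1.x contrib API).
# "model.pb", the "logits" output name, and the sizing values are placeholder assumptions.
frozen_graph = get_frozen_graph("model.pb")
trt_graph = trt.create_inference_graph(
    input_graph_def=frozen_graph,
    outputs=["logits"],
    max_batch_size=1,
    max_workspace_size_bytes=1 << 30,
    precision_mode="FP16")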
import numpy as np
import pandas as pd
import random
stock_price = np.random.rand(10000).tolist() # 10000 days of stock data
# 1 when today's price is above tomorrow's, i.e. the price falls the next day
up_or_down = [1 if stock_price[x] > stock_price[x+1] else 0 for x in range(len(stock_price)-1)]
print(up_or_down[:100])
# generate a bunch of RSI indicators
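A hedged sketch of what an RSI feature usually looks like in pandas (the 14-period window is the conventional default, not necessarily what the gist uses):

def rsi(prices, window=14):
    # Classic RSI: 100 - 100 / (1 + average gain / average loss) over the window.
    delta = pd.Series(prices).diff()
    average_gain = delta.clip(lower=0).rolling(window).mean()
    average_loss = (-delta.clip(upper=0)).rolling(window).mean()
    return 100 - 100 / (1 + average_gain / average_loss)

rsi_14 = rsi(stock_price)   # one example indicator; other windows could be generated the same way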
dnola / custom_tuning_example.py (last active July 26, 2017): Using SearchWrapper for OOM, Callback-compatible, Model API compatible tuning.
"""
Using SearchWrapper for OOM, Callback-compatible, Model API compatible tuning.
A small example of hyperparameter search in Keras.
First, try grid search.
Then, try a wide random search (many configs) with only a few batches of data - a 'short random search'.
Use the best model from 'short random', train to completion, compare with grid results.
The key here is that we only keep one model in memory at once! Large searches (and large models) mean models have to be
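The docstring's point is that only one candidate model is ever resident; a hedged sketch of that pattern in plain Keras (SearchWrapper is the gist's own class, so this only illustrates the memory-saving idea, with hypothetical build_model, candidate_configs, and data names):

import keras.backend as K

best_score, best_config = None, None
for config in candidate_configs:                # hypothetical list of hyperparameter dicts
    model = build_model(**config)               # hypothetical model-building helper
    model.fit(x_train[:2048], y_train[:2048], epochs=1, verbose=0)   # "short random search" on a few batches
    score = model.evaluate(x_val, y_val, verbose=0)
    if best_score is None or score < best_score:
        best_score, best_config = score, config
    del model
    K.clear_session()                           # free the graph so only one model stays in memory
final_model = build_model(**best_config)        # retrain the winner to completion afterwards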
import pandas as pd
import datetime
from pandas_datareader.data import DataReader
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
from matplotlib import cm
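These imports set up a pandas-datareader market analysis; a hedged usage sketch (the ticker, data source, and date range are placeholders):

# Sketch: pull daily prices for one ticker and plot the closing price.
# 'AAPL', the 'yahoo' source, and the dates are placeholder assumptions.
start = datetime.datetime(2016, 1, 1)
end = datetime.datetime(2017, 1, 1)
prices = DataReader('AAPL', 'yahoo', start, end)
prices['Close'].plot(title='AAPL close')
plt.show()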