EniasCailliau / query_podcast_insights.py
Last active September 22, 2022 07:49
Query podcast insights on Steamship
from steamship import Steamship

bible = Steamship.use("audio-analytics", "joe-rogan-bible")
bible.analyze_youtube(YOUTUBE_URL)  # YOUTUBE_URL is a placeholder for the episode URL

# Then, later, query like this
bible.query("""
kind "sentiment" and name "NEGATIVE"
overlaps {
    kind "entity" and name "white powder"
}
""")
from steamship import Steamship, File

workspace = Steamship(workspace="joe-rogan-bible")
with workspace:
    f = File.create("youtube-importer", podcast_url)  # podcast_url is a placeholder for the episode URL
    f.transcribe().tag("entities").tag("sentiments").tag("topics").tag("summaries")
import numpy as np

# Deploy the trained model behind a real-time inference endpoint
predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')

# Sanity-check the endpoint with a random 28x28 single-channel image
random_image_data = np.random.rand(28, 28, 1)
predictor.predict(random_image_data)
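An endpoint keeps billing until it is deleted; once you are done experimenting, tear it down with the SageMaker Python SDK's delete_endpoint:

predictor.delete_endpoint()  # stop incurring endpoint charges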
# Store model artifacts in S3 and train on a GPU instance, streaming data in Pipe mode
model_artifacts_location = f's3://{bucket}/{prefix}/artifacts'
estimator = TensorFlow(entry_point='cnn_fashion_mnist.py',
                       role=role,
                       input_mode='Pipe',
                       output_path=model_artifacts_location,
                       training_steps=20000,
                       evaluation_steps=100,
                       train_instance_count=1,
                       train_instance_type='ml.p2.xlarge')
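The gist never shows the plain training launch itself; a one-line sketch, assuming the 'train'/'eval' channel names match what the entry script reads, and reusing the TFRecord URIs defined in the next snippet:

estimator.fit({'train': train_data, 'eval': eval_data})  # channel names are assumptions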
train_data = 's3://sagemaker-eu-central-1-959924085179/radix/mnist_fashion_tutorial/data/mnist/train.tfrecords'
eval_data = 's3://sagemaker-eu-central-1-959924085179/radix/mnist_fashion_tutorial/data/mnist/validation.tfrecords'

# Launch the hyperparameter tuning job; tuner is a HyperparameterTuner (see below)
tuner.fit({'train': train_data, 'eval': eval_data}, logs=False)
estimator = TensorFlow(entry_point='cnn_fashion_mnist.py',
                       role=role,
                       input_mode='Pipe',
                       training_steps=20000,
                       evaluation_steps=100,
                       train_instance_count=1,
                       train_instance_type='ml.c5.2xlarge',
                       base_job_name='radix-mnist-fashion')  # SageMaker job names allow only letters, digits, and hyphens
from sagemaker.tuner import ContinuousParameter, IntegerParameter

# Define the objective: minimize the loss parsed from the training logs
objective_metric_name = 'loss'
objective_type = 'Minimize'
metric_definitions = [{'Name': 'loss',
                       'Regex': 'loss = ([0-9\\.]+)'}]

# Define hyperparameter ranges to search over
hyperparameter_ranges = {
    'learning_rate': ContinuousParameter(0.0001, 0.01),
    'dropout_rate': ContinuousParameter(0.3, 1.0),
    'nw_depth': IntegerParameter(1, 4),
}
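tuner.fit is called in an earlier snippet, but the tuner itself is never constructed in the gist; a minimal sketch wiring together the estimator, objective, and ranges defined above (the max_jobs and max_parallel_jobs values are illustrative):

from sagemaker.tuner import HyperparameterTuner

tuner = HyperparameterTuner(estimator=estimator,
                            objective_metric_name=objective_metric_name,
                            hyperparameter_ranges=hyperparameter_ranges,
                            metric_definitions=metric_definitions,
                            objective_type=objective_type,
                            max_jobs=20,          # illustrative search budget
                            max_parallel_jobs=2)  # illustrative parallelism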
import sagemaker
bucket = sagemaker.Session().default_bucket()
prefix = 'radix/mnist_fashion_tutorial'
role = sagemaker.get_execution_role()
import boto3
from time import gmtime, strftime
from sagemaker.tensorflow import TensorFlow
import os
import tensorflow as tf
from tensorflow.python.estimator.model_fn import ModeKeys as Modes
from sagemaker_tensorflow import PipeModeDataset
from tensorflow.contrib.data import map_and_batch
INPUT_TENSOR_NAME = 'inputs'
SIGNATURE_NAME = 'predictions'
PREFETCH_SIZE = 10
BATCH_SIZE = 128
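These imports and constants come from the training script (cnn_fashion_mnist.py), but the input pipeline that uses them isn't shown; a minimal Pipe-mode input_fn sketch, assuming TFRecords with 'image_raw' and 'label' features (the feature names are assumptions):

def _input_fn(channel):
    """Sketch: stream TFRecords from a SageMaker channel in Pipe mode."""
    ds = PipeModeDataset(channel=channel, record_format='TFRecord')

    def _parse(record):
        parsed = tf.parse_single_example(record, {
            'image_raw': tf.FixedLenFeature([], tf.string),  # assumed feature name
            'label': tf.FixedLenFeature([], tf.int64),       # assumed feature name
        })
        image = tf.decode_raw(parsed['image_raw'], tf.uint8)
        image = tf.reshape(tf.cast(image, tf.float32) / 255.0, [28, 28, 1])
        return {INPUT_TENSOR_NAME: image}, parsed['label']

    ds = ds.prefetch(PREFETCH_SIZE)
    return ds.apply(map_and_batch(_parse, batch_size=BATCH_SIZE, num_parallel_batches=2))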
# Write the train and validation splits as TFRecords under ./data
# (a sketch of this helper follows below)
convert_mnist_fashion_dataset(train_images, train_labels, 'train', 'data')
convert_mnist_fashion_dataset(test_images, test_labels, 'validation', 'data')
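The helper called above isn't included in the gist; a minimal sketch consistent with the input pipeline earlier (TF 1.x API; the 'image_raw'/'label' feature names are assumptions):

import os
import tensorflow as tf

def convert_mnist_fashion_dataset(images, labels, name, directory):
    """Sketch: serialize (image, label) pairs to <directory>/<name>.tfrecords."""
    filename = os.path.join(directory, name + '.tfrecords')
    with tf.python_io.TFRecordWriter(filename) as writer:
        for image, label in zip(images, labels):
            example = tf.train.Example(features=tf.train.Features(feature={
                'image_raw': tf.train.Feature(
                    bytes_list=tf.train.BytesList(value=[image.tostring()])),
                'label': tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[int(label)])),
            }))
            writer.write(example.SerializeToString())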
import sagemaker

bucket = sagemaker.Session().default_bucket()  # Automatically create a bucket
prefix = 'radix/mnist_fashion_tutorial'  # Subfolder prefix
s3_url = sagemaker.Session().upload_data(path='data',
                                         bucket=bucket,
                                         key_prefix=prefix + '/data/mnist')