Skip to content

Instantly share code, notes, and snippets.

View ctodd's full-sized avatar

Chris Miller ctodd

View GitHub Profile
!cd lib && git clone https://github.com/apache/incubator-mxnet.git
!aws s3 cp s3://$s3_output_path/$training_job_name/output/model.tar.gz working/model/model-orig.tar.gz
### This command rolls back to the commit before they removed the deploy.py script. ###
!cd lib/incubator-mxnet && git checkout 26f44b71d8de84bbc88af496ae0aeb7ce535312d
### Unpack the model and move the files into a tmp directory ###
!cd working/model/tmp && tar xvfpz ../model.tar.gz.orig
!cd working/model/tmp && mv model_algo_1-0000.params model_resnet50_300-0100.params
!cd working/model/tmp && mv model_algo_1-symbol.json model_resnet50_300-symbol.json
import glob

# Collect every local test image; these are fed to the endpoint below.
test_images = glob.glob('images/test/*')
print("\n".join(test_images))
def prediction_to_bbox_data(image_path, prediction):
    """Convert one detector prediction into a pixel-space bounding-box dict.

    Args:
        image_path: Path of the image the prediction refers to.
        prediction: Sequence (class_id, confidence, xmin, ymin, xmax, ymax);
            coordinates are assumed normalized to [0, 1] — the standard
            SageMaker object-detection output format.

    Returns:
        dict with 'class_id' and pixel-space 'height', 'width', 'left', 'top'.
    """
    class_id, confidence, xmin, ymin, xmax, ymax = prediction
    # The image's pixel size is needed to scale the normalized coordinates.
    width, height = Image.open(image_path).size
    bbox_data = {'class_id': class_id,
                 'height': (ymax - ymin) * height,
                 'width': (xmax - xmin) * width,
                 # NOTE(review): the source was truncated after 'width'; the
                 # 'left'/'top' keys and the return statement follow the
                 # standard AWS sample — confirm against the original notebook.
                 'left': xmin * width,
                 'top': ymin * height}
    return bbox_data
# Name the endpoint after the training job plus a UTC timestamp.
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_name = training_job_name + '-ep' + timestamp
print('Endpoint name: {}'.format(endpoint_name))

# Request endpoint creation (asynchronous on the SageMaker side).
endpoint_params = dict(
    EndpointName=endpoint_name,
    EndpointConfigName=endpoint_config_name,
)
endpoint_response = client.create_endpoint(**endpoint_params)
print('EndpointArn = {}'.format(endpoint_response['EndpointArn']))
# Endpoint config name mirrors the training job name plus a UTC timestamp.
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_config_name = training_job_name + '-epc' + timestamp

# A single variant on one instance receives all traffic.
production_variant = {
    'InstanceType': 'ml.t2.medium',
    'InitialInstanceCount': 1,
    'ModelName': model_name,
    'VariantName': 'AllTraffic',
}
endpoint_config_response = client.create_endpoint_config(
    EndpointConfigName=endpoint_config_name,
    ProductionVariants=[production_variant])
import time

# Model name derives from the training job plus a UTC timestamp.
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
model_name = training_job_name + '-model' + timestamp

# Reuse the training container image and the trained artifact location
# recorded in the training job description.
training_image = training_info['AlgorithmSpecification']['TrainingImage']
model_data = training_info['ModelArtifacts']['S3ModelArtifacts']

# NOTE(review): the source cell was truncated after 'ModelDataUrl'; closing
# the dict restores valid syntax. A client.create_model(...) call presumably
# followed — confirm against the original notebook.
primary_container = {
    'Image': training_image,
    'ModelDataUrl': model_data,
}
import time
import sagemaker

# SageMaker execution role and session for this notebook.
role = sagemaker.get_execution_role()
sess = sagemaker.Session()

# Container image of the built-in object-detection algorithm for the
# notebook's current region.
region = boto3.Session().region_name
training_image = sagemaker.amazon.amazon_estimator.get_image_uri(
    region, 'object-detection', repo_version='latest')
s3_output_path = 's3://{}/{}/output'.format(BUCKET, pfx_training)
pfx_training = PREFIX + '/training' if PREFIX else 'training'
# Defines paths for use in the training job request.
s3_train_data_path = 's3://{}/{}/{}'.format(BUCKET, pfx_training, augmented_manifest_filename_train)
s3_validation_data_path = 's3://{}/{}/{}'.format(BUCKET, pfx_training, augmented_manifest_filename_validation)
!aws s3 cp $augmented_manifest_filename_train s3://$BUCKET/$pfx_training/
!aws s3 cp $augmented_manifest_filename_validation s3://$BUCKET/$pfx_training/
import json

# Combined output manifest written by the labeling job.
augmented_manifest_filename_output = local_manifest_dir + '/output.manifest'

# Read every JSON-lines record into memory.
with jsonlines.open(augmented_manifest_filename_output, 'r') as manifest:
    lines = list(manifest)

# Shuffle data in place.
np.random.shuffle(lines)
dataset_size = len(lines)
!pip -q install --upgrade pip
!pip -q install jsonlines
import jsonlines
from itertools import islice
# Preview the first few labeled records and check whether each source image
# already exists locally under images/source/.
with jsonlines.open(augmented_manifest_file, 'r') as reader:
    # Only inspect the first 10 records of the manifest.
    for desc in islice(reader, 10):
        img_url = desc['source-ref']  # URL of the original image in the record
        img_file = "images/source/" + os.path.basename(img_url)
        # NOTE(review): the source is cut off after this check; presumably a
        # download of missing files followed — confirm against the notebook.
        file_exists = os.path.isfile(img_file)
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from PIL import Image
import numpy as np
from itertools import cycle
def show_annotated_image(img_path, bboxes, prec):
im = np.array(Image.open(img_path), dtype=np.uint8)
# Create figure and axes