General syntax:
<protocol>://[<user>[:<password>]@]<hostname>[:<port>][:][/]<path>[#<commit-ish> | #semver:<semver>]
(prefer the slash, rather than the colon, as the separator before <path>)
These URLs work:
import gradio as gr
import torch
import requests
from PIL import Image
from torchvision import transforms

# Load a pretrained ResNet-18 from torch.hub and switch it to eval mode
# (disables dropout/batch-norm updates) for inference.
# NOTE(review): downloads weights on first run — requires network access.
model = torch.hub.load('pytorch/vision:v0.6.0',
                       'resnet18', pretrained=True).eval()
import argparse
import subprocess
from pathlib import Path

if __name__ == "__main__":
    # Build the CLI parser; ArgumentDefaultsHelpFormatter makes --help show
    # each option's default value automatically.
    parser = argparse.ArgumentParser(
        description="Just an example",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
parser.add_argument( |
{ | |
"data": [ | |
"data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/4gIoSUNDX1BST0ZJTEUAAQEAAAIYAAAAAAQwAABtbnRyUkdCIFhZWiAAAAAAAAAAAAAAAABhY3NwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAA9tYAAQAAAADTLQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAlkZXNjAAAA8AAAAHRyWFlaAAABZAAAABRnWFlaAAABeAAAABRiWFlaAAABjAAAABRyVFJDAAABoAAAAChnVFJDAAABoAAAAChiVFJDAAABoAAAACh3dHB0AAAByAAAABRjcHJ0AAAB3AAAADxtbHVjAAAAAAAAAAEAAAAMZW5VUwAAAFgAAAAcAHMAUgBHAEIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFhZWiAAAAAAAABvogAAOPUAAAOQWFlaIAAAAAAAAGKZAAC3hQAAGNpYWVogAAAAAAAAJKAAAA+EAAC2z3BhcmEAAAAAAAQAAAACZmYAAPKnAAANWQAAE9AAAApbAAAAAAAAAABYWVogAAAAAAAA9tYAAQAAAADTLW1sdWMAAAAAAAAAAQAAAAxlblVTAAAAIAAAABwARwBvAG8AZwBsAGUAIABJAG4AYwAuACAAMgAwADEANv/bAEMAAwICAwICAwMDAwQDAwQFCAUFBAQFCgcHBggMCgwMCwoLCw0OEhANDhEOCwsQFhARExQVFRUMDxcYFhQYEhQVFP/bAEMBAwQEBQQFCQUFCRQNCw0UFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFP/AABEIAQABAAMBIgACEQEDEQH/xAAdAAACA |
# Serverless Framework service definition (serverless.yml).
service: tejas-service
org: tensorclan
frameworkVersion: '2'

provider:
  name: aws
  runtime: python3.8  # Lambda runtime
  stage: dev
  region: ap-south-1
import onnxruntime

# Create an ONNX Runtime inference session over the quantized pose-estimation
# model; the .onnx file must exist in the working directory.
ort_session = onnxruntime.InferenceSession("simple_pose_estimation.quantized.onnx")
def to_numpy(tensor):
    """Return *tensor* as a NumPy array on CPU, detaching from autograd first
    if the tensor requires gradients (``.numpy()`` would raise otherwise)."""
    return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
# Compute the ONNX Runtime output prediction.
# NOTE(review): `tr_img` is defined elsewhere — presumably the preprocessed
# input image tensor; unsqueeze(0) adds the batch dimension. Confirm shape.
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(tr_img.unsqueeze(0))}
ort_outs = ort_session.run(None, ort_inputs)
from onnxruntime.quantization import quantize, QuantizationMode

# Dynamic quantization: static=False means activation quantization parameters
# are computed at runtime, so no calibration dataset is needed; IntegerOps
# quantizes weights to integer ops.
quantized_model = quantize(onnx_model, quantization_mode=QuantizationMode.IntegerOps, static=False)
# NOTE(review): `onnx` and `print_size_of_onnx_model` are defined/imported
# elsewhere in the file.
onnx.save(quantized_model, 'simple_pose_estimation.quantized.onnx')
print_size_of_onnx_model(quantized_model)
import io
import numpy as np
import torch.onnx

# Input to the model.
# NOTE(review): `new_model` is defined elsewhere in the file/notebook.
torch_model = new_model
batch_size = 1
# Dummy input matching the model's expected (N, C, H, W) input;
# requires_grad=True so the subsequent export can trace through it.
x = torch.randn(batch_size, 3, 256, 256, requires_grad=True)
# Reference forward pass (typically compared against the ONNX output later).
torch_out = torch_model(x)