Radoslav Pitoňák (rpitonak)
resizer_node = onnx.helper.make_node("Resize", inputs=[model_input_name, "roi", "scales", "output_size"], outputs=[resizer_node_output_name])
transpose_node = onnx.helper.make_node("Transpose", inputs=[resizer_node_output_name], outputs=[model_output_name], perm=[0,3,1,2])
# Permute dimensions; after Resize they are still in image order:
# N H W C
# 0 1 2 3
# After Transpose we want them in the shape the classifier expects:
# N C H W
# 0 3 1 2
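# Quick NumPy sanity check of this permutation (illustration only, not part of the ONNX graph):
assert np.transpose(np.zeros((1, 224, 224, 3)), (0, 3, 1, 2)).shape == (1, 3, 224, 224)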
# Note: the Transpose node above already receives perm as an attribute, so this
# Constant is not wired into the graph built below.
perm = onnx.helper.make_node("Constant", inputs=[], outputs=["perm"], name="perm-constant",
                             value=onnx.helper.make_tensor(name="perm-values",
                                                           data_type=onnx.TensorProto.INT64,
                                                           dims=np.array([0,3,1,2]).shape,
                                                           vals=[0, 3, 1, 2]))
# pip install wget
# pip install -i https://test.pypi.org/simple/ onnx-weekly
# pip install onnxruntime
import onnx
import numpy as np
import onnxruntime as rt
import matplotlib.pyplot as plt
from PIL import Image
# !wget https://github.com/EliSchwartz/imagenet-sample-images/raw/master/n02085936_Maltese_dog.JPEG
large_img = np.array(Image.open("n02085936_Maltese_dog.JPEG"))
print(f"Image shape: {large_img.shape}")
plt.imshow(large_img)
# Output:
# Image shape: (500, 375, 3)
sess = rt.InferenceSession("combined_dynamic_input.onnx") # Start the inference session and open the model
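# A minimal sketch of running the combined model (assumes the merged graph keeps the
# resizer's dynamic NHWC float32 input; variable names here are illustrative):
input_name = sess.get_inputs()[0].name
batch = large_img.astype(np.float32)[np.newaxis, ...]  # shape (1, H, W, 3); any H and W work
scores = sess.run(None, {input_name: batch})[0]
print(f"Predicted class index: {np.argmax(scores)}")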
import onnx
# !wget https://github.com/onnx/models/raw/main/vision/classification/squeezenet/model/squeezenet1.0-12.onnx
model1 = onnx.load('resizer.onnx')
model2 = onnx.load('squeezenet1.0-12.onnx')
combined_model = onnx.compose.merge_models(
    model1, model2,
    # wire the resizer output into the classifier input;
    # assumes the first graph input of model2 is its image input
    io_map=[(model1.graph.output[0].name, model2.graph.input[0].name)])
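# Check and save the merged model under the name used by the inference session above (a sketch).
onnx.checker.check_model(combined_model)
onnx.save(combined_model, "combined_dynamic_input.onnx")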
graph = onnx.helper.make_graph([roi, scales, output_size, resizer_node, transpose_node], "resizer", [X], [Y])
print(graph)
# Create the model (ModelProto instance)
model_def = onnx.helper.make_model(graph, producer_name="onnx-zaitra")
# these are optional, just to show what you can set here
model_def.opset_import[0].version = 12
model_def.ir_version = 7
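# Validate and serialize the preprocessing model (a sketch; the file name matches
# the onnx.load('resizer.onnx') call in the composition snippet above).
onnx.checker.check_model(model_def)
onnx.save(model_def, "resizer.onnx")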
resizer_node_output_name = "Resize_Y"
Y_resize = onnx.helper.make_tensor_value_info(resizer_node_output_name,
                                              onnx.TensorProto.FLOAT,
                                              [None, 224, 224, 3])
roi = onnx.helper.make_node("Constant", inputs=[], outputs=["roi"], name="roi-constant",
                            value=onnx.helper.make_tensor(name="roi-values",
                                                          data_type=onnx.TensorProto.FLOAT,
                                                          dims=[0],
                                                          vals=[]))  # empty ROI: ignored because Resize is driven by explicit output sizes
model_input_name = "X"
X = onnx.helper.make_tensor_value_info(
    model_input_name,
    onnx.TensorProto.FLOAT,
    [None, None, None, 3])  # only the channels dimension is fixed; batch size, height and width are not known in advance
model_output_name = "Y"
Y = onnx.helper.make_tensor_value_info(
    model_output_name,
    onnx.TensorProto.FLOAT,
    [None, 3, 224, 224])  # after Resize and Transpose: dynamic batch, N C H W
rpitonak / split_geotiff.py
Last active October 30, 2020 17:40
Split GeoTiff to multiple tiff files (chunks) using Python3
# Adapted from this StackOverflow answer: https://gis.stackexchange.com/a/14717
"""
Usage:
    file_path = "/tmp/file.tiff"
    split_tiff(file_path, 256, 256)
The resulting output will be chunks /tmp/file_0_0.tiff ...
"""
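# A minimal sketch of what split_tiff might look like, following the GDAL-based
# approach from the linked answer (function body and naming here are illustrative,
# not necessarily the gist's exact implementation):
import os
from osgeo import gdal

def split_tiff(file_path, tile_width, tile_height):
    dataset = gdal.Open(file_path)
    base, ext = os.path.splitext(file_path)
    for x in range(0, dataset.RasterXSize, tile_width):
        for y in range(0, dataset.RasterYSize, tile_height):
            # Cut a tile_width x tile_height window starting at pixel offset (x, y)
            gdal.Translate(f"{base}_{x}_{y}{ext}", dataset,
                           srcWin=[x, y, tile_width, tile_height])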

GSOC 2018

Summer is not over yet, but the coding part of Google Summer of Code is. This is my final report.

Quick recap

I was working with the Fedora organization on conu, a Python API for your containers. Before GSoC, you could only use the library to work with standalone containers. The goal for GSoC was to also implement support for orchestration tools like Kubernetes and OpenShift. For more information, see my proposal.