I hereby claim:
- I am rpitonak on github.
- I am radopitonak (https://keybase.io/radopitonak) on keybase.
- I have a public key ASDOPNjqQiUljDFt2d7CNo9tCQTdkMqKF9cs1KyYV2WW4Ao
To claim this, I am signing this object:
I hereby claim:
To claim this, I am signing this object:
Summer is not over yet, but the coding part of Google Summer of Code is. This is my final report.
I was working with the Fedora organization on conu — a Python API for your containers. Before GSoC, the library could only be used to work with standalone containers. The goal for GSoC was to also implement support for orchestration tools like Kubernetes and OpenShift. For more information, see my proposal.
# Adapted from this StackOverflow answer: https://gis.stackexchange.com/a/14717
"""
Usage:
file_path = "/tmp/file.tiff"
split_tiff("/tmp/file.tiff", 256, 256)
The resulting output will be chunks /tmp/file_0_0.tiff ...
# Graph input: a float tensor with dynamic batch size, height and width;
# only the channel dimension (3) is fixed in advance.
model_input_name = "X"
X = onnx.helper.make_tensor_value_info(
    model_input_name,
    onnx.TensorProto.FLOAT,
    [None, None, None, 3],
)
model_output_name = "Y" | |
Y = onnx.helper.make_tensor_value_info( | |
model_output_name, |
# Intermediate output of the Resize node: a fixed 224x224 image with 3
# channels; the batch dimension remains dynamic.
resizer_node_output_name = "Resize_Y"
Y_resize = onnx.helper.make_tensor_value_info(
    resizer_node_output_name,
    onnx.TensorProto.FLOAT,
    [None, 224, 224, 3],
)
roi = onnx.helper.make_node("Constant", inputs=[], outputs=["roi"], name="roi-constant", | |
value=onnx.helper.make_tensor(name="roi-values", | |
data_type=onnx.TensorProto.FLOAT, |
# Assemble the graph from the constant nodes plus the Resize and Transpose
# nodes, with X as the graph input and Y as the graph output.
graph = onnx.helper.make_graph(
    [roi, scales, output_size, resizer_node, transpose_node],
    "resizer",
    [X],
    [Y],
)
print(graph)

# Create the model (ModelProto instance).
# BUG FIX: the original passed the undefined name `g`; the graph built
# above is bound to `graph`.
model_def = onnx.helper.make_model(graph, producer_name="onnx-zaitra")

# These are optional, just to show what you can set here.
model_def.opset_import[0].version = 12
model_def.ir_version = 7
import onnx

# Download the pretrained SqueezeNet model first if it is not present:
# !wget https://github.com/onnx/models/raw/main/vision/classification/squeezenet/model/squeezenet1.0-12.onnx

# Load the resizer model built earlier and the classifier to compose with it.
model1 = onnx.load('resizer.onnx')
model2 = onnx.load('squeezenet1.0-12.onnx')
combined_model = onnx.compose.merge_models( | |
model1, model2, |
# Download a sample ImageNet image first if it is not present:
# !wget https://github.com/EliSchwartz/imagenet-sample-images/raw/master/n02085936_Maltese_dog.JPEG

# Load the test image as a NumPy array and show it; note its native size
# differs from the 224x224 the classifier expects — the composed model's
# Resize node handles that.
large_img = np.array(Image.open("n02085936_Maltese_dog.JPEG"))
print(f"Image shape: {large_img.shape}")
plt.imshow(large_img)
# Output:
# Image shape: (500, 375, 3)

# Start the inference session and open the model
sess = rt.InferenceSession("combined_dynamic_input.onnx")
# Dependencies — install with:
# pip install wget
# pip install -i https://test.pypi.org/simple/ onnx-weekly
# pip install onnxruntime
import onnx
import numpy as np
import onnxruntime as rt
import matplotlib.pyplot as plt
from PIL import Image
# Permute dimensions; after Resize they are in this order:
# N W H C
# 0 1 2 3
# After the Transpose we want them in shape:
# N C W H
# 0 3 1 2
perm = onnx.helper.make_node("Constant", inputs=[], outputs=["perm"], name="perm-constant", | |
value=onnx.helper.make_tensor(name="perm-values", | |
data_type=onnx.TensorProto.INT64, | |
dims=np.array([0,3,1,2]).shape, |