@smellslikeml
Last active April 19, 2023 03:09
U^2Net Triton Inference Server
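This gist has three parts: a client script, the Triton model configuration (config.pbtxt), and the Python-backend model (model.py). Triton requires the backend file to be named model.py inside a numbered version directory next to config.pbtxt; a typical model repository layout for this gist (the repository root name is an assumption) looks like:

model_repository/
└── u2net/
    ├── config.pbtxt
    └── 1/
        └── model.py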
# client.py -- batch inference client for the u2net Triton model
import os
import time

import cv2
import numpy as np
from PIL import Image  # used by the optional mask-saving block below
import tritonclient.http

model_name = "u2net"
url = "localhost:8000"
model_version = "1"

triton_client = tritonclient.http.InferenceServerClient(url=url, verbose=False)
model_metadata = triton_client.get_model_metadata(model_name=model_name, model_version=model_version)
model_config = triton_client.get_model_config(model_name=model_name, model_version=model_version)

# TODO: set the path to a directory of image(s)
img_dir = "/path/to/img/dir/"
img_list = os.listdir(img_dir)
# Resize each image to the 512x512 shape declared below, then replicate the
# list 50x to build a larger benchmark batch.
imgs = [cv2.resize(cv2.imread(os.path.join(img_dir, img)), (512, 512)) for img in img_list] * 50
batch_size = len(imgs)

images_in = tritonclient.http.InferInput(name="IMAGES", shape=(batch_size, 512, 512, 3), datatype="INT8")
masks = tritonclient.http.InferRequestedOutput(name="MASKS", binary_data=False)
# The model config declares TYPE_INT8, so uint8 pixel values above 127 wrap
# around on this cast.
images_in.set_data_from_numpy(np.asarray(imgs, dtype=np.int8))

if __name__ == "__main__":
    start_time = time.time()
    response = triton_client.infer(
        model_name=model_name,
        model_version=model_version,
        inputs=[images_in],
        outputs=[masks],
    )
    print("--- %s seconds ---" % (time.time() - start_time))
    print("--- %s samples ---" % batch_size)

    msks = response.as_numpy("MASKS")
    print(msks.shape)

    # Optional: uncomment below to save produced masks
    # for idx, m in enumerate(msks):
    #     im = Image.fromarray(m)
    #     im.save("{}.png".format(idx))
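Once the server is up, readiness can be checked through Triton's standard HTTP health endpoint before running the script above (saved here as client.py, a name chosen for this gist):

curl -v localhost:8000/v2/health/ready
python client.py

The script prints the elapsed inference time, the number of samples sent, and the shape of the returned MASKS array.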
name: "u2net"
max_batch_size: 0
backend: "python"
input [
{
name: "IMAGES"
data_type: TYPE_INT8
dims: [ -1, -1, -1, -1 ]
}
]
output [
{
name: "MASKS"
data_type: TYPE_INT8
dims: [ -1, -1, -1]
}
]
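With max_batch_size: 0, Triton treats the leading dimension of IMAGES as ordinary data, so the client must assemble the whole batch itself, as the script above does. As a sketch of the alternative, letting Triton form batches server-side (the value 8 is an arbitrary assumption), the dims would drop the batch dimension and the dynamic batcher would be enabled:

# config.pbtxt -- server-side batching variant (sketch)
name: "u2net"
max_batch_size: 8
backend: "python"
input [
  {
    name: "IMAGES"
    data_type: TYPE_INT8
    dims: [ -1, -1, -1 ]
  }
]
output [
  {
    name: "MASKS"
    data_type: TYPE_INT8
    dims: [ -1, -1 ]
  }
]
dynamic_batching { }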
# model.py
import numpy as np
import paddlehub as hub
import triton_python_backend_utils as pb_utils


class TritonPythonModel:
    """Your Python model must use the same class name. Every Python model
    that is created must have "TritonPythonModel" as the class name.
    """

    @staticmethod
    def auto_complete_config(auto_complete_model_config):
        """`auto_complete_config` is called only once when loading the model
        assuming the server was not started with
        `--disable-auto-complete-config`. Implementing this function is
        optional; if it is not implemented, nothing is auto-completed.
        This function can be used to set `max_batch_size`, `input` and
        `output` properties of the model using `set_max_batch_size`,
        `add_input`, and `add_output`. These properties will allow Triton to
        load the model with minimal model configuration in absence of a
        configuration file. This function returns the `pb_utils.ModelConfig`
        object with these properties. You can use the `as_dict` function to
        gain read-only access to the `pb_utils.ModelConfig` object. The
        `pb_utils.ModelConfig` object being returned from here will be used
        as the final configuration for the model.

        Note: The Python interpreter used to invoke this function will be
        destroyed upon returning from this function and as a result none of
        the objects created here will be available in the `initialize`,
        `execute`, or `finalize` functions.

        Parameters
        ----------
        auto_complete_model_config : pb_utils.ModelConfig
            An object containing the existing model configuration. You can
            build upon the configuration given by this object when setting
            the properties for this model.

        Returns
        -------
        pb_utils.ModelConfig
            An object containing the auto-completed model configuration
        """
        inputs = [{
            'name': 'IMAGES',
            'data_type': 'TYPE_INT8',
            'dims': [-1, -1, -1, -1]
        }]
        outputs = [{
            'name': 'MASKS',
            'data_type': 'TYPE_INT8',
            'dims': [-1, -1, -1]
        }]

        # Demonstrate the usage of `as_dict`, `add_input`, `add_output`,
        # `set_max_batch_size`, and `set_dynamic_batching` functions.
        # Store the model configuration as a dictionary.
        config = auto_complete_model_config.as_dict()
        input_names = []
        output_names = []
        for input in config['input']:
            input_names.append(input['name'])
        for output in config['output']:
            output_names.append(output['name'])

        for input in inputs:
            # The name checking here is only for demonstrating the usage of
            # the `as_dict` function. `add_input` will check for conflicts
            # and raise errors if an input with the same name already exists
            # in the configuration but has different data_type or dims
            # property.
            if input['name'] not in input_names:
                auto_complete_model_config.add_input(input)
        for output in outputs:
            # The name checking here is only for demonstrating the usage of
            # the `as_dict` function. `add_output` will check for conflicts
            # and raise errors if an output with the same name already
            # exists in the configuration but has different data_type or
            # dims property.
            if output['name'] not in output_names:
                auto_complete_model_config.add_output(output)

        auto_complete_model_config.set_max_batch_size(0)

        # To enable a dynamic batcher with default settings, you can use
        # the auto_complete_model_config set_dynamic_batching() function.
        # It is commented in this example because the max_batch_size is
        # zero.
        #
        # auto_complete_model_config.set_dynamic_batching()

        return auto_complete_model_config

    def initialize(self, args):
        """`initialize` is called only once when the model is being loaded.
        Implementing `initialize` function is optional. This function allows
        the model to initialize any state associated with this model.

        Parameters
        ----------
        args : dict
            Both keys and values are strings. The dictionary keys and values
            are:
            * model_config: A JSON string containing the model configuration
            * model_instance_kind: A string containing model instance kind
            * model_instance_device_id: A string containing model instance
              device ID
            * model_repository: Model repository path
            * model_version: Model version
            * model_name: Model name
        """
        self.model = hub.Module(name="U2Net")
        print('Initialized...')

    def execute(self, requests):
        """`execute` must be implemented in every Python model. `execute`
        function receives a list of pb_utils.InferenceRequest as the only
        argument. This function is called when an inference is requested
        for this model.

        Parameters
        ----------
        requests : list
            A list of pb_utils.InferenceRequest

        Returns
        -------
        list
            A list of pb_utils.InferenceResponse. The length of this list
            must be the same as `requests`
        """
        responses = []
        # Every Python backend must iterate through the list of requests and
        # create an instance of the pb_utils.InferenceResponse class for each
        # of them. You should avoid storing any of the input Tensors in the
        # class attributes as they will be overridden in subsequent inference
        # requests. You can make a copy of the underlying NumPy array and
        # store it if it is required.
        for request in requests:
            # Split the (batch, H, W, 3) input tensor into a list of
            # per-image arrays.
            images = [
                img for img in
                pb_utils.get_input_tensor_by_name(request, "IMAGES").as_numpy()
            ]
            # input_size is taken from the height of the first image; the
            # images are assumed square. PaddleHub's U2Net module returns a
            # dict per image; keep the "mask" entry and stack the results
            # into a single array.
            masks = self.model.Segmentation(images=images,
                                            input_size=len(images[0]))
            masks = [x["mask"] for x in masks]
            masks = np.array([np.asarray(i) for i in masks])

            # Sending results
            inference_response = pb_utils.InferenceResponse(output_tensors=[
                pb_utils.Tensor(
                    "MASKS",
                    masks,
                )
            ])
            responses.append(inference_response)

        # You must return a list of pb_utils.InferenceResponse. Length
        # of this list must match the length of `requests` list.
        return responses

    def finalize(self):
        """`finalize` is called only once when the model is being unloaded.
        Implementing `finalize` function is optional. This function allows
        the model to perform any necessary clean ups before exit.
        """
        print('Cleaning up...')
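To serve this model, the Python backend needs paddlehub (and its paddlepaddle dependency) available inside the Triton container. A minimal launch sketch, assuming the NGC image tag 23.04-py3 and the repository layout shown at the top of this gist:

docker run --rm --gpus all -p 8000:8000 -p 8001:8001 -p 8002:8002 \
  -v $(pwd)/model_repository:/models \
  nvcr.io/nvidia/tritonserver:23.04-py3 \
  bash -c "pip install paddlepaddle paddlehub && \
           tritonserver --model-repository=/models"

Installing at container start is only for quick experiments; baking the dependencies into a derived image is the sturdier option.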