@HoangTienDuc
Created October 11, 2021 14:24
Bug report for NVIDIA. The first script below sends a preprocessed image to the "antisproofing" model through the Triton HTTP client; the second runs the same preprocessing through a TensorRT engine loaded directly, so the two outputs can be compared.
import numpy as np
import tritonclient.http as httpclient
import time
import cv2
inputs = []
outputs = []
input_name = "input_1"
output_name = "predictions/Softmax"
image = cv2.imread("/data/dataset/train/withmask/2_WORLD_Coronavirus_083975_withmask_4.jpg")
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
resized_rgb_image = cv2.resize(image_rgb, (224, 224))
normed_image = resized_rgb_image.astype('float32') / 255.0
trt_input = np.transpose(normed_image, (2, 0, 1))
image_data = np.expand_dims(trt_input, axis=0)
inputs.append(httpclient.InferInput(input_name, image_data.shape, "FP32"))
outputs.append(httpclient.InferRequestedOutput(output_name))
inputs[0].set_data_from_numpy(image_data)
start_time = time.time()
# Test with outputs
triton_client = httpclient.InferenceServerClient(url="localhost:8000")
results = triton_client.infer(model_name="antisproofing",
                              inputs=inputs,
                              outputs=outputs)
latency = time.time() - start_time
output0_data = results.as_numpy(output_name)
trt_outputs = [out[0][0] for out in output0_data[0]]
OUTPUT_LABELS = ["fake", "real", "withmask"]
output_index = np.argmax(trt_outputs, axis=0)
output_prob = trt_outputs[output_index]
output_label = OUTPUT_LABELS[output_index]
print("trt_outputs: ", trt_outputs)
print(f'Inference outputs {output_label}: {output_prob}')
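
Before the infer() call above, it can help to confirm that the server and model are actually reachable, so a connectivity or loading problem is not mistaken for a wrong-output bug. A minimal sketch using standard tritonclient.http calls (model name taken from the script above), to be placed before triton_client.infer(...):

# Optional sanity checks (not in the original gist)
if not triton_client.is_server_live():
    raise RuntimeError("Triton server at localhost:8000 is not live")
if not triton_client.is_model_ready("antisproofing"):
    raise RuntimeError("Model 'antisproofing' is not loaded or not ready")

The second script below loads a TensorRT engine directly (without Triton) and applies the same preprocessing, so the two outputs can be compared.
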
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
import tensorrt as trt
import cv2
import torch
TRT_LOGGER = trt.Logger()
# Simple helper data class that's a little nicer to use than a 2-tuple.
class HostDeviceMem(object):
    def __init__(self, host_mem, device_mem):
        self.host = host_mem
        self.device = device_mem

    def __str__(self):
        return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)

    def __repr__(self):
        return self.__str__()
# Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
def allocate_buffers(engine):
    inputs = []
    outputs = []
    bindings = []
    stream = cuda.Stream()
    out_shapes = []
    input_shapes = []
    out_names = []
    max_batch_size = engine.max_batch_size
    for binding in engine:
        binding_shape = engine.get_binding_shape(binding)
        # Fix -1 dimension for proper memory allocation for batch_size > 1
        if binding_shape[0] == -1:
            binding_shape = (1,) + binding_shape[1:]
        size = trt.volume(binding_shape) * max_batch_size
        dtype = trt.nptype(engine.get_binding_dtype(binding))
        # Allocate host and device buffers
        host_mem = cuda.pagelocked_empty(size, dtype)
        device_mem = cuda.mem_alloc(host_mem.nbytes)
        # Append the device buffer to device bindings.
        bindings.append(int(device_mem))
        # Append to the appropriate list.
        if engine.binding_is_input(binding):
            inputs.append(HostDeviceMem(host_mem, device_mem))
            input_shapes.append(engine.get_binding_shape(binding))
        else:
            outputs.append(HostDeviceMem(host_mem, device_mem))
            # Collect original output shapes and names from the engine
            out_shapes.append(engine.get_binding_shape(binding))
            out_names.append(binding)
    return inputs, outputs, bindings, stream, input_shapes, out_shapes, out_names, max_batch_size
# This function is generalized for multiple inputs/outputs.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference(context, bindings, inputs, outputs, stream):
    # Transfer input data to the GPU.
    [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
    # Run inference.
    context.execute_async(bindings=bindings, stream_handle=stream.handle)
    # Transfer predictions back from the GPU.
    [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
    # Synchronize the stream
    stream.synchronize()
    # Return only the host outputs.
    return [out.host for out in outputs]
class TrtModel(object):
    def __init__(self, model):
        self.engine_file = model
        self.engine = None
        self.inputs = None
        self.outputs = None
        self.bindings = None
        self.stream = None
        self.context = None
        self.input_shapes = None
        self.out_shapes = None
        self.max_batch_size = 1
        self.cuda_ctx = cuda.Device(0).make_context()
        if self.cuda_ctx:
            self.cuda_ctx.push()

    def build(self):
        with open(self.engine_file, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime:
            self.engine = runtime.deserialize_cuda_engine(f.read())
        self.inputs, self.outputs, self.bindings, self.stream, self.input_shapes, self.out_shapes, self.out_names, self.max_batch_size = allocate_buffers(
            self.engine)
        self.context = self.engine.create_execution_context()
        self.context.active_optimization_profile = 0
        if self.cuda_ctx:
            self.cuda_ctx.pop()

    def run(self, input, deflatten: bool = True, as_dict=False):
        # lazy load implementation
        if self.engine is None:
            self.build()
        if self.cuda_ctx:
            self.cuda_ctx.push()
        input = np.asarray(input)
        batch_size = input.shape[0]
        allocate_place = np.prod(input.shape)
        self.inputs[0].host[:allocate_place] = input.flatten(order='C').astype(np.float32)
        self.context.set_binding_shape(0, input.shape[1:])
        trt_outputs = do_inference(
            self.context, bindings=self.bindings,
            inputs=self.inputs, outputs=self.outputs, stream=self.stream)
        if self.cuda_ctx:
            self.cuda_ctx.pop()
        # Reshape TRT outputs to original shape instead of flattened array
        if deflatten:
            trt_outputs = [torch.from_numpy(output.reshape(shape))
                           for output, shape in zip(trt_outputs, self.out_shapes)]
        if as_dict:
            return {name: trt_outputs[i] for i, name in enumerate(self.out_names)}
        return trt_outputs
engine = TrtModel("/data/mobilenet_tensort721.engine")
engine.build()
image = cv2.imread("/data/dataset/train/withmask/2_WORLD_Coronavirus_083975_withmask_4.jpg")
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
resized_rgb_image = cv2.resize(image_rgb, (224, 224))
normed_image = resized_rgb_image.astype('float32') / 255.0
trt_input = np.transpose(normed_image, (2, 0, 1))
trt_input = np.expand_dims(trt_input, axis=0)
trt_outputs = engine.run(trt_input)
print("trt_output: ", np.array(trt_outputs[0]).shape)