Table of contents
- Introduction
- Prerequisites
- Installation
- How to use
- Todo
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
import tensorrt as trt
import cv2
from hyperpose import Config,Model
# Shared TensorRT logger passed to builder/runtime/engine constructors.
TRT_LOGGER = trt.Logger()
import numpy as np
import tritonclient.http as httpclient
import time
import cv2
# Request/response tensor holders for the Triton HTTP client call.
inputs = []
outputs = []
# Binding names — must match the served model's config.pbtxt
# (presumably a Keras classifier: "input_1" in, softmax out — TODO confirm).
input_name = "input_1"
output_name = "predictions/Softmax"
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
from decoder import CifCafDecoder
import tensorrt as trt
import cv2
import openpifpaf
import torch
# Run server: nvidia-docker run --rm --name trtserver -p 8000:8000 -p 8001:8001 -v `pwd`:/models nvcr.io/nvidia/tritonserver:20.03.1-py3 trtserver --model-store=/models --api-version=2
# Run client: nvidia-docker run -it -v `pwd`:/data --rm --net=host triton:20.03.1
# Run server: nvidia-docker run --rm --name trtserver -p 8000:8000 -p 8001:8001 -v `pwd`:/models nvcr.io/nvidia/tritonserver:20.03.1-py3 trtserver --model-store=/models --api-version=2
# Run client: nvidia-docker run -it -v `pwd`:/data --rm --net=host triton:20.03.1
#!/usr/bin/env python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.