{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "try_tensorrt_installation_and_sample.ipynb",
"provenance": [],
"collapsed_sections": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "code",
"source": [
"# 下記ページをベースにTensorRT-8.2.2.1をColabで動作させる。\n",
"# Google Colaboratory で TensorRT を使って機械学習の推論を爆速化してみる【環境構築編(tar版)】\n",
"# https://note.com/altbridgetech/n/ndec5681d666c\n",
"\n",
"# 事前に TensorRT-8.2.2.1.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz をGoogle Drive上に用意しておく\n",
"from google.colab import drive\n",
"drive.mount(\"/content/gdrive\")"
],
"metadata": {
"id": "xA8jcVkJJ4JS",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "cf74a65b-e2fa-4a73-e2e6-088d7b7a823c"
},
"execution_count": 1,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Mounted at /content/gdrive\n"
]
}
]
},
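{
"cell_type": "code",
"source": [
"# Optional sanity check (added, not in the original notebook): confirm the TensorRT tarball\n",
"# is present on the mounted Drive before extracting it. The path below assumes the archive\n",
"# sits directly under MyDrive; adjust it if yours lives elsewhere.\n",
"import os\n",
"\n",
"tar_path = \"/content/gdrive/MyDrive/TensorRT-8.2.2.1.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz\"\n",
"if os.path.exists(tar_path):\n",
"    print(f\"Found archive ({os.path.getsize(tar_path) / 1e6:.1f} MB): {tar_path}\")\n",
"else:\n",
"    print(f\"Archive not found at {tar_path} - upload it to Drive first.\")"
],
"metadata": {},
"execution_count": null,
"outputs": []
},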
{
"cell_type": "code",
"source": [
"!nvidia-smi"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "Mn6u-mYR9pJd",
"outputId": "523031c4-3eee-4a86-d7b4-954ed4d88f00"
},
"execution_count": 2,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Sun Jan 2 09:02:53 2022 \n",
"+-----------------------------------------------------------------------------+\n",
"| NVIDIA-SMI 495.44 Driver Version: 460.32.03 CUDA Version: 11.2 |\n",
"|-------------------------------+----------------------+----------------------+\n",
"| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n",
"| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n",
"| | | MIG M. |\n",
"|===============================+======================+======================|\n",
"| 0 Tesla P100-PCIE... Off | 00000000:00:04.0 Off | 0 |\n",
"| N/A 42C P0 26W / 250W | 0MiB / 16280MiB | 0% Default |\n",
"| | | N/A |\n",
"+-------------------------------+----------------------+----------------------+\n",
" \n",
"+-----------------------------------------------------------------------------+\n",
"| Processes: |\n",
"| GPU GI CI PID Type Process name GPU Memory |\n",
"| ID ID Usage |\n",
"|=============================================================================|\n",
"| No running processes found |\n",
"+-----------------------------------------------------------------------------+\n"
]
}
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "PHPDqV3CJeYD",
"outputId": "0c755de9-ffdf-437b-d1d6-2486418b78c0"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Ubuntu 18.04.5 LTS \\n \\l\n",
"\n",
"nvcc: NVIDIA (R) Cuda compiler driver\n",
"Copyright (c) 2005-2020 NVIDIA Corporation\n",
"Built on Mon_Oct_12_20:09:46_PDT_2020\n",
"Cuda compilation tools, release 11.1, V11.1.105\n",
"Build cuda_11.1.TC455_06.29190527_0\n",
"Python 3.7.12\n",
"ii libcudnn7 7.6.5.32-1+cuda10.1 amd64 cuDNN runtime libraries\n",
"ii libcudnn7-dev 7.6.5.32-1+cuda10.1 amd64 cuDNN development libraries and headers\n",
"hi libcudnn8 8.0.5.39-1+cuda11.1 amd64 cuDNN runtime libraries\n",
"ii libcudnn8-dev 8.0.5.39-1+cuda11.1 amd64 cuDNN development libraries and headers\n"
]
}
],
"source": [
"!cat /etc/issue\n",
"!nvcc -V\n",
"!python -V\n",
"!dpkg -l | grep \"cudnn\""
]
},
{
"cell_type": "code",
"source": [
"!tar -zxf /content/gdrive/MyDrive/TensorRT-8.2.2.1.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz"
],
"metadata": {
"id": "S_TWYntJa7cT"
},
"execution_count": 4,
"outputs": []
},
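{
"cell_type": "code",
"source": [
"# Quick check (added): list the extracted tree so the wheel, lib and sample paths used in\n",
"# the following cells can be confirmed before installing anything.\n",
"!ls /content/TensorRT-8.2.2.1"
],
"metadata": {},
"execution_count": null,
"outputs": []
},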
{
"cell_type": "code",
"source": [
"%cd /content/TensorRT-8.2.2.1/python/\n",
"!sudo pip3 install tensorrt-8.2.2.1-cp37-none-linux_x86_64.whl"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "gIc4GcG_fPu6",
"outputId": "f6d49fac-a1fc-438d-bbe9-ce99161d9531"
},
"execution_count": 5,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"/content/TensorRT-8.2.2.1/python\n",
"Processing ./tensorrt-8.2.2.1-cp37-none-linux_x86_64.whl\n",
"Installing collected packages: tensorrt\n",
"Successfully installed tensorrt-8.2.2.1\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"%cd /content/TensorRT-8.2.2.1/uff/\n",
"!sudo pip3 install uff-0.6.9-py2.py3-none-any.whl"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "1ZkGwK0swi5R",
"outputId": "84209403-b1eb-4799-fa43-799d26c71fc9"
},
"execution_count": 6,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"/content/TensorRT-8.2.2.1/uff\n",
"Processing ./uff-0.6.9-py2.py3-none-any.whl\n",
"Requirement already satisfied: numpy>=1.11.0 in /usr/local/lib/python3.7/dist-packages (from uff==0.6.9) (1.19.5)\n",
"Requirement already satisfied: protobuf>=3.3.0 in /usr/local/lib/python3.7/dist-packages (from uff==0.6.9) (3.17.3)\n",
"Requirement already satisfied: six>=1.9 in /usr/local/lib/python3.7/dist-packages (from protobuf>=3.3.0->uff==0.6.9) (1.15.0)\n",
"Installing collected packages: uff\n",
"Successfully installed uff-0.6.9\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"%cd /content/TensorRT-8.2.2.1/graphsurgeon/\n",
"!sudo pip3 install graphsurgeon-0.4.5-py2.py3-none-any.whl"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "o-DN4HXvwxF6",
"outputId": "b4047230-0982-41aa-e5d8-5106d22ccc75"
},
"execution_count": 7,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"/content/TensorRT-8.2.2.1/graphsurgeon\n",
"Processing ./graphsurgeon-0.4.5-py2.py3-none-any.whl\n",
"Installing collected packages: graphsurgeon\n",
"Successfully installed graphsurgeon-0.4.5\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"%cd /content/TensorRT-8.2.2.1/onnx_graphsurgeon/\n",
"!sudo pip3 install onnx_graphsurgeon-0.3.12-py2.py3-none-any.whl"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "HeldAYL1w8N1",
"outputId": "1e5d03a1-d5da-437e-8c40-9da60efa860c"
},
"execution_count": 8,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"/content/TensorRT-8.2.2.1/onnx_graphsurgeon\n",
"Processing ./onnx_graphsurgeon-0.3.12-py2.py3-none-any.whl\n",
"Collecting onnx\n",
" Downloading onnx-1.10.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (12.7 MB)\n",
"\u001b[K |████████████████████████████████| 12.7 MB 11.1 MB/s \n",
"\u001b[?25hRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from onnx-graphsurgeon==0.3.12) (1.19.5)\n",
"Requirement already satisfied: protobuf in /usr/local/lib/python3.7/dist-packages (from onnx->onnx-graphsurgeon==0.3.12) (3.17.3)\n",
"Requirement already satisfied: typing-extensions>=3.6.2.1 in /usr/local/lib/python3.7/dist-packages (from onnx->onnx-graphsurgeon==0.3.12) (3.10.0.2)\n",
"Requirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from onnx->onnx-graphsurgeon==0.3.12) (1.15.0)\n",
"Installing collected packages: onnx, onnx-graphsurgeon\n",
"Successfully installed onnx-1.10.2 onnx-graphsurgeon-0.3.12\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"# なぜか LD_LIBRARY_PATH を変更する方法がうまくいかないので、強引にライブラリファイルを /usr/lib/ にコピーする\n",
"# import os\n",
"# os.environ['LD_LIBRARY_PATH'] += ':/content/TensorRT-8.2.2.1/lib'\n",
"# !echo $LD_LIBRARY_PATH\n",
"!sudo cp /content/TensorRT-8.2.2.1/lib/* /usr/lib/"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "nLQ0_mgyvEZk",
"outputId": "c71f0b7f-596a-4421-d7c9-b0db05cdde0d"
},
"execution_count": 9,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"cp: -r not specified; omitting directory '/content/TensorRT-8.2.2.1/lib/stubs'\n"
]
}
]
},
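{
"cell_type": "code",
"source": [
"# Verification sketch (added): after copying libnvinfer and friends into /usr/lib/, the\n",
"# Python bindings should be able to resolve the shared libraries. This simply imports\n",
"# tensorrt and builds a logger to confirm the install is usable.\n",
"import tensorrt as trt\n",
"\n",
"print(\"TensorRT version:\", trt.__version__)\n",
"logger = trt.Logger(trt.Logger.WARNING)  # constructing a Logger exercises the native library\n",
"print(\"Logger created:\", logger is not None)"
],
"metadata": {},
"execution_count": null,
"outputs": []
},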
{
"cell_type": "code",
"source": [
"%cd /content/TensorRT-8.2.2.1/samples/python/network_api_pytorch_mnist/"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "jaPLMyrRxOLY",
"outputId": "0613b1c4-ecaf-4888-a59c-445aac3ae1ea"
},
"execution_count": 10,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"/content/TensorRT-8.2.2.1/samples/python/network_api_pytorch_mnist\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"!python3 -m pip install -r requirements.txt"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "bAz1YJa_xaxe",
"outputId": "7590bd2b-9f38-42b0-ce32-fa8f094b4231"
},
"execution_count": 12,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Looking in links: https://download.pytorch.org/whl/torch_stable.html\n",
"Ignoring numpy: markers 'python_version < \"3.7\"' don't match your environment\n",
"Ignoring torch: markers 'platform_machine == \"aarch64\" and sys_platform == \"linux\"' don't match your environment\n",
"Ignoring torchvision: markers 'platform_machine == \"aarch64\" and sys_platform == \"linux\"' don't match your environment\n",
"Ignoring Pillow: markers 'python_version < \"3.6\"' don't match your environment\n",
"Collecting numpy==1.20.2\n",
" Downloading numpy-1.20.2-cp37-cp37m-manylinux2010_x86_64.whl (15.3 MB)\n",
"\u001b[K |████████████████████████████████| 15.3 MB 9.6 MB/s \n",
"\u001b[?25hCollecting torch==1.9.0+cpu\n",
" Downloading https://download.pytorch.org/whl/cpu/torch-1.9.0%2Bcpu-cp37-cp37m-linux_x86_64.whl (175.5 MB)\n",
"\u001b[K |████████████████████████████████| 175.5 MB 14 kB/s \n",
"\u001b[?25hCollecting torchvision==0.10.0+cpu\n",
" Downloading https://download.pytorch.org/whl/cpu/torchvision-0.10.0%2Bcpu-cp37-cp37m-linux_x86_64.whl (15.7 MB)\n",
"\u001b[K |████████████████████████████████| 15.7 MB 1.4 MB/s \n",
"\u001b[?25hCollecting Pillow==8.3.2\n",
" Downloading Pillow-8.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.0 MB)\n",
"\u001b[K |████████████████████████████████| 3.0 MB 39.3 MB/s \n",
"\u001b[?25hCollecting pycuda<2021.1\n",
" Downloading pycuda-2020.1.tar.gz (1.6 MB)\n",
"\u001b[K |████████████████████████████████| 1.6 MB 42.3 MB/s \n",
"\u001b[?25hRequirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 11)) (2.23.0)\n",
"Requirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from torch==1.9.0+cpu->-r requirements.txt (line 4)) (3.10.0.2)\n",
"Collecting pytools>=2011.2\n",
" Downloading pytools-2021.2.9.tar.gz (66 kB)\n",
"\u001b[K |████████████████████████████████| 66 kB 4.5 MB/s \n",
"\u001b[?25hRequirement already satisfied: decorator>=3.2.0 in /usr/local/lib/python3.7/dist-packages (from pycuda<2021.1->-r requirements.txt (line 10)) (4.4.2)\n",
"Requirement already satisfied: appdirs>=1.4.0 in /usr/local/lib/python3.7/dist-packages (from pycuda<2021.1->-r requirements.txt (line 10)) (1.4.4)\n",
"Collecting mako\n",
" Downloading Mako-1.1.6-py2.py3-none-any.whl (75 kB)\n",
"\u001b[K |████████████████████████████████| 75 kB 4.3 MB/s \n",
"\u001b[?25hRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->-r requirements.txt (line 11)) (2021.10.8)\n",
"Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->-r requirements.txt (line 11)) (1.24.3)\n",
"Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->-r requirements.txt (line 11)) (2.10)\n",
"Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->-r requirements.txt (line 11)) (3.0.4)\n",
"Requirement already satisfied: MarkupSafe>=0.9.2 in /usr/local/lib/python3.7/dist-packages (from mako->pycuda<2021.1->-r requirements.txt (line 10)) (2.0.1)\n",
"Building wheels for collected packages: pycuda, pytools\n",
" Building wheel for pycuda (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for pycuda: filename=pycuda-2020.1-cp37-cp37m-linux_x86_64.whl size=620949 sha256=adc095ec2e74726aea99bf15bdc3ffdb1f7166b7ff3e2488a762b19590523d8f\n",
" Stored in directory: /root/.cache/pip/wheels/56/6d/ac/a3126000ecbd72aea765273679cc08c4d8d84eee8fa8cecc8c\n",
" Building wheel for pytools (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for pytools: filename=pytools-2021.2.9-py2.py3-none-any.whl size=62370 sha256=58a816ac31dec1161f4e11839fa571599bb5a71c78724612e6bebea6ca2bd1ff\n",
" Stored in directory: /root/.cache/pip/wheels/41/b9/6e/94bb014f6484b15ec77e7877f3a227609481ffd98db364504d\n",
"Successfully built pycuda pytools\n",
"Installing collected packages: numpy, torch, pytools, Pillow, mako, torchvision, pycuda\n",
" Attempting uninstall: numpy\n",
" Found existing installation: numpy 1.19.5\n",
" Uninstalling numpy-1.19.5:\n",
" Successfully uninstalled numpy-1.19.5\n",
" Attempting uninstall: torch\n",
" Found existing installation: torch 1.10.0+cu111\n",
" Uninstalling torch-1.10.0+cu111:\n",
" Successfully uninstalled torch-1.10.0+cu111\n",
" Attempting uninstall: Pillow\n",
" Found existing installation: Pillow 7.1.2\n",
" Uninstalling Pillow-7.1.2:\n",
" Successfully uninstalled Pillow-7.1.2\n",
" Attempting uninstall: torchvision\n",
" Found existing installation: torchvision 0.11.1+cu111\n",
" Uninstalling torchvision-0.11.1+cu111:\n",
" Successfully uninstalled torchvision-0.11.1+cu111\n",
"\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
"yellowbrick 1.3.post1 requires numpy<1.20,>=1.16.0, but you have numpy 1.20.2 which is incompatible.\n",
"torchtext 0.11.0 requires torch==1.10.0, but you have torch 1.9.0+cpu which is incompatible.\n",
"torchaudio 0.10.0+cu111 requires torch==1.10.0, but you have torch 1.9.0+cpu which is incompatible.\n",
"datascience 0.10.6 requires folium==0.2.1, but you have folium 0.8.3 which is incompatible.\n",
"albumentations 0.1.12 requires imgaug<0.2.7,>=0.2.5, but you have imgaug 0.2.9 which is incompatible.\u001b[0m\n",
"Successfully installed Pillow-8.3.2 mako-1.1.6 numpy-1.20.2 pycuda-2020.1 pytools-2021.2.9 torch-1.9.0+cpu torchvision-0.10.0+cpu\n"
]
}
]
},
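{
"cell_type": "code",
"source": [
"# Optional GPU check (added): the MNIST sample uses pycuda for inference, so confirm pycuda\n",
"# can initialise a CUDA context and see the Colab GPU before running sample.py.\n",
"import pycuda.autoinit  # noqa: F401 - creates a context on device 0\n",
"import pycuda.driver as cuda\n",
"\n",
"print(\"CUDA devices:\", cuda.Device.count())\n",
"print(\"Device 0:\", cuda.Device(0).name())"
],
"metadata": {},
"execution_count": null,
"outputs": []
},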
{
"cell_type": "code",
"source": [
"!python3 sample.py"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "bAxxx_yxyWyG",
"outputId": "677dc745-b315-4543-8c7c-f16b02c16d7b"
},
"execution_count": 13,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz\n",
"Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz to /tmp/mnist/data/MNIST/raw/train-images-idx3-ubyte.gz\n",
"9913344it [00:00, 91787756.64it/s] \n",
"Extracting /tmp/mnist/data/MNIST/raw/train-images-idx3-ubyte.gz to /tmp/mnist/data/MNIST/raw\n",
"\n",
"Downloading http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz\n",
"Downloading http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz to /tmp/mnist/data/MNIST/raw/train-labels-idx1-ubyte.gz\n",
"29696it [00:00, 74897204.80it/s]\n",
"Extracting /tmp/mnist/data/MNIST/raw/train-labels-idx1-ubyte.gz to /tmp/mnist/data/MNIST/raw\n",
"\n",
"Downloading http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz\n",
"Downloading http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz to /tmp/mnist/data/MNIST/raw/t10k-images-idx3-ubyte.gz\n",
"1649664it [00:00, 53655866.88it/s]\n",
"Extracting /tmp/mnist/data/MNIST/raw/t10k-images-idx3-ubyte.gz to /tmp/mnist/data/MNIST/raw\n",
"\n",
"Downloading http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz\n",
"Downloading http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz to /tmp/mnist/data/MNIST/raw/t10k-labels-idx1-ubyte.gz\n",
"5120it [00:00, 24999809.64it/s]\n",
"Extracting /tmp/mnist/data/MNIST/raw/t10k-labels-idx1-ubyte.gz to /tmp/mnist/data/MNIST/raw\n",
"\n",
"/usr/local/lib/python3.7/dist-packages/torchvision/datasets/mnist.py:498: UserWarning: The given NumPy array is not writeable, and PyTorch does not support non-writeable tensors. This means you can write to the underlying (supposedly non-writeable) NumPy array using the tensor. You may want to copy the array to protect its data or make it writeable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at /pytorch/torch/csrc/utils/tensor_numpy.cpp:180.)\n",
" return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s)\n",
"/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py:718: UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable. (Triggered internally at /pytorch/c10/core/TensorImpl.h:1156.)\n",
" return torch.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)\n",
"Train Epoch: 1 [0/60000 (0%)]\tLoss: 2.285411\n",
"Train Epoch: 1 [6400/60000 (11%)]\tLoss: 0.361572\n",
"Train Epoch: 1 [12800/60000 (21%)]\tLoss: 0.412648\n",
"Train Epoch: 1 [19200/60000 (32%)]\tLoss: 0.177222\n",
"Train Epoch: 1 [25600/60000 (43%)]\tLoss: 0.296752\n",
"Train Epoch: 1 [32000/60000 (53%)]\tLoss: 0.096482\n",
"Train Epoch: 1 [38400/60000 (64%)]\tLoss: 0.323602\n",
"Train Epoch: 1 [44800/60000 (75%)]\tLoss: 0.142474\n",
"Train Epoch: 1 [51200/60000 (85%)]\tLoss: 0.044940\n",
"Train Epoch: 1 [57600/60000 (96%)]\tLoss: 0.062012\n",
"\n",
"Test set: Average loss: 0.1136, Accuracy: 9624/10000 (96%)\n",
"\n",
"Train Epoch: 2 [0/60000 (0%)]\tLoss: 0.043669\n",
"Train Epoch: 2 [6400/60000 (11%)]\tLoss: 0.119768\n",
"Train Epoch: 2 [12800/60000 (21%)]\tLoss: 0.041819\n",
"Train Epoch: 2 [19200/60000 (32%)]\tLoss: 0.137233\n",
"Train Epoch: 2 [25600/60000 (43%)]\tLoss: 0.114953\n",
"Train Epoch: 2 [32000/60000 (53%)]\tLoss: 0.038673\n",
"Train Epoch: 2 [38400/60000 (64%)]\tLoss: 0.064073\n",
"Train Epoch: 2 [44800/60000 (75%)]\tLoss: 0.018291\n",
"Train Epoch: 2 [51200/60000 (85%)]\tLoss: 0.022708\n",
"Train Epoch: 2 [57600/60000 (96%)]\tLoss: 0.152664\n",
"\n",
"Test set: Average loss: 0.0568, Accuracy: 9812/10000 (98%)\n",
"\n",
"sample.py:45: DeprecationWarning: Use add_convolution_nd instead.\n",
" conv1 = network.add_convolution(input=input_tensor, num_output_maps=20, kernel_shape=(5, 5), kernel=conv1_w, bias=conv1_b)\n",
"sample.py:46: DeprecationWarning: Use stride_nd instead.\n",
" conv1.stride = (1, 1)\n",
"sample.py:48: DeprecationWarning: Use add_pooling_nd instead.\n",
" pool1 = network.add_pooling(input=conv1.get_output(0), type=trt.PoolingType.MAX, window_size=(2, 2))\n",
"sample.py:49: DeprecationWarning: Use stride_nd instead.\n",
" pool1.stride = (2, 2)\n",
"sample.py:53: DeprecationWarning: Use add_convolution_nd instead.\n",
" conv2 = network.add_convolution(pool1.get_output(0), 50, (5, 5), conv2_w, conv2_b)\n",
"sample.py:54: DeprecationWarning: Use stride_nd instead.\n",
" conv2.stride = (1, 1)\n",
"sample.py:56: DeprecationWarning: Use add_pooling_nd instead.\n",
" pool2 = network.add_pooling(conv2.get_output(0), trt.PoolingType.MAX, (2, 2))\n",
"sample.py:57: DeprecationWarning: Use stride_nd instead.\n",
" pool2.stride = (2, 2)\n",
"[01/02/2022-09:09:10] [TRT] [W] TensorRT was linked against cuBLAS/cuBLAS LT 11.6.3 but loaded cuBLAS/cuBLAS LT 11.3.0\n",
"[01/02/2022-09:09:11] [TRT] [W] TensorRT was linked against cuDNN 8.2.1 but loaded cuDNN 8.0.5\n",
"[01/02/2022-09:09:18] [TRT] [W] TensorRT was linked against cuBLAS/cuBLAS LT 11.6.3 but loaded cuBLAS/cuBLAS LT 11.3.0\n",
"[01/02/2022-09:09:18] [TRT] [W] TensorRT was linked against cuDNN 8.2.1 but loaded cuDNN 8.0.5\n",
"[01/02/2022-09:09:18] [TRT] [W] TensorRT was linked against cuBLAS/cuBLAS LT 11.6.3 but loaded cuBLAS/cuBLAS LT 11.3.0\n",
"[01/02/2022-09:09:18] [TRT] [W] TensorRT was linked against cuDNN 8.2.1 but loaded cuDNN 8.0.5\n",
"[01/02/2022-09:09:18] [TRT] [W] TensorRT was linked against cuBLAS/cuBLAS LT 11.6.3 but loaded cuBLAS/cuBLAS LT 11.3.0\n",
"[01/02/2022-09:09:18] [TRT] [W] TensorRT was linked against cuDNN 8.2.1 but loaded cuDNN 8.0.5\n",
"Test Case: 4\n",
"Prediction: 4\n"
]
}
]
},
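{
"cell_type": "code",
"source": [
"# Extra smoke test (added): the tar distribution also ships the trtexec CLI under bin/.\n",
"# With the shared libraries copied into /usr/lib/ above it should run directly; printing\n",
"# its help text confirms the binary links against the installed libraries.\n",
"!/content/TensorRT-8.2.2.1/bin/trtexec --help"
],
"metadata": {},
"execution_count": null,
"outputs": []
},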
{
"cell_type": "code",
"source": [
""
],
"metadata": {
"id": "O7orH8qz_LlA"
},
"execution_count": null,
"outputs": []
}
]
}