1.2 includes a new, easier-to-use API for converting `nn.Module`s into `ScriptModule`s. A sample usage is:
class MyModule(torch.nn.Module):
...
# Construct an nn.Module instance
module = MyModule(args)
Building wheel torch-1.0.0a0+1acaafb | |
running build | |
running build_deps | |
+ SYNC_COMMAND=cp | |
++ command -v rsync | |
+ '[' -x /usr/bin/rsync ']' | |
+ SYNC_COMMAND='rsync -lptgoD' | |
+ CMAKE_COMMAND=cmake | |
++ command -v cmake3 | |
+ [[ -x '' ]] |
133 Adam Paszke | |
100 Zachary DeVito | |
85 Edward Z. Yang | |
60 Zach DeVito | |
56 Peter Goldsborough | |
54 James Reed | |
35 Michael Suo | |
30 Elias Ellison | |
30 David Riazati | |
27 Lu Fang |
09:48:34 initialize.cc:271 I initialize in directory /Users/suo/pytorch with uri file:///Users/suo/pytorch | |
09:48:34 initialize.cc:294 I initializationOptions: {"compilationDatabaseCommand":"","compilationDatabaseDirectory":"","cache":{"directory":"/tmp/.ccls-cache","format":"binary","hierarchicalPath":false,"retainInMemory":2},"capabilities":{"documentOnTypeFormattingProvider":{"firstTriggerCharacter":"}","moreTriggerCharacter":[]},"foldingRangeProvider":true,"workspace":{"workspaceFolders":{"supported":true,"changeNotifications":true}}},"clang":{"excludeArgs":[],"extraArgs":[],"pathMappings":[],"resourceDir":""},"client":{"hierarchicalDocumentSymbolSupport":true,"linkSupport":true,"snippetSupport":true},"codeLens":{"localVariables":true},"completion":{"caseSensitivity":2,"detailedLabel":true,"dropOldRequests":true,"duplicateOptional":true,"filterAndSort":true,"include":{"blacklist":[],"maxPathSize":30,"suffixWhitelist":[".h",".hpp",".hh",".inc"],"whitelist":[]},"maxNum":100},"di |
## Highlights | |
### New TorchScript API | |
1.2 includes a new, easier-to-use API for converting `nn.Module`s into `ScriptModule`s. A sample usage is: | |
``` | |
class MyModule(torch.nn.Module): | |
... | |
# Construct an nn.Module instance | |
module = MyModule(args) |
class Conv2d(torch.nn.Module): | |
def __init__(self, in_channels, out_channels, kernel_size, stride=1, | |
padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros', | |
bn=True): | |
super(Conv2d, self).__init__() | |
# Implementation of Quantized Conv2d layer defintion | |
# TODO: filter the inputs and detect any parameter specified as None | |
# that will be optimized by Prometheus | |
self.in_channels = in_channels |
import torch | |
import torch.nn as nn | |
import time | |
BN_MOMENTUM = 0.1 | |
class Bottleneck_down(nn.Module): | |
expansion = 4 |
Num: Value Size Type Bind Vis Ndx Name | |
14: 0000000000000000 0 TLS GLOBAL DEFAULT UND _ZSt11__once_call@GLIBCXX_3.4.11 (6) | |
15: 0000000000000000 0 TLS GLOBAL DEFAULT UND _ZSt15__once_callable@GLIBCXX_3.4.11 (6) | |
626: 0000000000000020 16 TLS LOCAL DEFAULT 20 _ZN2at12_GLOBAL__N_110debug_infoE | |
627: 0000000000000030 1 TLS LOCAL DEFAULT 20 __tls_guard | |
1233: 0000000000000000 1 TLS LOCAL HIDDEN 19 _ZN2at16GradMode_enabledE | |
2039: 0000000000000050 1 TLS LOCAL DEFAULT 20 __tls_guard | |
2043: 0000000000000038 24 TLS LOCAL HIDDEN 20 _ZN5torch3jit6script5callsE | |
6322: 0000000000000060 8 TLS LOCAL DEFAULT 20 _ZN2at6nativeL14workspace_sizeE | |
6325: 0000000000000058 8 TLS LOCAL DEFAULT 20 _ZN2at6nativeL9workspaceE |
--- before.txt 2019-08-29 20:40:01.565468578 -0700 | |
+++ after.txt 2019-08-30 00:10:23.959952597 -0700 | |
@@ -3,7 +3,10 @@ | |
binary_linux_libtorch_2_7m_cu92_gcc5_4_cxx11-abi_shared-without-deps_upload: | |
environment: | |
- BUILD_ENVIRONMENT: libtorch 2.7m cu92 gcc5.4_cxx11-abi | |
+ - DOCKER_IMAGE: '' | |
+ - USE_CUDA_DOCKER_RUNTIME: '' | |
- LIBTORCH_VARIANT: shared-without-deps | |
+ resource_class: medium |
--- before.txt 2019-08-30 13:40:47.531939741 -0700 | |
+++ after.txt 2019-08-30 13:54:42.624637596 -0700 | |
@@ -553,7 +553,6 @@ | |
environment: | |
- BUILD_ENVIRONMENT: pytorch-linux-xenial-py3-clang5-android-ndk-r19c-x86_64-build | |
- DOCKER_IMAGE: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c:339 | |
- - PYTHON_VERSION: '3.6' | |
- USE_CUDA_DOCKER_RUNTIME: '' | |
- MULTI_GPU: '' | |
resource_class: large |