@jamesr66a
Created October 10, 2018 21:35
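
This patch appears to move Tensor::contiguous off the hand-written bindings (the Declarations.cwrap entry, the manual VariableType.cpp override, and the manual THPVariable_contiguous Python method) and onto the regular native-function path: a declaration in native_functions.yaml plus a small kernel in aten/src/ATen/native/TensorProperties.cpp, so the method/function bindings, autograd, and tracing glue can be code-generated. The print('^^^^^', declaration) added to function_wrapper.py looks like a leftover debugging aid. Short illustrative sketches (not part of the patch) follow a few of the hunks below.
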
diff --git a/aten/src/ATen/Declarations.cwrap b/aten/src/ATen/Declarations.cwrap
index 2ae1d649c..aee44d5a9 100644
--- a/aten/src/ATen/Declarations.cwrap
+++ b/aten/src/ATen/Declarations.cwrap
@@ -149,13 +149,6 @@
    - THTensor* self
]]
[[
-  name: contiguous
-  cname: newContiguous
-  return: THTensor*
-  arguments:
-    - THTensor* self
-]]
-[[
  name: th_clone
  cname: newClone
  return: THTensor*
diff --git a/aten/src/ATen/core/Tensor.h b/aten/src/ATen/core/Tensor.h
index b0121603c..56b1ec5e0 100644
--- a/aten/src/ATen/core/Tensor.h
+++ b/aten/src/ATen/core/Tensor.h
@@ -263,7 +263,6 @@ public:
Tensor & masked_scatter_(const Tensor & mask, const Tensor & source);
Tensor masked_select(const Tensor & mask) const;
Tensor nonzero() const;
-  Tensor contiguous() const;
Tensor view(IntList size) const;
Tensor index_select(int64_t dim, const Tensor & index) const;
Tensor take(const Tensor & index) const;
@@ -449,6 +448,7 @@ public:
Tensor & clamp_max_(Scalar max);
Tensor clamp_min(Scalar min) const;
Tensor & clamp_min_(Scalar min);
+  Tensor contiguous() const;
Tensor cos() const;
Tensor & cos_();
Tensor cosh() const;
diff --git a/aten/src/ATen/core/TensorMethods.h b/aten/src/ATen/core/TensorMethods.h
index 857131298..98890c72b 100644
--- a/aten/src/ATen/core/TensorMethods.h
+++ b/aten/src/ATen/core/TensorMethods.h
@@ -92,9 +92,6 @@ inline Tensor Tensor::masked_select(const Tensor & mask) const {
inline Tensor Tensor::nonzero() const {
return type().nonzero(*this);
}
-inline Tensor Tensor::contiguous() const {
-  return type().contiguous(*this);
-}
inline Tensor Tensor::view(IntList size) const {
return type().view(*this, size);
}
@@ -650,6 +647,9 @@ inline Tensor Tensor::clamp_min(Scalar min) const {
inline Tensor & Tensor::clamp_min_(Scalar min) {
return type().clamp_min_(*this, min);
}
+inline Tensor Tensor::contiguous() const {
+  return type().contiguous(*this);
+}
inline Tensor Tensor::cos() const {
return type().cos(*this);
}
diff --git a/aten/src/ATen/core/Type.h b/aten/src/ATen/core/Type.h
index 009ee309d..b0cc473b6 100644
--- a/aten/src/ATen/core/Type.h
+++ b/aten/src/ATen/core/Type.h
@@ -181,7 +181,6 @@ struct CAFFE2_API Type {
virtual Tensor s_masked_select(const Tensor & self, const Tensor & mask) const = 0;
virtual Tensor masked_select(const Tensor & self, const Tensor & mask) const = 0;
virtual Tensor nonzero(const Tensor & self) const = 0;
-  virtual Tensor contiguous(const Tensor & self) const = 0;
virtual Tensor view(const Tensor & self, IntList size) const = 0;
virtual Tensor index_select(const Tensor & self, int64_t dim, const Tensor & index) const = 0;
virtual Tensor take(const Tensor & self, const Tensor & index) const = 0;
@@ -408,6 +407,7 @@ struct CAFFE2_API Type {
virtual Tensor & clamp_max_(Tensor & self, Scalar max) const = 0;
virtual Tensor clamp_min(const Tensor & self, Scalar min) const = 0;
virtual Tensor & clamp_min_(Tensor & self, Scalar min) const = 0;
+  virtual Tensor contiguous(const Tensor & self) const = 0;
virtual Tensor cos(const Tensor & self) const = 0;
virtual Tensor & cos_(Tensor & self) const = 0;
virtual Tensor cosh(const Tensor & self) const = 0;
diff --git a/aten/src/ATen/function_wrapper.py b/aten/src/ATen/function_wrapper.py
index c83bd0e53..58e844b1e 100644
--- a/aten/src/ATen/function_wrapper.py
+++ b/aten/src/ATen/function_wrapper.py
@@ -1163,6 +1163,8 @@ def create_generic(top_env, declarations):
    output_declarations = [] # type: List[OutputDeclaration]
    for declaration in declarations:
+        if declaration['name'] == 'contiguous':
+            print('^^^^^', declaration)
        output_options = [] # type: List[OutputDeclaration]
        for option in declaration['options']:
            try:
diff --git a/aten/src/ATen/native/TensorProperties.cpp b/aten/src/ATen/native/TensorProperties.cpp
index d326302c5..f7093ee69 100644
--- a/aten/src/ATen/native/TensorProperties.cpp
+++ b/aten/src/ATen/native/TensorProperties.cpp
@@ -52,5 +52,12 @@ Tensor & detach_(Tensor & self) {
return self;
}
+Tensor contiguous(const Tensor & self) {
+  if (self.is_contiguous()) {
+    return self;
+  }
+  return self.clone();
+}
+
}
}
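
For illustration only, a minimal sketch of what the new native kernel means for callers, assuming the ATen C++ API of this era (at::ones, Tensor::t, Tensor::data_ptr; none of these calls are part of the patch): an already-contiguous tensor is handed back unchanged, while a non-contiguous one is materialized through clone().

    // Illustrative sketch, not part of the patch.
    #include <ATen/ATen.h>
    #include <cassert>

    int main() {
      at::Tensor a = at::ones({2, 3});   // already contiguous
      at::Tensor b = a.t();              // transposed view, not contiguous

      // Fast path: contiguous() returns the same tensor; no copy is made.
      assert(a.contiguous().data_ptr() == a.data_ptr());

      // Slow path: contiguous() falls back to clone(), which allocates new storage.
      assert(b.contiguous().data_ptr() != b.data_ptr());
      return 0;
    }
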
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index c1e837ade..703853a84 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -392,6 +392,9 @@
- func: cudnn_is_acceptable(Tensor self) -> bool
  device_guard: false
+- func: contiguous(Tensor self) -> Tensor
+  variants: function, method
+
- func: convolution(Tensor input, Tensor weight, Tensor? bias, IntList stride, IntList padding, IntList dilation, bool transposed, IntList output_padding, int64_t groups) -> Tensor
- func: _convolution(Tensor input, Tensor weight, Tensor? bias, IntList stride, IntList padding, IntList dilation, bool transposed, IntList output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor
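
The variants: function, method line asks the code generator to expose contiguous both as a Tensor method and as a free function in the at:: namespace, so both spellings below should resolve to the same native kernel (a hedged sketch assuming the generated at::contiguous and the at::rand factory; not generated output):

    // Illustrative sketch, not part of the patch.
    #include <ATen/ATen.h>

    int main() {
      at::Tensor x = at::rand({4, 4});
      at::Tensor y1 = x.contiguous();     // method variant
      at::Tensor y2 = at::contiguous(x);  // free-function variant
      return (y1.is_contiguous() && y2.is_contiguous()) ? 0 : 1;
    }
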
diff --git a/tools/autograd/gen_variable_type.py b/tools/autograd/gen_variable_type.py
index 9f65e7bd3..3c8ea9629 100644
--- a/tools/autograd/gen_variable_type.py
+++ b/tools/autograd/gen_variable_type.py
@@ -31,7 +31,7 @@ from .gen_autograd_functions import uses_single_grad
# These functions are written manually in templates/VariableType.cpp
MANUAL_IMPLEMENTATIONS = {
-    'contiguous', 'resize_', 'resize_as_', 'detach', 'detach_',
+    'resize_', 'resize_as_', 'detach', 'detach_',
}
# These functions we don't want to record for tracing, because we always want
diff --git a/tools/autograd/templates/VariableType.cpp b/tools/autograd/templates/VariableType.cpp
index c62e2581a..b275b15aa 100644
--- a/tools/autograd/templates/VariableType.cpp
+++ b/tools/autograd/templates/VariableType.cpp
@@ -459,12 +459,6 @@ Tensor & VariableType::resize_as_(Tensor & self, const Tensor & the_template) co
return self;
}
-Tensor VariableType::contiguous(const Tensor & self) const {
-  unpack(self, "self", 0);
-  auto result = self.is_contiguous() && !jit::tracer::isTracing() ? self : self.clone();
-  return result;
-}
-
Tensor VariableType::detach(const Tensor & self) const {
profiler::RecordFunction profiler("detach");
torch::jit::Node* node = nullptr;
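
Note the behavior change being accepted here: the removed VariableType::contiguous skipped the copy only when the JIT tracer was inactive, i.e. it always cloned while tracing, whereas the new native kernel in TensorProperties.cpp has no tracing special case. With contiguous declared in native_functions.yaml and dropped from MANUAL_IMPLEMENTATIONS in gen_variable_type.py, the dispatch, profiling, and trace-recording wrapper is presumably emitted by the generated VariableType code instead.
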
diff --git a/tools/autograd/templates/python_variable_methods.cpp b/tools/autograd/templates/python_variable_methods.cpp
index 1e736a532..8bc76e30a 100644
--- a/tools/autograd/templates/python_variable_methods.cpp
+++ b/tools/autograd/templates/python_variable_methods.cpp
@@ -111,25 +111,6 @@ static PyObject * THPVariable_dim(PyObject* self, PyObject* args)
END_HANDLE_TH_ERRORS
}
-static Tensor dispatch_contiguous(const Tensor & self) {
-  AutoNoGIL no_gil;
-  DeviceGuard device_guard(self);
-  return self.contiguous();
-}
-
-static PyObject * THPVariable_contiguous(PyObject* self, PyObject* args)
-{
-  HANDLE_TH_ERRORS
-  auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
-  // avoids touching the GIL or current device if self is already contiguous
-  if (self_.is_contiguous() && !jit::tracer::isTracing()) {
-    Py_INCREF(self);
-    return self;
-  }
-  return THPVariable_Wrap(dispatch_contiguous(self_));
-  END_HANDLE_TH_ERRORS
-}
-
static Tensor dispatch_copy_(Tensor & self, const Tensor & other, bool non_blocking) {
AutoNoGIL no_gil;
DeviceGuard device_guard(self);
@@ -598,7 +579,6 @@ PyMethodDef variable_methods[] = {
{"apply_", (PyCFunction)THPVariable_apply_, METH_O, NULL},
{"byte", (PyCFunction)THPVariable_byte, METH_NOARGS, NULL},
{"char", (PyCFunction)THPVariable_char, METH_NOARGS, NULL},
- {"contiguous", (PyCFunction)THPVariable_contiguous, METH_NOARGS, NULL},
{"copy_", (PyCFunction)THPVariable_copy_, METH_VARARGS | METH_KEYWORDS, NULL},
{"cpu", (PyCFunction)THPVariable_cpu, METH_NOARGS, NULL},
{"cuda", (PyCFunction)THPVariable_cuda, METH_VARARGS | METH_KEYWORDS, NULL},