@killeent
Created May 9, 2017 23:25
#if !defined(TH_REAL_IS_HALF)
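/*
 * Python C-API binding for Tensor.addmv.
 * addmv computes: result = beta * self + alpha * (mat @ vec),
 * where mat is a 2-D tensor and vec is a 1-D tensor.
 * Argument parsing is unrolled by hand: the wrapper counts positional and
 * keyword arguments, extracts an optional `out=` tensor, and walks a chain
 * of branches, one per accepted call signature, with beta and alpha each
 * defaulting to 1 when omitted.
 */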
PyObject * THPTensor_(addmv)(PyObject *self, PyObject *args, PyObject *kwargs)
{
PyObject *__kw_beta = NULL;
PyObject *__kw_alpha = NULL;
PyObject *__kw_mat = NULL;
PyObject *__kw_vec = NULL;
if (kwargs) {
__kw_beta = PyDict_GetItemString(kwargs, "beta");
__kw_alpha = PyDict_GetItemString(kwargs, "alpha");
__kw_mat = PyDict_GetItemString(kwargs, "mat");
__kw_vec = PyDict_GetItemString(kwargs, "vec");
}
HANDLE_TH_ERRORS
int __tuplecount = args ? PyTuple_Size(args) : 0;
int __dictcount = kwargs ? PyDict_Size(kwargs) : 0;
int __argcount = __tuplecount + __dictcount;
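/* Pull out the optional `out=` keyword tensor; out=None is treated as if the
   argument had not been passed at all. */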
PyObject *__out;
__out = kwargs ? PyDict_GetItemString(kwargs, "out") : NULL;
if (__out == Py_None) { __out = NULL; __dictcount--; __argcount--; }
if (__out != NULL &&
__argcount == 5 &&
(PyObject*)Py_TYPE(__out) == THPTensorClass &&
(__tuplecount > 0 || __kw_beta) && THPUtils_(checkReal)((__tuplecount > 0 ? PyTuple_GET_ITEM(args, 0) : __kw_beta)) &&
(__tuplecount > 1 || __kw_alpha) && THPUtils_(checkReal)((__tuplecount > 1 ? PyTuple_GET_ITEM(args, 1) : __kw_alpha)) &&
(__tuplecount > 2 || __kw_mat) && (PyObject*)Py_TYPE((__tuplecount > 2 ? PyTuple_GET_ITEM(args, 2) : __kw_mat)) == THPTensorClass &&
(__tuplecount > 3 || __kw_vec) && (PyObject*)Py_TYPE((__tuplecount > 3 ? PyTuple_GET_ITEM(args, 3) : __kw_vec)) == THPTensorClass) {
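/* Signature: (beta, alpha, mat, vec) with an explicit out= tensor.
   On CUDA builds, THCPAutoGPU below appears to act as a scoped device guard
   chosen from the argument tensors for the duration of the call. */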
#if IS_CUDA
THCPAutoGPU __autogpu_guard = THCPAutoGPU(args, (PyObject*)self);
#endif
THTensor* arg_result = ((THPTensor*)__out)->cdata;
real arg_beta = THPUtils_(unpackReal)((__tuplecount > 0 ? PyTuple_GET_ITEM(args, 0) : __kw_beta));
THTensor* arg_self = ((THPTensor*)self)->cdata;
real arg_alpha = THPUtils_(unpackReal)((__tuplecount > 1 ? PyTuple_GET_ITEM(args, 1) : __kw_alpha));
THTensor* arg_mat = ((THPTensor*)(__tuplecount > 2 ? PyTuple_GET_ITEM(args, 2) : __kw_mat))->cdata;
THTensor* arg_vec = ((THPTensor*)(__tuplecount > 3 ? PyTuple_GET_ITEM(args, 3) : __kw_vec))->cdata;
PyThreadState *_save = NULL;
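/* Release the GIL around the TH kernel call (Py_UNBLOCK_THREADS stores the
   thread state in _save); the catch block reacquires it before rethrowing. */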
try {
Py_UNBLOCK_THREADS;
THTensor_(addmv)(LIBRARY_STATE arg_result, arg_beta, arg_self, arg_alpha, arg_mat, arg_vec);
Py_BLOCK_THREADS;
Py_INCREF(__out);
return (PyObject*)(__out);
} catch (...) {
if (_save) {
Py_BLOCK_THREADS;
}
throw;
}
} else if (__out == NULL &&
__argcount == 4 &&
(__tuplecount > 0 || __kw_beta) && THPUtils_(checkReal)((__tuplecount > 0 ? PyTuple_GET_ITEM(args, 0) : __kw_beta)) &&
(__tuplecount > 1 || __kw_alpha) && THPUtils_(checkReal)((__tuplecount > 1 ? PyTuple_GET_ITEM(args, 1) : __kw_alpha)) &&
(__tuplecount > 2 || __kw_mat) && (PyObject*)Py_TYPE((__tuplecount > 2 ? PyTuple_GET_ITEM(args, 2) : __kw_mat)) == THPTensorClass &&
(__tuplecount > 3 || __kw_vec) && (PyObject*)Py_TYPE((__tuplecount > 3 ? PyTuple_GET_ITEM(args, 3) : __kw_vec)) == THPTensorClass) {
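/* Signature: (beta, alpha, mat, vec) without out=; a fresh result tensor is
   allocated and returned. */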
#if IS_CUDA
THCPAutoGPU __autogpu_guard = THCPAutoGPU(args, (PyObject*)self);
#endif
THPTensorPtr _result_guard = (THPTensor*) THPTensor_(NewEmpty)();
if (!_result_guard.get()) return NULL;
THPTensor* result = _result_guard.get();
THTensor* arg_result = ((THPTensor*)result)->cdata;
real arg_beta = THPUtils_(unpackReal)((__tuplecount > 0 ? PyTuple_GET_ITEM(args, 0) : __kw_beta));
THTensor* arg_self = ((THPTensor*)self)->cdata;
real arg_alpha = THPUtils_(unpackReal)((__tuplecount > 1 ? PyTuple_GET_ITEM(args, 1) : __kw_alpha));
THTensor* arg_mat = ((THPTensor*)(__tuplecount > 2 ? PyTuple_GET_ITEM(args, 2) : __kw_mat))->cdata;
THTensor* arg_vec = ((THPTensor*)(__tuplecount > 3 ? PyTuple_GET_ITEM(args, 3) : __kw_vec))->cdata;
PyThreadState *_save = NULL;
try {
Py_UNBLOCK_THREADS;
THTensor_(addmv)(LIBRARY_STATE arg_result, arg_beta, arg_self, arg_alpha, arg_mat, arg_vec);
Py_BLOCK_THREADS;
Py_INCREF(result);
return (PyObject*)(result);
} catch (...) {
if (_save) {
Py_BLOCK_THREADS;
}
throw;
}
} else if (__out != NULL &&
__argcount == 4 &&
(PyObject*)Py_TYPE(__out) == THPTensorClass &&
(__tuplecount > 0 || __kw_beta) && THPUtils_(checkReal)((__tuplecount > 0 ? PyTuple_GET_ITEM(args, 0) : __kw_beta)) &&
(__tuplecount > 1 || __kw_mat) && (PyObject*)Py_TYPE((__tuplecount > 1 ? PyTuple_GET_ITEM(args, 1) : __kw_mat)) == THPTensorClass &&
(__tuplecount > 2 || __kw_vec) && (PyObject*)Py_TYPE((__tuplecount > 2 ? PyTuple_GET_ITEM(args, 2) : __kw_vec)) == THPTensorClass) {
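/* Signature: (beta, mat, vec) with out=; alpha defaults to 1. A positional
   scalar is always taken as beta here, ahead of the alpha branch below. */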
#if IS_CUDA
THCPAutoGPU __autogpu_guard = THCPAutoGPU(args, (PyObject*)self);
#endif
THTensor* arg_result = ((THPTensor*)__out)->cdata;
real arg_beta = THPUtils_(unpackReal)((__tuplecount > 0 ? PyTuple_GET_ITEM(args, 0) : __kw_beta));
THTensor* arg_self = ((THPTensor*)self)->cdata;
THTensor* arg_mat = ((THPTensor*)(__tuplecount > 1 ? PyTuple_GET_ITEM(args, 1) : __kw_mat))->cdata;
THTensor* arg_vec = ((THPTensor*)(__tuplecount > 2 ? PyTuple_GET_ITEM(args, 2) : __kw_vec))->cdata;
PyThreadState *_save = NULL;
try {
Py_UNBLOCK_THREADS;
THTensor_(addmv)(LIBRARY_STATE arg_result, arg_beta, arg_self, AS_REAL(1), arg_mat, arg_vec);
Py_BLOCK_THREADS;
Py_INCREF(__out);
return (PyObject*)(__out);
} catch (...) {
if (_save) {
Py_BLOCK_THREADS;
}
throw;
}
} else if (__out != NULL &&
__argcount == 4 &&
(PyObject*)Py_TYPE(__out) == THPTensorClass &&
(__tuplecount > 0 || __kw_alpha) && THPUtils_(checkReal)((__tuplecount > 0 ? PyTuple_GET_ITEM(args, 0) : __kw_alpha)) &&
(__tuplecount > 1 || __kw_mat) && (PyObject*)Py_TYPE((__tuplecount > 1 ? PyTuple_GET_ITEM(args, 1) : __kw_mat)) == THPTensorClass &&
(__tuplecount > 2 || __kw_vec) && (PyObject*)Py_TYPE((__tuplecount > 2 ? PyTuple_GET_ITEM(args, 2) : __kw_vec)) == THPTensorClass) {
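/* Signature: (alpha, mat, vec) with out=, reached only when the scalar is
   passed as the alpha= keyword; beta defaults to 1. */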
#if IS_CUDA
THCPAutoGPU __autogpu_guard = THCPAutoGPU(args, (PyObject*)self);
#endif
THTensor* arg_result = ((THPTensor*)__out)->cdata;
THTensor* arg_self = ((THPTensor*)self)->cdata;
real arg_alpha = THPUtils_(unpackReal)((__tuplecount > 0 ? PyTuple_GET_ITEM(args, 0) : __kw_alpha));
THTensor* arg_mat = ((THPTensor*)(__tuplecount > 1 ? PyTuple_GET_ITEM(args, 1) : __kw_mat))->cdata;
THTensor* arg_vec = ((THPTensor*)(__tuplecount > 2 ? PyTuple_GET_ITEM(args, 2) : __kw_vec))->cdata;
PyThreadState *_save = NULL;
try {
Py_UNBLOCK_THREADS;
THTensor_(addmv)(LIBRARY_STATE arg_result, AS_REAL(1), arg_self, arg_alpha, arg_mat, arg_vec);
Py_BLOCK_THREADS;
Py_INCREF(__out);
return (PyObject*)(__out);
} catch (...) {
if (_save) {
Py_BLOCK_THREADS;
}
throw;
}
} else if (__out == NULL &&
__argcount == 3 &&
(__tuplecount > 0 || __kw_beta) && THPUtils_(checkReal)((__tuplecount > 0 ? PyTuple_GET_ITEM(args, 0) : __kw_beta)) &&
(__tuplecount > 1 || __kw_mat) && (PyObject*)Py_TYPE((__tuplecount > 1 ? PyTuple_GET_ITEM(args, 1) : __kw_mat)) == THPTensorClass &&
(__tuplecount > 2 || __kw_vec) && (PyObject*)Py_TYPE((__tuplecount > 2 ? PyTuple_GET_ITEM(args, 2) : __kw_vec)) == THPTensorClass) {
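/* Signature: (beta, mat, vec) without out=; alpha defaults to 1. */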
#if IS_CUDA
THCPAutoGPU __autogpu_guard = THCPAutoGPU(args, (PyObject*)self);
#endif
THPTensorPtr _result_guard = (THPTensor*) THPTensor_(NewEmpty)();
if (!_result_guard.get()) return NULL;
THPTensor* result = _result_guard.get();
THTensor* arg_result = ((THPTensor*)result)->cdata;
real arg_beta = THPUtils_(unpackReal)((__tuplecount > 0 ? PyTuple_GET_ITEM(args, 0) : __kw_beta));
THTensor* arg_self = ((THPTensor*)self)->cdata;
THTensor* arg_mat = ((THPTensor*)(__tuplecount > 1 ? PyTuple_GET_ITEM(args, 1) : __kw_mat))->cdata;
THTensor* arg_vec = ((THPTensor*)(__tuplecount > 2 ? PyTuple_GET_ITEM(args, 2) : __kw_vec))->cdata;
PyThreadState *_save = NULL;
try {
Py_UNBLOCK_THREADS;
THTensor_(addmv)(LIBRARY_STATE arg_result, arg_beta, arg_self, AS_REAL(1), arg_mat, arg_vec);
Py_BLOCK_THREADS;
Py_INCREF(result);
return (PyObject*)(result);
} catch (...) {
if (_save) {
Py_BLOCK_THREADS;
}
throw;
}
} else if (__out == NULL &&
__argcount == 3 &&
(__tuplecount > 0 || __kw_alpha) && THPUtils_(checkReal)((__tuplecount > 0 ? PyTuple_GET_ITEM(args, 0) : __kw_alpha)) &&
(__tuplecount > 1 || __kw_mat) && (PyObject*)Py_TYPE((__tuplecount > 1 ? PyTuple_GET_ITEM(args, 1) : __kw_mat)) == THPTensorClass &&
(__tuplecount > 2 || __kw_vec) && (PyObject*)Py_TYPE((__tuplecount > 2 ? PyTuple_GET_ITEM(args, 2) : __kw_vec)) == THPTensorClass) {
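/* Signature: (alpha, mat, vec) without out= (alpha passed as a keyword);
   beta defaults to 1. */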
#if IS_CUDA
THCPAutoGPU __autogpu_guard = THCPAutoGPU(args, (PyObject*)self);
#endif
THPTensorPtr _result_guard = (THPTensor*) THPTensor_(NewEmpty)();
if (!_result_guard.get()) return NULL;
THPTensor* result = _result_guard.get();
THTensor* arg_result = ((THPTensor*)result)->cdata;
THTensor* arg_self = ((THPTensor*)self)->cdata;
real arg_alpha = THPUtils_(unpackReal)((__tuplecount > 0 ? PyTuple_GET_ITEM(args, 0) : __kw_alpha));
THTensor* arg_mat = ((THPTensor*)(__tuplecount > 1 ? PyTuple_GET_ITEM(args, 1) : __kw_mat))->cdata;
THTensor* arg_vec = ((THPTensor*)(__tuplecount > 2 ? PyTuple_GET_ITEM(args, 2) : __kw_vec))->cdata;
PyThreadState *_save = NULL;
try {
Py_UNBLOCK_THREADS;
THTensor_(addmv)(LIBRARY_STATE arg_result, AS_REAL(1), arg_self, arg_alpha, arg_mat, arg_vec);
Py_BLOCK_THREADS;
Py_INCREF(result);
return (PyObject*)(result);
} catch (...) {
if (_save) {
Py_BLOCK_THREADS;
}
throw;
}
} else if (__out != NULL &&
__argcount == 3 &&
(PyObject*)Py_TYPE(__out) == THPTensorClass &&
(__tuplecount > 0 || __kw_mat) && (PyObject*)Py_TYPE((__tuplecount > 0 ? PyTuple_GET_ITEM(args, 0) : __kw_mat)) == THPTensorClass &&
(__tuplecount > 1 || __kw_vec) && (PyObject*)Py_TYPE((__tuplecount > 1 ? PyTuple_GET_ITEM(args, 1) : __kw_vec)) == THPTensorClass) {
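/* Signature: (mat, vec) with out=; beta and alpha both default to 1. */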
#if IS_CUDA
THCPAutoGPU __autogpu_guard = THCPAutoGPU(args, (PyObject*)self);
#endif
THTensor* arg_result = ((THPTensor*)__out)->cdata;
THTensor* arg_self = ((THPTensor*)self)->cdata;
THTensor* arg_mat = ((THPTensor*)(__tuplecount > 0 ? PyTuple_GET_ITEM(args, 0) : __kw_mat))->cdata;
THTensor* arg_vec = ((THPTensor*)(__tuplecount > 1 ? PyTuple_GET_ITEM(args, 1) : __kw_vec))->cdata;
PyThreadState *_save = NULL;
try {
Py_UNBLOCK_THREADS;
THTensor_(addmv)(LIBRARY_STATE arg_result, AS_REAL(1), arg_self, AS_REAL(1), arg_mat, arg_vec);
Py_BLOCK_THREADS;
Py_INCREF(__out);
return (PyObject*)(__out);
} catch (...) {
if (_save) {
Py_BLOCK_THREADS;
}
throw;
}
} else if (__out == NULL &&
__argcount == 2 &&
(__tuplecount > 0 || __kw_mat) && (PyObject*)Py_TYPE((__tuplecount > 0 ? PyTuple_GET_ITEM(args, 0) : __kw_mat)) == THPTensorClass &&
(__tuplecount > 1 || __kw_vec) && (PyObject*)Py_TYPE((__tuplecount > 1 ? PyTuple_GET_ITEM(args, 1) : __kw_vec)) == THPTensorClass) {
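/* Signature: (mat, vec) without out=; beta and alpha both default to 1. */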
#if IS_CUDA
THCPAutoGPU __autogpu_guard = THCPAutoGPU(args, (PyObject*)self);
#endif
THPTensorPtr _result_guard = (THPTensor*) THPTensor_(NewEmpty)();
if (!_result_guard.get()) return NULL;
THPTensor* result = _result_guard.get();
THTensor* arg_result = ((THPTensor*)result)->cdata;
THTensor* arg_self = ((THPTensor*)self)->cdata;
THTensor* arg_mat = ((THPTensor*)(__tuplecount > 0 ? PyTuple_GET_ITEM(args, 0) : __kw_mat))->cdata;
THTensor* arg_vec = ((THPTensor*)(__tuplecount > 1 ? PyTuple_GET_ITEM(args, 1) : __kw_vec))->cdata;
PyThreadState *_save = NULL;
try {
Py_UNBLOCK_THREADS;
THTensor_(addmv)(LIBRARY_STATE arg_result, AS_REAL(1), arg_self, AS_REAL(1), arg_mat, arg_vec);
Py_BLOCK_THREADS;
Py_INCREF(result);
return (PyObject*)(result);
} catch (...) {
if (_save) {
Py_BLOCK_THREADS;
}
throw;
}
}
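/* No overload matched: report an invalid combination of arguments, listing
   the accepted call signatures. */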
THPUtils_invalidArguments(args, kwargs, "addmv", 4, "(" THPTensorStr " mat, " THPTensorStr " vec, #" THPTensorStr " out)", "(" RealStr " beta, " THPTensorStr " mat, " THPTensorStr " vec, #" THPTensorStr " out)", "(" RealStr " alpha, " THPTensorStr " mat, " THPTensorStr " vec, #" THPTensorStr " out)", "(" RealStr " beta, " RealStr " alpha, " THPTensorStr " mat, " THPTensorStr " vec, #" THPTensorStr " out)");
return NULL;
END_HANDLE_TH_ERRORS
}
#endif