Skip to content

Instantly share code, notes, and snippets.

@d1ff
Created May 21, 2017 13:32
Show Gist options
  • Star 4 You must be signed in to star a gist
  • Fork 1 You must be signed in to fork a gist
  • Save d1ff/0dd933f906b34862a51abf0471b19f9e to your computer and use it in GitHub Desktop.
Minimal example of processing PyTorch tensors in C++ with cv::cuda::GpuMat
#include <cstdint>
#include <iostream>

#include <boost/filesystem.hpp>
#include <boost/python.hpp>

#include <opencv2/cudaarithm.hpp>
#include <opencv2/cudaimgproc.hpp>

#include "pytorch_cpp_interop.h"
namespace py = boost::python;
namespace fs = boost::filesystem;
// Thin wrapper around the embedded CPython interpreter: holds the __main__
// module and its global dict, and provides helpers to import a module from a
// file path and to execute code snippets. All helpers print and swallow
// Python errors, returning an empty py::object() on failure.
struct PythonState {
    py::object main_module;
    py::object globals;

    PythonState()
        // PyImport_AddModule returns a *borrowed* reference; py::borrowed
        // tells Boost.Python to incref so the handle owns it correctly.
        : main_module(py::object(
              py::handle<>(py::borrowed(PyImport_AddModule("__main__")))))
    {
        globals = main_module.attr("__dict__");
    }

    /// Import a Python module from an arbitrary file path (string overload).
    py::object import(const std::string& module_path)
    {
        return _import(fs::path(module_path));
    }

    /// Import a module from disk via importlib.util (spec_from_file_location
    /// + exec_module). The module name is the file stem.
    /// Returns the imported module, or an empty object on failure.
    py::object _import(const fs::path& module_path)
    {
        try {
            py::dict locals;
            locals["mname"] = module_path.stem().string();
            locals["filename"] = module_path.string();
            py::exec("import importlib.util\n"
                     "spec = importlib.util.spec_from_file_location(mname, "
                     "filename)\n"
                     "imported = importlib.util.module_from_spec(spec)\n"
                     "spec.loader.exec_module(imported)",
                     globals, locals);
            return locals["imported"];
        } catch (const py::error_already_set&) {
            // Catch by const& (was non-const ref with unused name);
            // PyErr_Print both reports and clears the Python error state.
            PyErr_Print();
        }
        return py::object();
    }

    /// Execute `code` with the given dict as the local scope.
    /// Returns py::exec's result, or an empty object on failure.
    py::object exec(const char* code, py::dict& locals)
    {
        try {
            return py::exec(code, globals, locals);
        } catch (const py::error_already_set&) {
            PyErr_Print();
        }
        return py::object();
    }

    /// Execute `code` at module scope (globals used as both scopes), so
    /// top-level defs/imports become visible via state.globals afterwards.
    py::object exec(const char* code)
    {
        try {
            return py::exec(code, globals, globals);
        } catch (const py::error_already_set&) {
            PyErr_Print();
        }
        return py::object();
    }
};
int main(int argc, char* argv[])
{
Py_Initialize();
PyEval_InitThreads();
long _tensor_ptr = -1;
py::dict locals;
try {
PythonState state;
state.exec("import torch\n"
"def make_tensor():\n"
" return torch.cuda.FloatTensor(10, 10, 3)");
py::object torch = state.globals["torch"];
py::object make_tensor = state.globals["make_tensor"];
py::object tensor = make_tensor();
locals["t"] = tensor;
state.exec("data_ptr = t.data_ptr()", locals);
_tensor_ptr = py::extract<long>(locals["data_ptr"]);
std::cout << "init torch.cuda.FloatTensor=" << _tensor_ptr
<< std::endl;
if (_tensor_ptr < 0) {
return 0;
}
void* tensor_ptr = reinterpret_cast<void*>(_tensor_ptr);
cv::cuda::GpuMat mat(10, 10, CV_32FC3, (uchar*)tensor_ptr);
cv::Mat x_cpu(10, 10, CV_32FC3);
x_cpu = cv::Scalar::all(1);
mat.upload(x_cpu);
state.exec("t.copy_(torch.randn(t.size()))", locals);
state.exec("print('Tensor sum:', torch.sum(t))", locals);
std::cout << "Matrix sum " << cv::sum(cv::cuda::sum(mat)) << std::endl;
} catch (py::error_already_set& err) {
PyErr_Print();
}
return 0;
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment