@OXPHOS
Created May 25, 2016 05:49
#include <shogun/lib/config.h>
#include <shogun/lib/SGVector.h>
#include <shogun/mathematics/eigen3.h>
#include <memory>
#include <stdexcept>

using namespace shogun;
using std::shared_ptr;
using std::unique_ptr;

// Forward declarations -- in a real build the definitions below would have to
// precede linalg_new, since its inline methods use them.
class CPUBackend;
class GPUBackend;
template <class T> struct BaseVector;
template <class T> struct CPU_Vector;
template <class T> struct GPU_Vector;
class linalg_new
{
    shared_ptr<CPUBackend> cpu_backend;
    shared_ptr<GPUBackend> gpu_backend;

public:
    // Register the available backends; the GPU backend is optional.
    linalg_new(shared_ptr<CPUBackend> cpu_backend = {}, shared_ptr<GPUBackend> gpu_backend = {})
    {
        this->cpu_backend = cpu_backend;
        this->gpu_backend = gpu_backend;
    }
    // Dispatch dot() to the GPU backend when both operands live on the GPU,
    // otherwise fall back to the CPU backend.
    template <class T>
    T dot(shared_ptr<BaseVector<T>> a, shared_ptr<BaseVector<T>> b)
    {
        if (a->onGPU() && b->onGPU())
        {
            if (hasGPUBackend())
            {
                // do the GPU backend dot product -- the caller shouldn't care
                // whether it is ViennaCL or some other GPU backend
                return gpu_backend->dot(*static_cast<GPU_Vector<T>*>(a.get()),
                                        *static_cast<GPU_Vector<T>*>(b.get()));
            }
            else
            {
                // either throw or transfer the data back to the CPU
                throw std::runtime_error("user did not register GPU backend");
            }
        }
        else
        {
            // take care that both operands end up on the same backend
            if (a->onGPU())
            {
                // transfer back to CPU || throw error ???
            }
            else if (b->onGPU())
            {
                // transfer back to CPU || throw error ???
            }
            // non-GPU default backend: the actual computation is implemented
            // in CPUBackend and only dispatched from here
            return cpu_backend->dot(*static_cast<CPU_Vector<T>*>(a.get()),
                                    *static_cast<CPU_Vector<T>*>(b.get()));
        }
    }
    // True iff a GPU backend has been registered.
    bool hasGPUBackend()
    {
        return gpu_backend != nullptr;
    }
};
// Default Eigen3-based backend; operates on the SGVector wrapped by CPU_Vector.
class CPUBackend
{
public:
    template <typename T>
    T dot(CPU_Vector<T> a, CPU_Vector<T> b)
    {
        typedef Eigen::Matrix<T, Eigen::Dynamic, 1> VectorXt;
        Eigen::Map<VectorXt> vec_a(a.vec.vector, a.vec.vlen);
        Eigen::Map<VectorXt> vec_b(b.vec.vector, b.vec.vlen);
        return vec_a.dot(vec_b);
    }
    // similarly, other methods
};
// GPU backend -- currently only a ViennaCL-based sketch.
class GPUBackend
{
public:
#ifdef HAVE_VIENNACL
    template <typename T>
    T dot(GPU_Vector<T> a, GPU_Vector<T> b)
    {
        // Dereference a.GPUptr and b.GPUptr to vcl_vectors, then
        // viennacl::linalg::inner_prod(vcl_vector_a, vcl_vector_b);
        // Transfer back to CPU end???
    }
    // similarly, other methods
#endif
};
// Type-erased handle passed to linalg_new; concrete storage lives in the
// CPU_Vector / GPU_Vector subclasses.
template <class T>
struct BaseVector
{
    BaseVector() {}
    BaseVector(SGVector<T> vec) {}
    virtual ~BaseVector() {}
    virtual bool onGPU()
    {
        return false;
    }
};
template <class T>
struct CPU_Vector : public BaseVector<T>
{
    // unique_ptr<SGVector<T>> CPUptr;
    SGVector<T> vec;
    CPU_Vector(SGVector<T> vec)
    {
        // CPUptr = unique_ptr<SGVector<T>>(new SGVector<T>(vec));
        this->vec = vec; // SGVector is reference-counted, so this is a shallow copy
    }
    bool onGPU() override
    {
        return false;
    }
};
template <typename T>
struct GPU_Vector : public BaseVector<T>
{
#ifdef HAVE_VIENNACL
    unique_ptr<VCLMemoryArray> GPUptr;
    // other GPU-related state
#endif
    bool onGPU() override
    {
        return true;
    }
};
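A minimal usage sketch, not part of the gist itself: it assumes the classes above compile as written and that only a CPU backend is registered, so dot() takes the Eigen3 path.

int main()
{
    SGVector<float64_t> x(3), y(3);
    x.set_const(2.0);
    y.set_const(3.0);

    // only a CPU backend registered
    linalg_new linalg(std::make_shared<CPUBackend>());
    auto a = std::make_shared<CPU_Vector<float64_t>>(x);
    auto b = std::make_shared<CPU_Vector<float64_t>>(y);

    // explicit template argument: T is not deduced through the
    // derived-to-base shared_ptr conversion
    float64_t result = linalg.dot<float64_t>(a, b); // 3 * (2 * 3) = 18

    // a pair of GPU_Vectors would instead hit the runtime_error branch,
    // since hasGPUBackend() is false here
    return 0;
}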
OXPHOS commented May 25, 2016

Need to do next:

  • Add test case for CPU (a rough sketch follows this list)
  • Finish GPU part
  • Add test case for GPU
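A rough sketch of the CPU test case, assuming the Google Test harness Shogun's existing unit tests use and the class names from this gist:

#include <gtest/gtest.h>

TEST(LinalgNew, cpu_dot)
{
    SGVector<float64_t> a(3), b(3);
    a[0] = 1; a[1] = 2; a[2] = 3;
    b[0] = 4; b[1] = 5; b[2] = 6;

    linalg_new linalg(std::make_shared<CPUBackend>());
    float64_t result = linalg.dot<float64_t>(
        std::make_shared<CPU_Vector<float64_t>>(a),
        std::make_shared<CPU_Vector<float64_t>>(b));

    EXPECT_NEAR(result, 32.0, 1e-15); // 1*4 + 2*5 + 3*6
}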

Then:

  • Integrate SGMatrix into BaseVector (a placeholder sketch follows this list)
  • Add other methods
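For the SGMatrix item above, the BaseVector pattern could presumably just be mirrored; a placeholder sketch (BaseMatrix and CPU_Matrix are made-up names, not from the gist):

#include <shogun/lib/SGMatrix.h>

template <class T>
struct BaseMatrix
{
    virtual ~BaseMatrix() {}
    virtual bool onGPU() { return false; }
};

template <class T>
struct CPU_Matrix : public BaseMatrix<T>
{
    SGMatrix<T> mat;
    CPU_Matrix(SGMatrix<T> mat) { this->mat = mat; } // SGMatrix is reference-counted
    bool onGPU() override { return false; }
};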

Questions:

  • line 33: throw an error or transfer the data back to the CPU when the operands are on the GPU but no GPU backend is registered?
  • line 71: transfer results back to the CPU or keep them on the GPU (especially for methods that return vectors and matrices)?
  • line 79: if class is used instead of struct, class_list.cpp raises an error.
  • line 93: unique_ptr copies the memory, so is it necessary to have the pointer?
  • Where should the GPU_back_to_CPU method go? I think it should either be independent or go with the Base* factory(SGVector vec) method. (A rough sketch of the standalone option follows.)
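On the last question, a rough sketch of the "independent" option as a free function. It assumes GPU_Vector<T> exposes its length as vlen and that GPUptr can be wrapped as a viennacl::vector; neither exists in the gist yet, so the actual device-to-host copy is left as a comment.

// hypothetical standalone transfer helper -- not part of the gist's API
template <typename T>
CPU_Vector<T> gpu_to_cpu(const GPU_Vector<T>& src)
{
    SGVector<T> dst(src.vlen); // assumed length member
#ifdef HAVE_VIENNACL
    // wrap src.GPUptr as a viennacl::vector<T> and copy device -> host, e.g.
    // viennacl::fast_copy(vcl_vector.begin(), vcl_vector.end(), dst.vector);
#endif
    return CPU_Vector<T>(dst);
}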
