CUDA std::vector - Unified Memory Allocator
Example code for making a std::vector backed by memory on the GPU using a custom allocator.
/* Copyright 2020 G Davey

   Permission is hereby granted, free of charge, to any person obtaining a copy of
   this software and associated documentation files (the "Software"), to deal in
   the Software without restriction, including without limitation the rights to
   use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
   the Software, and to permit persons to whom the Software is furnished to do so,
   subject to the following conditions:

   The above copyright notice and this permission notice shall be included in all
   copies or substantial portions of the Software.

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
   FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
   COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
   IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <algorithm>   // std::copy
#include <array>
#include <cstddef>     // std::size_t
#include <cstdint>
#include <iostream>
#include <iterator>    // std::ostream_iterator
#include <new>         // placement new
#include <stdexcept>
#include <type_traits> // std::enable_if_t, std::true_type, std::false_type
#include <vector>

#include <cuda_runtime.h>
namespace c9::cuda {
/* The allocator class */
template <typename T>
class unified_alloc
{
public:
    using value_type = T;
    using pointer    = value_type*;
    using size_type  = std::size_t;

    unified_alloc() noexcept = default;

    template <typename U>
    unified_alloc(unified_alloc<U> const&) noexcept {}

    auto allocate(size_type n, const void* = 0) -> value_type* {
        value_type * tmp = nullptr;
        auto error = cudaMallocManaged((void**)&tmp, n * sizeof(T));
        if (error != cudaSuccess) {
            throw std::runtime_error { cudaGetErrorString(error) };
        }
        return tmp;
    }

    auto deallocate(pointer p, size_type) -> void {
        if (p) {
            auto error = cudaFree(p);
            if (error != cudaSuccess) {
                throw std::runtime_error { cudaGetErrorString(error) };
            }
        }
    }
};
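/* Note: only allocate and deallocate need to be written by hand;
   std::allocator_traits synthesises construct, destroy, and rebinding from
   this minimal interface. The converting constructor above is what allows
   rebinding between unified_alloc<T> and unified_alloc<U> to work. */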
/* Equality operators. The allocator is stateless, so any two instances are
   interchangeable and can free each other's allocations; they always compare
   equal. */
template <class T, class U>
auto operator==(unified_alloc<T> const &, unified_alloc<U> const &) -> bool {
    return true;
}

template <class T, class U>
auto operator!=(unified_alloc<T> const &, unified_alloc<U> const &) -> bool {
    return false;
}
/* Template alias for conveniently creating a vector backed by unified memory. */
template <typename T>
using unified_vector = std::vector<T, unified_alloc<T>>;
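/* For example (illustrative only): auto v = unified_vector<float>(1024);
   gives the familiar std::vector interface, while the backing storage is
   reachable from both host code and device kernels. */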
/* Our toy kernel. The bounds check matters whenever the launch configuration
   rounds the thread count up past the array length. */
__global__
auto elementwise_add(int * a, int * b, int * c, std::size_t size) -> void
{
    auto thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    if (thread_id < size) {
        c[thread_id] = a[thread_id] + b[thread_id];
    }
}
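/* A sketch (not used below): for arrays larger than a single launch can cover
   in one pass, the common grid-stride-loop idiom lets any launch configuration
   walk the whole array. */
__global__
auto elementwise_add_strided(int * a, int * b, int * c, std::size_t size) -> void
{
    for (auto i = std::size_t{blockIdx.x * blockDim.x + threadIdx.x};
         i < size; i += blockDim.x * gridDim.x) {
        c[i] = a[i] + b[i];
    }
}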
/* Error checking function that should wrap CUDA runtime calls and blow up if
   anything goes wrong, surfacing the error message from CUDA. */
auto check_error(cudaError_t err) -> void {
    if (err != cudaSuccess) {
        throw std::runtime_error { cudaGetErrorString(err) };
    }
}

/* Utility function that uses the CUDA API to get the ID of the current device. */
auto get_current_device() -> int {
    auto result = int{};
    check_error(cudaGetDevice(&result));
    return result;
}
/* Define a default type trait; any instantiation of this with a type will
   contain a value of false:

       is_unified<int>::value == false
*/
template <typename T>
struct is_unified : std::false_type {};

/* A specialisation of the above type trait. If the passed-in type is itself a
   template, and the inner allocator type is our unified allocator, then the
   trait will contain a true value:

       is_unified<std::vector<int>>::value              == false
       is_unified<c9::cuda::unified_vector<int>>::value == true

   Remember that the actual signature for both the stdlib vector and our CUDA
   vector is something like:

       vector<int, allocator<int>>
*/
template <template <typename, typename> typename Outer, typename Inner>
struct is_unified<Outer<Inner, unified_alloc<Inner>>> : std::true_type {};
/* A helper variable template that reports whether the passed-in type contains
   a unified allocator, without having to spell out the type trait directly. */
template <typename T>
constexpr static auto is_unified_v = is_unified<T>::value;
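/* Compile-time sanity checks mirroring the examples in the comments above: */
static_assert(!is_unified_v<std::vector<int>>, "std::vector uses std::allocator");
static_assert(is_unified_v<unified_vector<int>>, "unified_vector uses unified_alloc");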
/* This uses template substitution to generate a function that only exists for
   types that use the unified allocator. If is_unified_v<T> is false,
   std::enable_if_t<...> names no type, the substitution fails, and because a
   failed substitution is not an error (SFINAE), the function simply does not
   exist for that type.

   get_current_device (above) supplies the default destination device for the
   prefetch. */
template <typename T, typename = std::enable_if_t<is_unified_v<T>>>
auto prefetch(T const & container, cudaStream_t stream = 0,
              int device = get_current_device()) -> void
{
    using value_type = typename T::value_type;
    auto p = container.data();
    if (p) {
        check_error(cudaMemPrefetchAsync(p, container.size() * sizeof(value_type),
                                         device, stream));
    }
}
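/* A companion sketch (an addition, not part of the original gist): unified
   memory can also be prefetched back to the host by passing the special
   cudaCpuDeviceId constant as the destination device. */
template <typename T, typename = std::enable_if_t<is_unified_v<T>>>
auto prefetch_to_host(T const & container, cudaStream_t stream = 0) -> void
{
    using value_type = typename T::value_type;
    if (auto p = container.data()) {
        check_error(cudaMemPrefetchAsync(p, container.size() * sizeof(value_type),
                                         cudaCpuDeviceId, stream));
    }
}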
} /* namespace c9::cuda */
auto main() -> int
{
    using c9::cuda::check_error;

    /* Using unified memory allocated directly through the CUDA API */
    {
        constexpr auto array_size = 3;

        /* Create placeholder variables that will point to memory allocated through CUDA. */
        int * memory_a = nullptr;
        int * memory_b = nullptr;
        int * memory_out = nullptr;

        /* Allocate using unified memory */
        check_error(cudaMallocManaged((void**)&memory_a, array_size * sizeof(int)));
        check_error(cudaMallocManaged((void**)&memory_b, array_size * sizeof(int)));
        check_error(cudaMallocManaged((void**)&memory_out, array_size * sizeof(int)));

        /* This looks slightly awkward, but what we are doing here is using "placement new"
           to create arrays within the memory regions we have allocated. It is a convenient
           way to initialise the memory with the values we want, as well as to access the
           result data.
           https://en.cppreference.com/w/cpp/language/new#Placement_new */
        auto a = new(memory_a) std::array<int, array_size>{1, 2, 3};
        auto b = new(memory_b) std::array<int, array_size>{4, 5, 6};
        auto out = new(memory_out) std::array<int, array_size>{0, 0, 0};

        c9::cuda::elementwise_add<<<1, array_size>>>(a->data(), b->data(), out->data(), a->size());

        /* Allow the kernel to finish executing */
        check_error(cudaDeviceSynchronize());

        /* Another slightly awkward bit of C++: it writes each element to standard out,
           separated by a comma, without having to write a for-loop. The irony of having
           to write an explanation in the same amount of space as a traditional loop is
           not lost on me... */
        std::copy(std::begin(*out), std::end(*out), std::ostream_iterator<int>(std::cout, ", "));
        std::cout << "\n";

        /* Arrays are trivially destructible and do not require a delete-expression; they
           can be removed by simply de-allocating their storage.
           https://en.cppreference.com/w/cpp/language/destructor#Trivial_destructor */
        check_error(cudaFree(memory_a));
        check_error(cudaFree(memory_b));
        check_error(cudaFree(memory_out));
    }
    /* Using our unified memory backed vector */
    {
        /* Create vectors as you would normally */
        auto a = c9::cuda::unified_vector<int>{1, 2, 3};
        auto b = c9::cuda::unified_vector<int>{4, 5, 6};
        auto c = c9::cuda::unified_vector<int>(3); /* Sized initialisation */

        /* Launch kernel and synchronise device. */
        c9::cuda::elementwise_add<<<1, a.size()>>>(a.data(), b.data(), c.data(), a.size());
        check_error(cudaDeviceSynchronize());

        /* Print contents of output vector */
        std::copy(std::begin(c), std::end(c), std::ostream_iterator<int>(std::cout, ", "));
        std::cout << "\n";
    }
    /* Using prefetching */
    {
        /* You will not be able to do the following; it will result in a
           compile-time error, because std::vector<int> does not use the
           unified allocator: */
        // auto data = std::vector<int>{1, 2, 3};
        // c9::cuda::prefetch(data);

        /* Create vectors as you would normally */
        auto a = c9::cuda::unified_vector<int>{1, 2, 3};
        auto b = c9::cuda::unified_vector<int>{4, 5, 6};
        auto c = c9::cuda::unified_vector<int>(3); /* Sized initialisation */

        /* Prefetch! */
        c9::cuda::prefetch(a);
        c9::cuda::prefetch(b);

        /* Launch kernel and synchronise device. */
        c9::cuda::elementwise_add<<<1, a.size()>>>(a.data(), b.data(), c.data(), a.size());
        check_error(cudaDeviceSynchronize());

        /* Print contents of output vector */
        std::copy(std::begin(c), std::end(c), std::ostream_iterator<int>(std::cout, ", "));
        std::cout << "\n";
    }
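    /* A note (a suggestion beyond the original gist): kernel launches do not
       return a cudaError_t themselves, so a common pattern is to follow each
       launch with check_error(cudaGetLastError()) to catch configuration
       errors such as an invalid block size. */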
}