/**
* memory_region.h
*
* Definition of the gsl::span-like memory_region class
*/
#pragma once
#ifndef SRC_UTIL_MEMORY_REGION_H_
#define SRC_UTIL_MEMORY_REGION_H_
#if ( __cplusplus < 201103L )
#error "C++11 support required for the memory_region class"
#endif
#include "gsl/gsl-lite.h"
#include <cstring> // for memcmp
#ifdef __CUDACC__
#define CUDA_DESIGNATOR __host__ __device__
#ifdef Expects
#undef Expects
#undef Ensures
#define Expects(x)
#define Ensures(x)
#endif
#else /* non-CUDA code */
#define CUDA_DESIGNATOR
#ifndef Expects
#define Expects(x) ::gsl::fail_fast_assert((x))
#define Ensures(x) ::gsl::fail_fast_assert((x))
#endif
#endif
namespace util {
/**
 * An untyped raw buffer, for use instead of gsl::span when you don't
 * know what the element type is, but you do want to avoid passing a
 * pointer and a length separately. The methods are copied almost
 * verbatim from gsl::span, except that we keep a size in bytes
 * instead of an end pointer (and maybe we should just go with the end?),
 * and the methods requiring type information (e.g. iteration) are
 * dropped.
 *
 * @todo
 * 1. The const-correctness of gsl::span (or at least of gsl-lite's
 * implementation) seems a bit fishy to me: how are you supposed to
 * have a span (or a memory region) which must not be altered, when
 * a method like data() const returns a non-const pointer?
 */
class memory_region
{
public:
typedef size_t size_type;
typedef void * pointer;
typedef void const * const_pointer;
pointer data_ { nullptr };
size_type size_ { 0 }; // in bytes
CUDA_DESIGNATOR memory_region() { }
CUDA_DESIGNATOR memory_region( pointer data, size_type size )
: data_ ( data )
, size_ ( size )
{
Expects( size == 0 || ( size > 0 && data != nullptr ) );
}
template < class U >
CUDA_DESIGNATOR memory_region( U* begin, U* end )
: data_ ( begin )
, size_ ( reinterpret_cast<char*>(end) - reinterpret_cast<char*>(begin) )
{
Expects( begin <= end );
}
// In gsl::span, this is private
template< typename U >
CUDA_DESIGNATOR memory_region( U * & data, size_type size )
: data_ ( data )
, size_ ( size )
{
Expects( size == 0 || ( size > 0 && data != nullptr ) );
}
// In gsl::span, this is private
template< typename U >
CUDA_DESIGNATOR memory_region( U * const & data, size_type size )
: data_ ( data )
, size_ ( size )
{
Expects( size == 0 || ( size > 0 && data != nullptr ) );
}
template< class U, size_t N >
CUDA_DESIGNATOR memory_region( U (&arr)[N] )
: data_ ( arr )
, size_ ( N * sizeof( U ) ) // size is kept in bytes, not in elements
{}
template< class U, size_t N >
CUDA_DESIGNATOR memory_region( std::array< U, N > & arr )
: data_ ( arr.data() )
, size_ ( N * sizeof( U ) ) // size is kept in bytes, not in elements
{}
#if gsl_HAVE_DEFAULT_FUNCTION_TEMPLATE_ARG && gsl_HAVE_CONTAINER_DATA_METHOD
// SFINAE enable only if Cont has a data() member function
template< class Cont, typename = decltype(std::declval<Cont>().data()) >
CUDA_DESIGNATOR memory_region( Cont & cont )
: data_ ( cont.data() )
, size_ ( cont.size() * sizeof( *cont.data() ) ) // size is kept in bytes, not in elements
#else
template< class Cont >
CUDA_DESIGNATOR memory_region( Cont & cont )
: data_ ( cont.size() == 0 ? nullptr : &cont[0] )
, size_ ( cont.size() * sizeof( cont[0] ) ) // members are data_/size_; size is kept in bytes
#endif
{}
CUDA_DESIGNATOR memory_region( memory_region && ) = default;
CUDA_DESIGNATOR memory_region( memory_region const & ) = default;
template< typename U >
CUDA_DESIGNATOR memory_region( gsl::span<U> const & s )
: data_ ( s.data() )
, size_ ( s.size() * sizeof( U ) ) // span::size() counts elements; we keep bytes
{}
CUDA_DESIGNATOR memory_region & operator=( memory_region && ) = default;
CUDA_DESIGNATOR memory_region & operator=( memory_region const & ) = default;
CUDA_DESIGNATOR memory_region subbuffer( size_type offset ) const noexcept
{
Expects( offset >= 0 && offset < this->size() );
return memory_region( reinterpret_cast<char*>(data_) + offset,
this->length() - offset );
}
CUDA_DESIGNATOR memory_region subbuffer( size_type offset, size_type count ) const noexcept
{
Expects( offset >= 0 && offset < this->size() && count <= this->size() - offset );
return memory_region( reinterpret_cast<char*>(data_) + offset, count );
}
CUDA_DESIGNATOR operator bool () const noexcept
{
return data_ != nullptr;
}
// Note: Behavior here is _unlike_ gsl::span's - we compare identity
// (pointer and size) rather than the pointed-to bytes
CUDA_DESIGNATOR bool operator==( memory_region const & other ) const noexcept
{
return size() == other.size() && (data_ == other.data_ );
}
CUDA_DESIGNATOR bool operator!=( memory_region const & other ) const noexcept
{
return !( *this == other );
}
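// An illustrative sketch of these identity semantics (hypothetical buffers,
// assuming <cassert> is available): two distinct buffers holding identical
// bytes still compare unequal, since only the pointer and the size are checked.
//
//   char a[4] = { 1, 2, 3, 4 };
//   char b[4] = { 1, 2, 3, 4 };
//   assert( memory_region(a) == memory_region(a) );
//   assert( memory_region(a) != memory_region(b) );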
CUDA_DESIGNATOR pointer data() const noexcept
{
return data_;
}
CUDA_DESIGNATOR bool empty() const noexcept
{
return size() == 0;
}
CUDA_DESIGNATOR size_type size() const noexcept
{
return size_;
}
CUDA_DESIGNATOR size_type length() const noexcept
{
return size();
}
// memory regions don't know about usage, only allocation
// size_type used_length() const noexcept
// {
// return length();
// }
//
// CUDA_DESIGNATOR size_type used_bytes() const noexcept
// {
// return bytes();
// }
CUDA_DESIGNATOR size_type bytes() const noexcept
{
return size();
}
void swap( memory_region & other ) noexcept
{
using std::swap;
swap( data_, other.data_ );
swap( size_, other.size_ );
}
CUDA_DESIGNATOR gsl::span< const gsl::byte > as_bytes() const noexcept
{
return gsl::span< const gsl::byte >( reinterpret_cast<const gsl::byte *>( data() ), bytes() );
}
CUDA_DESIGNATOR gsl::span< gsl::byte > as_writeable_bytes() const noexcept
{
return gsl::span< gsl::byte >( reinterpret_cast<gsl::byte *>( data() ), bytes() );
}
template< typename U >
gsl::span< U > as_span() const noexcept
{
Expects( ( this->bytes() % sizeof(U) ) == 0 );
return gsl::span< U >( reinterpret_cast<U *>( this->data() ), this->bytes() / sizeof( U ) );
}
// The most un-span-like behavior of memory_region: decaying to the pointer
operator void*() const noexcept { return data(); }
};
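/*
 * Usage sketch (illustrative only, not part of the header; the buffers and
 * the include path below are hypothetical assumptions):
 *
 *   #include <cstring>
 *   #include "util/memory_region.h"
 *
 *   float source[1024];
 *   float destination[1024];
 *
 *   util::memory_region src( source );      // array ctor; size recorded in bytes
 *   util::memory_region dst( destination );
 *
 *   // reinterpret the raw region as a typed span for element access
 *   gsl::span<float> elements = src.as_span<float>();
 *
 *   // take the second half of the region (the offset is in bytes)
 *   util::memory_region tail = src.subbuffer( src.bytes() / 2 );
 *
 *   // the implicit decay to void* lets a region be passed directly
 *   // to C-style APIs such as memcpy
 *   std::memcpy( dst, src.data(), src.bytes() );
 */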
// memory_region creator functions (see ctors)
template< typename T >
CUDA_DESIGNATOR memory_region as_memory_region( T * begin, T * end )
{
return memory_region( begin, end );
}
template< typename T >
CUDA_DESIGNATOR memory_region as_memory_region( T * begin, size_t size )
{
// note: this overload takes `size` in bytes, not in elements of T
return memory_region( begin, size );
}
template< typename T, size_t N >
CUDA_DESIGNATOR memory_region as_memory_region( T (&arr)[N] )
{
return memory_region( arr ); // delegate to the array constructor, which records the size in bytes
}
template< typename T, size_t N >
CUDA_DESIGNATOR memory_region as_memory_region( std::array<T,N> & arr )
{
return memory_region( arr );
}
template< class Cont >
CUDA_DESIGNATOR auto as_memory_region( Cont & cont ) -> memory_region
{
return memory_region( cont );
}
// ... and a span creator
template< typename U >
const gsl::span< U > as_span(const memory_region& region )
{
return region.as_span<U>();
}
template< typename U >
CUDA_DESIGNATOR gsl::span< U > as_span( memory_region& region )
{
return region.as_span<U>();
}
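// Illustrative use of the creator functions above (hypothetical container;
// assumes <vector> is included):
//
//   std::vector<int> vec( 100 );
//   auto region = util::as_memory_region( vec );                         // from a container
//   auto ints   = util::as_span<int>( region );                          // back to a typed span
//   auto prefix = util::as_memory_region( vec.data(), vec.data() + 10 ); // from a pointer pair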
} // namespace util
#endif /* SRC_UTIL_MEMORY_REGION_H_ */