Benchmark adapter for xtensor
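The header below compares three ways of adding two small 1-D containers: through xt::adapt (building an xtensor expression over existing memory), through xt::adapt with a pre-allocated result assigned via xt::noalias, and with a plain element-wise loop as a baseline. As a rough, hedged sketch of the API under test (not part of the gist), adapting a std::vector and evaluating an expression looks roughly like this:

#include <iostream>
#include <vector>

#include "xtensor/xadapt.hpp"
#include "xtensor/xio.hpp"

int main()
{
    std::vector<double> v = {1., 2., 3., 4.};
    auto a = xt::adapt(v);      // 1-D view over v's memory, no copy
    auto expr = a + a;          // lazy element-wise expression
    std::cout << expr << std::endl;  // evaluated when printed
    return 0;
}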
/***************************************************************************
* Copyright (c) 2016, Johan Mabille, Sylvain Corlay and Wolf Vollprecht    *
*                                                                          *
* Distributed under the terms of the BSD 3-Clause License.                 *
*                                                                          *
* The full license is in the file LICENSE, distributed with this software. *
****************************************************************************/
#ifndef BENCHMARK_ADAPTER_HPP
#define BENCHMARK_ADAPTER_HPP

#include <array>
#include <vector>

#include <benchmark/benchmark.h>
#include <xsimd/xsimd.hpp>

#include "xtensor/xshape.hpp"
#include "xtensor/xstorage.hpp"
#include "xtensor/xutils.hpp"
#include "xtensor/xadapt.hpp"
#include "xtensor/xnoalias.hpp"
#include "xtensor/xtensor.hpp"
namespace xt
{
    // Adapt two read-only containers with xt::adapt and assign their sum
    // into a freshly allocated xtensor on every iteration.
    template <class V>
    void bm_array_adapter(benchmark::State& state)
    {
        const V a({1, 2, 3, 4});
        const V b({1, 2, 3, 4});
        while (state.KeepRunning())
        {
            auto aa = xt::adapt(a);
            auto ab = xt::adapt(b);
            xtensor<std::size_t, 1> result = aa + ab;
            benchmark::DoNotOptimize(result.raw_data());
        }
    }

    // Same expression, but assigned through xt::noalias into an adapted,
    // pre-allocated result container, avoiding the temporary xtensor.
    template <class V>
    void bm_array_adapter_result(benchmark::State& state)
    {
        const V a({1, 2, 3, 4});
        const V b({1, 2, 3, 4});
        V res({1, 2, 3, 4});
        while (state.KeepRunning())
        {
            auto aa = xt::adapt(a);
            auto ab = xt::adapt(b);
            auto ar = xt::adapt(res);
            xt::noalias(ar) = aa + ab;
            benchmark::DoNotOptimize(ar.raw_data());
        }
    }

    // Baseline: plain element-wise loop over the raw containers, no adapters.
    template <class V>
    void bm_no_adapter(benchmark::State& state)
    {
        V a({1, 2, 3, 4});
        V b({1, 2, 3, 4});
        while (state.KeepRunning())
        {
            V result;
            xt::resize_container(result, a.size());
            for (std::size_t i = 0; i < a.size(); ++i)
            {
                result[i] = a[i] + b[i];
            }
            benchmark::DoNotOptimize(result.data());
            benchmark::DoNotOptimize(a.data());
            benchmark::DoNotOptimize(b.data());
        }
    }
    using array_type = std::array<std::size_t, 4>;
    using uvector_type = xt::uvector<std::size_t, xsimd::aligned_allocator<std::size_t, 32>>;
    // The std::array adapter benchmarks work only with the following patch:
    // add the detail::allocator_type_impl trait to xcontainer.hpp and replace
    // the `using allocator_type = ...` alias in the xcontainer class with
    // allocator_type_t, since std::array has no allocator_type member.
    //
    // namespace detail
    // {
    //     template <class T>
    //     struct allocator_type_impl
    //     {
    //         using type = typename T::allocator_type;
    //     };
    //
    //     template <class T, std::size_t N>
    //     struct allocator_type_impl<const std::array<T, N>>
    //     {
    //         using type = std::allocator<T>;  // fake allocator for testing
    //     };
    // }
    //
    // template <class T>
    // using allocator_type_t = typename detail::allocator_type_impl<T>::type;
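
    // Hypothetical compile-time check (not part of the original gist): with the
    // patch applied, both of these assertions should hold.
    //
    // static_assert(std::is_same<allocator_type_t<std::vector<std::size_t>>,
    //                            std::allocator<std::size_t>>::value,
    //               "std::vector keeps its real allocator");
    // static_assert(std::is_same<allocator_type_t<const std::array<std::size_t, 4>>,
    //                            std::allocator<std::size_t>>::value,
    //               "const std::array gets the fake allocator");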
    // The adapter benchmarks for array_type need the allocator_type patch
    // described above (applied in the diff below), so they stay disabled:
    // BENCHMARK_TEMPLATE(bm_array_adapter, array_type);
    // BENCHMARK_TEMPLATE(bm_array_adapter_result, array_type);

    BENCHMARK_TEMPLATE(bm_array_adapter, uvector_type);
    BENCHMARK_TEMPLATE(bm_array_adapter, std::vector<std::size_t>);
    BENCHMARK_TEMPLATE(bm_no_adapter, array_type);
    BENCHMARK_TEMPLATE(bm_no_adapter, std::vector<std::size_t>);
}

#endif  // BENCHMARK_ADAPTER_HPP
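To run these benchmarks outside the existing xtensor benchmark harness, a minimal standalone runner could look like the sketch below (a hypothetical main.cpp, assuming the header above is saved as benchmark_adapter.hpp and Google Benchmark is linked; the gist itself instead patches benchmark/main.cpp, as the diff that follows shows):

#include <benchmark/benchmark.h>

#include "benchmark_adapter.hpp"

// Google Benchmark's standard entry point: runs every BENCHMARK_TEMPLATE
// registration from the header above.
BENCHMARK_MAIN();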
diff --git a/benchmark/main.cpp b/benchmark/main.cpp
index c432634..6053517 100644
--- a/benchmark/main.cpp
+++ b/benchmark/main.cpp
@@ -10,11 +10,12 @@
 #include <benchmark/benchmark.h>
-#include "benchmark_assign.hpp"
-#include "benchmark_math.hpp"
-#include "benchmark_views.hpp"
-#include "benchmark_container.hpp"
-#include "benchmark_xshape.hpp"
+// #include "benchmark_assign.hpp"
+// #include "benchmark_math.hpp"
+// #include "benchmark_views.hpp"
+// #include "benchmark_container.hpp"
+// #include "benchmark_xshape.hpp"
+#include "benchmark_adapter.hpp"
 #ifdef XTENSOR_USE_XSIMD
 #ifdef __GNUC__
diff --git a/include/xtensor/xcontainer.hpp b/include/xtensor/xcontainer.hpp
index d4f207e..49abbab 100644
--- a/include/xtensor/xcontainer.hpp
+++ b/include/xtensor/xcontainer.hpp
@@ -27,6 +27,25 @@
 namespace xt
 {
+
+    namespace detail
+    {
+        template <class T>
+        struct allocator_type_impl
+        {
+            using type = typename T::allocator_type;
+        };
+
+        template <class T, std::size_t N>
+        struct allocator_type_impl<std::array<T, N>>
+        {
+            using type = std::allocator<T>;  // fake allocator for testing
+        };
+    }
+
+    template <class T>
+    using allocator_type_t = typename detail::allocator_type_impl<T>::type;
+
     template <class D>
     struct xcontainer_iterable_types
     {
@@ -58,7 +77,7 @@ namespace xt
         using inner_types = xcontainer_inner_types<D>;
         using container_type = typename inner_types::container_type;
-        using allocator_type = typename container_type::allocator_type;
+        using allocator_type = allocator_type_t<std::decay_t<container_type>>;
         using value_type = typename container_type::value_type;
         using reference = typename container_type::reference;
         using const_reference = typename container_type::const_reference;
@@ -1189,7 +1208,8 @@ namespace xt
     template <class C, class S>
     inline void resize_data_container(C& c, S size)
     {
-        c.resize(size);
+        // c.resize(size);
+        xt::resize_container(c, size);
     }
     template <class C, class S>
diff --git a/include/xtensor/xtensor.hpp b/include/xtensor/xtensor.hpp
index 78549fc..8f3c845 100644
--- a/include/xtensor/xtensor.hpp
+++ b/include/xtensor/xtensor.hpp
@@ -333,7 +333,7 @@ namespace xt
         // the shape is always initialized since it has a static number of dimensions.
         if (e.derived_cast().size() == 1)
        {
-            m_data.resize(1);
+            resize_container(m_data, 1);
        }
         semantic_base::assign(e);
     }
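
The two sizing changes above (resize_data_container and the m_data.resize(1) call) exist because std::array has no resize member. Routing sizing through xt::resize_container makes it a real resize for growable containers and, in effect, a size check for fixed-size ones. A rough sketch of that idea, with a hypothetical generic_resize helper standing in for xtensor's own xt::resize_container from xutils.hpp:

#include <array>
#include <cassert>
#include <cstddef>
#include <vector>

// Growable containers: actually resize.
template <class C>
void generic_resize(C& c, std::size_t size)
{
    c.resize(size);
}

// Fixed-size containers: nothing to do beyond checking the requested size.
template <class T, std::size_t N>
void generic_resize(std::array<T, N>&, std::size_t size)
{
    assert(size == N);
    (void)size;  // silence unused-parameter warnings when NDEBUG is defined
}

int main()
{
    std::vector<int> v;
    std::array<int, 4> a;
    generic_resize(v, 4);  // v.resize(4)
    generic_resize(a, 4);  // no-op, size already matches
    return 0;
}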