Skip to content

Instantly share code, notes, and snippets.

@amitsingh19975
Last active July 9, 2019 10:25
Show Gist options
  • Save amitsingh19975/331d02b10836790ac251b113e6ca9340 to your computer and use it in GitHub Desktop.
//
// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// The authors gratefully acknowledge the support of
// Fraunhofer IOSB, Ettlingen, Germany
//
#ifndef BOOST_UBLAS_TENSOR_STORAGE_HPP
#define BOOST_UBLAS_TENSOR_STORAGE_HPP
#include "extents.hpp"
#include <algorithm>
#include <array>
#include <functional>
#include <initializer_list>
#include <iostream>
#include <numeric>
#include <type_traits>
#include <unordered_map>
#include <vector>
namespace boost::numeric::ublas::storage {
// Tag types identifying a tensor's storage scheme.  Concrete storage
// containers derive from one of these tags so that algorithms can be
// dispatched on the storage category via the traits below.
struct tensor_storage {};
struct sparse_storage : tensor_storage {};
struct band_storage : tensor_storage {};
struct dense_storage : tensor_storage {};
namespace detail {
// Trait: T models the given storage category, i.e. derives from the tag.
// (Uses C++17 std::is_base_of_v, consistent with the rest of this header.)
template <typename T> struct is_tensor_storage {
  static constexpr bool value = std::is_base_of_v<tensor_storage, T>;
};
template <typename T> struct is_sparse_storage {
  static constexpr bool value = std::is_base_of_v<sparse_storage, T>;
};
template <typename T> struct is_band_storage {
  static constexpr bool value = std::is_base_of_v<band_storage, T>;
};
template <typename T> struct is_dense_storage {
  static constexpr bool value = std::is_base_of_v<dense_storage, T>;
};
// C++17 variable-template shorthands for the traits above.
template <typename T>
inline constexpr bool is_tensor_storage_v = is_tensor_storage<T>::value;
template <typename T>
inline constexpr bool is_sparse_storage_v = is_sparse_storage<T>::value;
template <typename T>
inline constexpr bool is_band_storage_v = is_band_storage<T>::value;
template <typename T>
inline constexpr bool is_dense_storage_v = is_dense_storage<T>::value;
} // namespace detail
namespace sparse_tensor {
// Hash-map backed sparse tensor storage.
//
// Only non-zero elements are materialised in the underlying
// std::unordered_map.  A write through the non-const at()/operator[] for a
// key that is not yet present does NOT insert immediately: the access is
// staged in the (k_, v_) slot and flushed into the map by update_map() on
// the next observing operation.  A staged value equal to value_type{} is
// dropped, so this path never stores zeros.
//
// NOTE(review): the deferred-write scheme holds at most ONE pending write
// at a time and is not thread safe.
template <typename T, typename A = std::allocator<std::pair<const size_t, T>>>
struct compressed_map : sparse_storage {
  using base_type = std::unordered_map<size_t, T, std::hash<size_t>,
                                       std::equal_to<size_t>, A>;
  using value_type = T;
  using key_type = typename base_type::key_type;
  using hasher = typename base_type::hasher;
  using allocator_type = typename base_type::allocator_type;
  using pair_type = typename base_type::value_type;
  using size_type = typename base_type::size_type;
  using difference_type = typename base_type::difference_type;
  using reference = value_type &;
  using const_reference = value_type const &;
  using pointer = typename base_type::pointer;
  using const_pointer = typename base_type::const_pointer;
  using reverse_iterator = void;       // unordered_map is not reversible
  using const_reverse_iterator = void; // unordered_map is not reversible
  using iterator = typename base_type::iterator;
  using const_iterator = typename base_type::const_iterator;

  // Sentinel meaning "no write pending".  The explicit cast avoids the
  // narrowing error that brace-initialising a size_t member with -1 causes.
  static constexpr key_type invalid_key = static_cast<key_type>(-1);

  compressed_map() = default;

  // Sets the logical size only; no elements are created.
  compressed_map(size_type size) : size_(size) {}

  // Dense initialisation: store `val` for every index in [0, size).
  // Zero is skipped so the container stays empty (hence sparse) then.
  compressed_map(size_type size, value_type val) : size_(size) {
    if (val != value_type{}) {
      data_.reserve(size_);
      // size_type loop index: the original `auto i = 0` mixed a signed int
      // with the unsigned size_.
      for (size_type i = 0; i < size_; ++i) {
        data_.emplace(i, val);
      }
    }
  }

  // Copy: also take over any write pending in `other`, then flush it.
  // Member-init order matches the declaration order (size_, k_, v_, data_).
  compressed_map(compressed_map const &other)
      : size_(other.size_), k_(other.k_), v_(other.v_), data_(other.data_) {
    update_map();
  }

  // Move: steal the map and the pending write; reset the source's staging
  // slot so the moved-from object cannot flush a stale entry later.
  compressed_map(compressed_map &&other)
      : size_(other.size_), k_(std::exchange(other.k_, invalid_key)),
        v_(std::exchange(other.v_, value_type{})),
        data_(std::move(other.data_)) {
    update_map();
  }

  // Copy-and-swap gives the strong exception guarantee for both assignments.
  compressed_map &operator=(compressed_map const &other) {
    auto temp = compressed_map(other);
    swap(*this, temp);
    return *this;
  }
  compressed_map &operator=(compressed_map &&other) {
    auto temp = compressed_map(std::move(other));
    swap(*this, temp);
    return *this;
  }

  // Build from a range of map entries; the logical size is the entry count.
  compressed_map(const_iterator b, const_iterator e)
      : size_(static_cast<size_type>(std::distance(b, e))), data_(b, e) {}

  ~compressed_map() = default;

  // Observers flush the pending write first so iteration and lookup see a
  // consistent map.  (`constexpr` was dropped: unordered_map operations can
  // never be constant-evaluated, so the original constexpr was ill-formed,
  // no diagnostic required.)
  iterator begin() noexcept {
    update_map();
    return data_.begin();
  }
  iterator end() noexcept {
    update_map();
    return data_.end();
  }
  const_iterator cbegin() noexcept {
    update_map();
    return data_.cbegin();
  }
  const_iterator cend() noexcept {
    update_map();
    return data_.cend();
  }

  // std::unordered_map has no contiguous storage, so the original bodies
  // (data_.data() / data_.cdata()) could never be instantiated.  The names
  // are kept for interface compatibility, but any use now fails with a
  // clear diagnostic instead of a cryptic "no member" error.
  pointer data() noexcept {
    static_assert(sizeof(T) == 0,
                  "compressed_map has no contiguous storage; "
                  "iterate with begin()/end() instead");
    return nullptr;
  }
  const_pointer cdata() noexcept {
    static_assert(sizeof(T) == 0,
                  "compressed_map has no contiguous storage; "
                  "iterate with begin()/end() instead");
    return nullptr;
  }

  // ADL swap; both sides are flushed first so staged writes are not lost.
  friend void swap(compressed_map &lhs, compressed_map &rhs) {
    lhs.update_map();
    rhs.update_map();
    std::swap(lhs.size_, rhs.size_);
    std::swap(lhs.data_, rhs.data_);
  }

  // Logical (tensor) size, not the number of stored non-zeros.
  auto size() const noexcept { return size_; }

  // Number of elements actually held in the map.
  auto container_size() noexcept {
    update_map();
    return data_.size();
  }

  // Remove every stored element equal to `val` (default: zero).  Erases
  // in-place via the iterator returned by unordered_map::erase instead of
  // collecting keys into a temporary vector.
  auto prune(value_type val = value_type{}) noexcept {
    update_map();
    for (auto it = data_.begin(); it != data_.end();) {
      if (it->second == val) {
        it = data_.erase(it); // erase returns the next valid iterator
      } else {
        ++it;
      }
    }
  }

  // True if `k` is stored (a pending write to `k` is flushed first so it
  // is visible).
  auto contains(key_type k) const noexcept {
    update_map();
    return data_.find(k) != data_.end();
  }

  // Non-const access: a missing key stages a deferred write and hands back
  // a reference to the staging slot; the value written through it is
  // committed by the next observing call (unless it is still zero).
  reference at(key_type k) noexcept {
    if (!contains(k)) {
      k_ = k;
      return v_;
    }
    return data_.at(k);
  }
  // Const access: absent keys read as the zero placeholder.
  const_reference at(key_type k) const noexcept {
    if (!contains(k)) {
      return v_;
    }
    return data_.at(k);
  }
  reference operator[](key_type k) {
    if (!contains(k)) {
      k_ = k;
      return v_;
    }
    return data_[k];
  }
  const_reference operator[](key_type k) const noexcept { return this->at(k); }

  // Direct insertion; zeros are ignored to preserve sparsity.  These
  // replace the commented-out drafts, which mistakenly used value_type
  // where the map's pair_type was meant.  Like the drafts they use
  // emplace/insert semantics: an existing key is NOT overwritten.
  auto insert(key_type k, value_type v) {
    if (v == value_type{})
      return;
    update_map();
    data_.emplace(k, std::move(v));
  }
  auto insert(pair_type const &p) {
    if (p.second == value_type{})
      return;
    update_map();
    data_.insert(p);
  }
  auto insert(pair_type &&p) {
    if (p.second == value_type{})
      return;
    update_map();
    data_.insert(std::move(p));
  }

private:
  // Commit the staged (k_, v_) write, dropping zero values to preserve
  // sparsity.  Declared const and backed by mutable members so that const
  // observers (contains, const at) can flush too — this replaces the
  // const_cast in the original const overload.
  void update_map() const noexcept {
    if (k_ != invalid_key && v_ != value_type{}) {
      data_[k_] = v_;
      k_ = invalid_key;
      v_ = value_type{};
    }
  }

  size_type size_{0};               // logical number of elements
  mutable key_type k_{invalid_key}; // staged key (invalid_key: none pending)
  mutable value_type v_{};          // staged value for k_
  mutable base_type data_;          // key -> non-zero value
};
} // namespace sparse_tensor
namespace dense_tensor {
// True when the extents type E is fixed at compile time / at runtime.
// NOTE: no `static` here — combined with `inline`, `static` wins and gives
// the variable template internal linkage, i.e. a separate copy per
// translation unit of this header (an ODR hazard); plain `inline constexpr`
// yields a single definition.
template <typename E>
inline constexpr bool is_static_v =
    boost::numeric::ublas::detail::is_static<E>::value;
template <typename E>
inline constexpr bool is_dynamic_v =
    boost::numeric::ublas::detail::is_dynamic<E>::value;
// Selects the default dense container for value type T, extents E and
// allocator A: std::vector for runtime extents, std::array (sized by the
// compile-time product of E) for static extents.
template <typename T, typename E, typename A, typename = void>
struct default_storage;
template <typename T, typename E, typename A>
struct default_storage<T, E, A, std::enable_if_t<is_dynamic_v<E>>> {
  using type = std::vector<T, A>;
};
template <typename T, typename E, typename A>
struct default_storage<T, E, A, std::enable_if_t<is_static_v<E>>> {
  using type = std::array<T, product(E{})>;
};
template <typename T, typename E, typename A>
using default_storage_t = typename default_storage<T, E, A>::type;
} // namespace dense_tensor
} // namespace boost::numeric::ublas::storage
namespace boost::numeric::ublas::detail {
template <typename T> struct is_stl_array : std::false_type {};
template <typename T, size_t N>
struct is_stl_array<std::array<T, N>> : std::true_type {};
} // namespace boost::numeric::ublas::detail
#endif
@bassoy
Copy link

bassoy commented Jul 7, 2019

You store relative memory indices j of the sparse tensor in your unordered map.

  1. We have to see if that approach is good. I am not sure, but it might be better in terms of runtime to have two std::vectors of equal length, where one stores the indices and the other the corresponding values. I guess that, e.g., addition and subtraction are much faster with vectors than with a single unordered map. However, this works only for elementwise tensor operations such as tensor addition, subtraction, etc.

  2. Another problem arises for contraction operations, where the algorithms depend on the multi-index i1,i2,...,ip and one mode q, with index iq, receives special treatment in the algorithm. So if we consider working with the at function of the tensor_sparse data structure, we need to compute a relative memory index j from the multi-index i1,i2,...,ip. I have analyzed this in my paper, and the problem is that the performance is really bad when you have to perform a multi-index transformation for each element. This is why one designs a multi-loop recursive algorithm with p loops in which only the relative indices needed to access the non-zero elements are computed. In other words, I believe that we need a data structure which is a generalization of the common CSR or CSC formats, see here.

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment