// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
// https://opensource.org/licenses/MIT)
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "doctest.h"
#include <Eigen/Core>
#define FDEEP_FLOAT_TYPE double
#include "fdeep/fdeep.hpp"
#include "fdeep/layers/lstm_layer.hpp"
// Test subclass that exposes the protected apply_impl method of lstm_layer.
class lstm_layer_test : public fdeep::internal::lstm_layer
{
public:
    explicit lstm_layer_test(const std::string& name,
        std::size_t units,
        std::string activation,
        std::string recurrent_activation,
        bool use_bias,
        bool return_sequences,
        const fdeep::internal::RowMajorMatrixXf& W,
        const fdeep::internal::RowMajorMatrixXf& U,
        const fdeep::internal::RowMajorMatrixXf& bias) :
        lstm_layer(name, units, activation, recurrent_activation, use_bias, return_sequences, W, U, bias)
    {
    }

    fdeep::internal::tensor3s apply_impl_test(const fdeep::internal::tensor3s& inputs) const
    {
        return apply_impl(inputs);
    }
};
TEST_CASE("test_lstm_layer")
{
    GIVEN("some input data")
    {
        const fdeep::tensor3s input = {fdeep::tensor3(fdeep::shape3(1, 2, 2), {1.0, 1.0, 1.0, 1.0})};

        // weights manually copied from keras model
        fdeep::internal::RowMajorMatrixXf W(2, 8);
        W << 0.27821910, 0.29161286, -0.27572700, -0.054383874, 0.22253174, -0.14080036, -0.77081174, -0.00047683716,
             -0.57229203, -0.59470201, -0.020450354, -0.55450273, 0.40936947, -0.54402131, 0.054506302, 0.66764033;
        fdeep::internal::RowMajorMatrixXf U(2, 8);
        U << 0.10285524, 0.02719793, -0.12311766, 0.22590087, 0.68737066, -0.21384801, 0.4281773, 0.47004965,
             -0.3293994, -0.5659455, -0.08715995, 0.33737484, -0.20980512, -0.59293926, 0.14901614, -0.1788354;
        fdeep::internal::RowMajorMatrixXf bias(1, 8);
        bias << 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0;
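
        // Layout note (assumption, based on Keras 2 LSTM conventions): the 8
        // columns of W and U hold the four gates in the order i, f, c, o with
        // 2 units each, i.e. W corresponds to the Keras "kernel"
        // (input_dim x 4*units), U to the "recurrent_kernel"
        // (units x 4*units), and bias to the 4*units bias vector. The 1.0
        // entries at bias positions 2 and 3 are the forget-gate bias,
        // consistent with Keras' default unit_forget_bias=True.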
WHEN("we compute output using lstm_impl function with return_sequences==true")
{
lstm_layer_test lstm("name", 2, "tanh", "sigmoid", true, true, W, U, bias);
const auto result = lstm.apply_impl_test(input);
THEN("the output should be the same as in keras")
{
const auto expected_result = fdeep::tensor3(fdeep::shape3(1, 2, 2), {0.07693497, -0.16347712, 0.13451014, -0.25103992});
CHECK(expected_result.shape() == result.front().shape());
for (std::size_t i = 0; i < expected_result.width(); i++)
{
for (std::size_t k = 0; k < expected_result.height(); k++)
CHECK(result.front().get(0,i,k) == doctest::Approx(expected_result.get(0,i,k)).epsilon(1e-6));
}
}
}
WHEN("compute output with return_sequences==false")
{
lstm_layer_test lstm("name", 2, "tanh", "sigmoid", true, false, W, U, bias);
const auto result = lstm.apply_impl_test(input);
THEN("the output should be the same as in keras")
{
const auto expected_result = fdeep::tensor3(fdeep::shape3(1, 1, 2), {0.13451014, -0.25103992});
CHECK(expected_result.shape() == result.front().shape());
for (std::size_t i = 0; i < expected_result.width(); i++)
{
for (std::size_t k = 0; k < expected_result.height(); k++)
CHECK(result.front().get(0, i, k) == doctest::Approx(expected_result.get(0, i, k)).epsilon(1e-6));
}
}
}
}
}
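
// Build sketch (all include paths and the file name lstm_test.cpp are
// assumptions; adjust to where Eigen, doctest, FunctionalPlus, nlohmann/json
// and frugally-deep are installed on your system):
//   g++ -std=c++14 -I/path/to/eigen -I/path/to/doctest \
//       -I/path/to/FunctionalPlus/include -I/path/to/json/include \
//       -I/path/to/frugally-deep/include lstm_test.cpp -o lstm_test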