TensorFlow Lite and GCC 6.3
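Three patches against the TensorFlow Lite source tree (plus one Eigen fragment at the end). The common theme: on some GCC 6.3 toolchains, likely ones whose C-library headers predate full C99 math support in namespace std, <cmath> does not declare std::round, std::fmin, std::log2, and friends, so the TFLite Makefile build fails. The patches mechanically redirect those calls to the global C-library symbols and swap farmhash's <byteswap.h> macros for compiler built-ins. A minimal sketch of the failure mode follows; the exact diagnostic text is an assumption and varies by toolchain.

// repro.cc -- illustrative sketch, not part of the patches.
// On an affected GCC 6.3 toolchain the std:: call fails with
// something like "error: 'round' is not a member of 'std'",
// while the global symbol from <math.h> is still available.
#include <math.h>

double quantize(double x) {
  // return std::round(x);  // breaks when <cmath> lacks std::round
  return ::round(x);        // global C-library fallback used throughout
}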
From f67d35439b6cc35dff1c1c837bdec50a222826ed Mon Sep 17 00:00:00 2001
From: Yaroslav Syrytsia <me@ys.lc>
Date: Mon, 19 Oct 2020 16:21:12 +0300
Subject: [PATCH] absl: fix build
---
.../downloads/absl/absl/base/internal/exponential_biased.cc | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tensorflow/lite/tools/make/downloads/absl/absl/base/internal/exponential_biased.cc b/tensorflow/lite/tools/make/downloads/absl/absl/base/internal/exponential_biased.cc
index 67307b3898..313fc5d302 100644
--- a/tensorflow/lite/tools/make/downloads/absl/absl/base/internal/exponential_biased.cc
+++ b/tensorflow/lite/tools/make/downloads/absl/absl/base/internal/exponential_biased.cc
@@ -53,7 +53,7 @@ int64_t ExponentialBiased::GetSkipCount(int64_t mean) {
// under piii debug for some binaries.
double q = static_cast<uint32_t>(rng >> (kPrngNumBits - 26)) + 1.0;
// Put the computed p-value through the CDF of a geometric.
- double interval = bias_ + (std::log2(q) - 26) * (-std::log(2.0) * mean);
+ double interval = bias_ + (::log2(q) - 26) * (- ::log(2.0) * mean);
// Very large values of interval overflow int64_t. To avoid that, we will
// cheat and clamp any huge values to (int64_t max)/2. This is a potential
// source of bias, but the mean would need to be such a large value that it's
--
2.28.0
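For reference, a standalone restatement of the patched absl expression (illustrative names, not absl's API): a 26-bit uniform sample is pushed through the CDF of a geometric distribution with the given mean; only the ::log2 / ::log namespacing differs from upstream.

// sketch.cc -- illustrative only.
#include <math.h>
#include <stdint.h>

double GeometricInterval(uint32_t q26, double bias, double mean) {
  const double q = static_cast<double>(q26) + 1.0;  // q in [1, 2^26]
  return bias + (::log2(q) - 26) * (- ::log(2.0) * mean);
}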
From 7ade641fa5f17410d1be64aedeb9559598d45572 Mon Sep 17 00:00:00 2001
From: Yaroslav Syrytsia <me@ys.lc>
Date: Mon, 19 Oct 2020 16:19:53 +0300
Subject: [PATCH] farmhash fix
---
tensorflow/lite/tools/make/downloads/farmhash/src/farmhash.cc | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tensorflow/lite/tools/make/downloads/farmhash/src/farmhash.cc b/tensorflow/lite/tools/make/downloads/farmhash/src/farmhash.cc
index cfd4a47e62..7537e42758 100644
--- a/tensorflow/lite/tools/make/downloads/farmhash/src/farmhash.cc
+++ b/tensorflow/lite/tools/make/downloads/farmhash/src/farmhash.cc
@@ -201,8 +201,8 @@ STATIC_INLINE uint32_t Fetch32(const char *p) {
return uint32_in_expected_order(result);
}
-STATIC_INLINE uint32_t Bswap32(uint32_t val) { return bswap_32(val); }
-STATIC_INLINE uint64_t Bswap64(uint64_t val) { return bswap_64(val); }
+STATIC_INLINE uint32_t Bswap32(uint32_t val) { return __builtin_bswap32(val); }
+STATIC_INLINE uint64_t Bswap64(uint64_t val) { return __builtin_bswap64(val); }
// FARMHASH PORTABILITY LAYER: bitwise rot
--
2.28.0
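bswap_32 and bswap_64 come from the Linux-specific <byteswap.h>; the patch switches farmhash to the GCC/Clang built-ins, which need no header at all. The same shim in isolation, with a portable fallback added for compilers without the built-in (the fallback branch is my addition, not part of the patch):

// byteswap sketch -- illustrative only.
#include <stdint.h>

inline uint32_t Bswap32(uint32_t v) {
#if defined(__GNUC__) || defined(__clang__)
  return __builtin_bswap32(v);  // what the patch uses
#else
  return (v >> 24) | ((v >> 8) & 0x0000FF00u) |
         ((v << 8) & 0x00FF0000u) | (v << 24);
#endif
}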
From 775aad3943c6c790694d22b198fd9892cea3f5be Mon Sep 17 00:00:00 2001
From: Yaroslav Syrytsia <me@ys.lc>
Date: Mon, 19 Oct 2020 16:15:39 +0300
Subject: [PATCH] lite: make it compile with gcc 6.3
---
.../lite/delegates/hexagon/builders/conv_2d_helpers.cc | 2 +-
.../delegates/hexagon/builders/tests/activations_test.cc | 4 ++--
tensorflow/lite/kernels/activations.cc | 2 +-
tensorflow/lite/kernels/activations_test.cc | 4 ++--
tensorflow/lite/kernels/internal/cppmath.h | 2 +-
.../lite/kernels/internal/logsoftmax_quantized_test.cc | 4 ++--
tensorflow/lite/kernels/internal/max.h | 4 ++--
tensorflow/lite/kernels/internal/min.h | 2 +-
.../lite/kernels/internal/optimized/neon_tensor_utils.cc | 2 +-
.../lite/kernels/internal/optimized/optimized_ops.h | 8 ++++----
tensorflow/lite/kernels/internal/quantization_util.cc | 2 +-
.../kernels/internal/reference/portable_tensor_utils.cc | 6 +++---
.../lite/kernels/internal/reference/reference_ops.h | 2 +-
.../lite/kernels/internal/softmax_quantized_test.cc | 2 +-
tensorflow/lite/kernels/test_util.h | 4 ++--
tensorflow/lite/micro/kernels/hard_swish_test.cc | 4 ++--
.../lite/micro/kernels/xtensa_hifimini/fixedpoint_utils.h | 4 ++--
tensorflow/lite/micro/kernels/xtensa_hifimini/softmax.cc | 2 +-
.../lite/toco/graph_transformations/quantization_util.cc | 2 +-
tensorflow/lite/toco/graph_transformations/quantize.cc | 4 ++--
tensorflow/lite/tools/optimize/quantization_utils.cc | 2 +-
21 files changed, 34 insertions(+), 34 deletions(-)
diff --git a/tensorflow/lite/delegates/hexagon/builders/conv_2d_helpers.cc b/tensorflow/lite/delegates/hexagon/builders/conv_2d_helpers.cc
index b33e28f4e7..cc2ded500d 100644
--- a/tensorflow/lite/delegates/hexagon/builders/conv_2d_helpers.cc
+++ b/tensorflow/lite/delegates/hexagon/builders/conv_2d_helpers.cc
@@ -226,7 +226,7 @@ TfLiteStatus Conv2dOpBuilder::ProcessPerChannelQuantizedBias(
preprocessed_bias_data.reserve(num_scale_values_);
for (int i = 0; i < bias_size; ++i) {
preprocessed_bias_data.push_back(static_cast<int>(
- std::round(std::pow(2, 31) * (dequantized_bias[i] / *bias_max))));
+ ::round(std::pow(2, 31) * (dequantized_bias[i] / *bias_max))));
}
// Add nodes for bias.
const std::vector<int> bias_shape = {1, 1, 1, bias_size};
diff --git a/tensorflow/lite/delegates/hexagon/builders/tests/activations_test.cc b/tensorflow/lite/delegates/hexagon/builders/tests/activations_test.cc
index a908910e20..91f89a7c09 100644
--- a/tensorflow/lite/delegates/hexagon/builders/tests/activations_test.cc
+++ b/tensorflow/lite/delegates/hexagon/builders/tests/activations_test.cc
@@ -273,10 +273,10 @@ void HardSwishBiasTestImpl() {
ASSERT_LE(input_min, -3.0f);
ASSERT_GE(input_max, 3.0f);
const int quantized_input_negative_three =
- std::round(std::numeric_limits<input_type>::min() +
+ ::round(std::numeric_limits<input_type>::min() +
(-3.0f - input_min) / input_scale);
const int quantized_input_positive_three =
- std::round(std::numeric_limits<input_type>::min() +
+ ::round(std::numeric_limits<input_type>::min() +
(3.0f - input_min) / input_scale);
std::vector<float> float_input_values;
for (int i = quantized_input_negative_three;
diff --git a/tensorflow/lite/kernels/activations.cc b/tensorflow/lite/kernels/activations.cc
index 0efac36be7..d82693185c 100644
--- a/tensorflow/lite/kernels/activations.cc
+++ b/tensorflow/lite/kernels/activations.cc
@@ -140,7 +140,7 @@ void PopulateLookupTable(struct OpData* data, const TfLiteTensor* input,
const float dequantized =
input->params.scale * (val - input->params.zero_point);
const float transformed = transform(dequantized);
- const float rescaled = std::round(transformed * inverse_scale);
+ const float rescaled = ::round(transformed * inverse_scale);
const int32_t quantized =
static_cast<int32_t>(rescaled + output->params.zero_point);
data->table[static_cast<uint8_t>(static_cast<T>(val))] =
diff --git a/tensorflow/lite/kernels/activations_test.cc b/tensorflow/lite/kernels/activations_test.cc
index 6e0316538b..9e788a5862 100644
--- a/tensorflow/lite/kernels/activations_test.cc
+++ b/tensorflow/lite/kernels/activations_test.cc
@@ -420,10 +420,10 @@ void TestQuantizedHardSwishBias(TensorType tensor_type, float input_min,
ASSERT_LE(input_min, -3.0f);
ASSERT_GE(input_max, 3.0f);
const int quantized_input_negative_three =
- std::round(std::numeric_limits<QuantizedType>::min() +
+ ::round(std::numeric_limits<QuantizedType>::min() +
(-3.0f - input_min) / input_scale);
const int quantized_input_positive_three =
- std::round(std::numeric_limits<QuantizedType>::min() +
+ ::round(std::numeric_limits<QuantizedType>::min() +
(3.0f - input_min) / input_scale);
std::vector<float> float_input_values;
for (int i = quantized_input_negative_three;
diff --git a/tensorflow/lite/kernels/internal/cppmath.h b/tensorflow/lite/kernels/internal/cppmath.h
index 24a3aec82e..acabb1d517 100644
--- a/tensorflow/lite/kernels/internal/cppmath.h
+++ b/tensorflow/lite/kernels/internal/cppmath.h
@@ -30,7 +30,7 @@ namespace tflite {
#define DECLARE_STD_GLOBAL_SWITCH1(tf_name, std_name) \
template <class T> \
inline T tf_name(const T x) { \
- return TF_LITE_GLOBAL_STD_PREFIX::std_name(x); \
+ return ::std_name(x); \
}
DECLARE_STD_GLOBAL_SWITCH1(TfLiteRound, round);
diff --git a/tensorflow/lite/kernels/internal/logsoftmax_quantized_test.cc b/tensorflow/lite/kernels/internal/logsoftmax_quantized_test.cc
index 72e4685d1e..35c345a3f4 100644
--- a/tensorflow/lite/kernels/internal/logsoftmax_quantized_test.cc
+++ b/tensorflow/lite/kernels/internal/logsoftmax_quantized_test.cc
@@ -59,7 +59,7 @@ void RunLogSoftmaxFloatReference(const uint8* input_data,
for (int i = 0; i < ref_buffer_size; i++) {
reference_output_data[i] = std::max(
0, static_cast<int>(
- 255 + std::round(16.0f * reference_output_float_data[i])));
+ 255 + ::round(16.0f * reference_output_float_data[i])));
}
}
@@ -93,7 +93,7 @@ void RunLogSoftmaxFloatReference(const int8* input_data,
for (int i = 0; i < ref_buffer_size; i++) {
reference_output_data[i] = std::max(
-128, static_cast<int>(
- 127 + std::round(16.0f * reference_output_float_data[i])));
+ 127 + ::round(16.0f * reference_output_float_data[i])));
}
}
diff --git a/tensorflow/lite/kernels/internal/max.h b/tensorflow/lite/kernels/internal/max.h
index c18100272d..96e4e663ff 100644
--- a/tensorflow/lite/kernels/internal/max.h
+++ b/tensorflow/lite/kernels/internal/max.h
@@ -21,12 +21,12 @@ namespace tflite {
#if defined(TF_LITE_USE_GLOBAL_MAX) || defined(__ZEPHYR__)
inline float TfLiteMax(const float& x, const float& y) {
- return std::max(x, y);
+ return ::max(x, y);
}
#else
template <class T>
inline T TfLiteMax(const T& x, const T& y) {
- return std::fmax(x, y);
+ return ::fmax(x, y);
}
#endif
diff --git a/tensorflow/lite/kernels/internal/min.h b/tensorflow/lite/kernels/internal/min.h
index 62035dccd8..0a1ece14e3 100644
--- a/tensorflow/lite/kernels/internal/min.h
+++ b/tensorflow/lite/kernels/internal/min.h
@@ -26,7 +26,7 @@ inline float TfLiteMin(const float& x, const float& y) {
#else
template <class T>
inline T TfLiteMin(const T& x, const T& y) {
- return std::fmin(x, y);
+ return ::fmin(x, y);
}
#endif
diff --git a/tensorflow/lite/kernels/internal/optimized/neon_tensor_utils.cc b/tensorflow/lite/kernels/internal/optimized/neon_tensor_utils.cc
index 0172ba690e..4e5206c6a6 100644
--- a/tensorflow/lite/kernels/internal/optimized/neon_tensor_utils.cc
+++ b/tensorflow/lite/kernels/internal/optimized/neon_tensor_utils.cc
@@ -2375,7 +2375,7 @@ void NeonSymmetricQuantizeFloats(const float* values, const int size,
for (; i < postamble_start; i += 2 * kFloatValuesPerNeonVector) {
// Implements the vectorized version of the following:
// const int32 quantized_value = static_cast<int32>(
- // std::round(*scaling_factor * values[i]));
+ // ::round(*scaling_factor * values[i]));
float32x4_t value0_f32x4 = vld1q_f32(&values[i]);
float32x4_t value1_f32x4 =
vld1q_f32(&values[i + kFloatValuesPerNeonVector]);
diff --git a/tensorflow/lite/kernels/internal/optimized/optimized_ops.h b/tensorflow/lite/kernels/internal/optimized/optimized_ops.h
index 6cabea11ac..d704f0661b 100644
--- a/tensorflow/lite/kernels/internal/optimized/optimized_ops.h
+++ b/tensorflow/lite/kernels/internal/optimized/optimized_ops.h
@@ -3934,7 +3934,7 @@ inline void Softmax(const SoftmaxParams& params,
template <typename T>
inline int32_t QuantizeSoftmaxOutput(float prob_rescaled, int32_t zero_point) {
- const int32_t prob_rnd = static_cast<int32_t>(std::round(prob_rescaled));
+ const int32_t prob_rnd = static_cast<int32_t>(::round(prob_rescaled));
return prob_rnd + zero_point;
}
@@ -4354,9 +4354,9 @@ inline void LogSoftmax(const SoftmaxParams& params, float input_scale,
const float log_prob = scale * input_data[j] - precomputed;
// TODO(tflite): look into better solution.
- // Use std::rint over std::round (which is used in
+ // Use std::rint over ::round (which is used in
// FakeQuant) since it's multiple times faster on tested arm32.
- const int32_t prob_quantized = std::rint(log_prob) + params.zero_point;
+ const int32_t prob_quantized = ::rint(log_prob) + params.zero_point;
output_data[j] = static_cast<T>(
std::max(std::min(clamp_max, prob_quantized), clamp_min));
}
@@ -6807,7 +6807,7 @@ inline void BroadcastPow4D(const RuntimeShape& unextended_input1_shape,
if (unextended_input2_shape.FlatSize() == 1) {
static const float epsilon = 1e-5;
const T exponent = input2_data[0];
- const int int_exponent = static_cast<int>(std::round(exponent));
+ const int int_exponent = static_cast<int>(::round(exponent));
if ((std::abs(input2_data[0] - int_exponent) < epsilon) &&
(int_exponent >= 1)) {
ArithmeticParams params;
diff --git a/tensorflow/lite/kernels/internal/quantization_util.cc b/tensorflow/lite/kernels/internal/quantization_util.cc
index cf431cffdf..d6802ec61c 100644
--- a/tensorflow/lite/kernels/internal/quantization_util.cc
+++ b/tensorflow/lite/kernels/internal/quantization_util.cc
@@ -372,7 +372,7 @@ void FakeQuantizeArray(const float nudged_scale, const float nudged_min,
}
bool CheckedLog2(const float x, int* log2_result) {
- // Using TfLiteRound instead of std::round and std::log instead of
+ // Using TfLiteRound instead of ::round and std::log instead of
// std::log2 to work around these functions being missing in a toolchain
// used in some TensorFlow tests as of May 2018.
const float x_log2 = std::log(x) * (1.0f / std::log(2.0f));
diff --git a/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cc b/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cc
index 338adf8c2e..9c465c82e6 100644
--- a/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cc
+++ b/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cc
@@ -78,8 +78,8 @@ void PortableAsymmetricQuantizeFloats(const float* values, const int size,
const double qmin_double = kMinScale;
const double qmax_double = kMaxScale;
const auto minmax = std::minmax_element(values, values + size);
- const double rmin = std::fmin(0, *minmax.first);
- const double rmax = std::fmax(0, *minmax.second);
+ const double rmin = ::fmin(0, *minmax.first);
+ const double rmax = ::fmax(0, *minmax.second);
if (rmin == rmax) {
memset(quantized_values, 0, size * sizeof(int8_t));
*scaling_factor = 1;
@@ -499,7 +499,7 @@ void PortableApplyLayerNormFloat(const int16_t* input,
normalized_value * layer_norm_weights[i] * layer_norm_scale +
bias[i] * bias_scale;
const int32_t quant_output = static_cast<int32_t>(
- std::round(weighted_normalized_value * std::pow(2, 12)));
+ ::round(weighted_normalized_value * std::pow(2, 12)));
output[index] = std::min(int16_max, std::max(int16_min, quant_output));
}
}
diff --git a/tensorflow/lite/kernels/internal/reference/reference_ops.h b/tensorflow/lite/kernels/internal/reference/reference_ops.h
index df771bcca2..f416ad2200 100644
--- a/tensorflow/lite/kernels/internal/reference/reference_ops.h
+++ b/tensorflow/lite/kernels/internal/reference/reference_ops.h
@@ -737,7 +737,7 @@ void PackWithScaling(const PackParams& params,
auto input_ptr = input_data[i];
for (int j = 0; j < copy_size; ++j) {
const int32_t value =
- static_cast<int32_t>(std::round(input_ptr[j] * scale + bias)) +
+ static_cast<int32_t>(::round(input_ptr[j] * scale + bias)) +
output_zeropoint;
output_ptr[j] =
static_cast<uint8_t>(std::max(std::min(255, value), 0));
diff --git a/tensorflow/lite/kernels/internal/softmax_quantized_test.cc b/tensorflow/lite/kernels/internal/softmax_quantized_test.cc
index 9b5ef171ea..500d4f3fd4 100644
--- a/tensorflow/lite/kernels/internal/softmax_quantized_test.cc
+++ b/tensorflow/lite/kernels/internal/softmax_quantized_test.cc
@@ -57,7 +57,7 @@ void RunSoftmaxFloatReference(const uint8* input_data,
for (int i = 0; i < ref_buffer_size; i++) {
reference_output_data[i] = std::min(
255,
- static_cast<int>(std::round(256.0f * reference_output_float_data[i])));
+ static_cast<int>(::round(256.0f * reference_output_float_data[i])));
}
}
diff --git a/tensorflow/lite/kernels/test_util.h b/tensorflow/lite/kernels/test_util.h
index 9cd272f303..3101a867e0 100644
--- a/tensorflow/lite/kernels/test_util.h
+++ b/tensorflow/lite/kernels/test_util.h
@@ -69,7 +69,7 @@ inline std::vector<T> Quantize(const std::vector<float>& data, float scale,
q.push_back(static_cast<T>(std::max<float>(
std::numeric_limits<T>::min(),
std::min<float>(std::numeric_limits<T>::max(),
- std::round(zero_point + (f / scale))))));
+ ::round(zero_point + (f / scale))))));
}
return q;
}
@@ -750,7 +750,7 @@ class SingleOpModel {
} else if (zero_point_double > qmax_double) {
nudged_zero_point = qmax;
} else {
- nudged_zero_point = static_cast<T>(std::round(zero_point_double));
+ nudged_zero_point = static_cast<T>(::round(zero_point_double));
}
// The zero point should always be in the range of quantized value,
diff --git a/tensorflow/lite/micro/kernels/hard_swish_test.cc b/tensorflow/lite/micro/kernels/hard_swish_test.cc
index 9134587002..3e3acb5e67 100644
--- a/tensorflow/lite/micro/kernels/hard_swish_test.cc
+++ b/tensorflow/lite/micro/kernels/hard_swish_test.cc
@@ -143,9 +143,9 @@ void TestHardSwishQuantizedBias(const int size, const T* output_data,
// values.
TF_LITE_MICRO_EXPECT_LE(input_min, -3.0f);
TF_LITE_MICRO_EXPECT_GE(input_max, 3.0f);
- const int quantized_input_negative_three = std::round(
+ const int quantized_input_negative_three = ::round(
std::numeric_limits<T>::min() + (-3.0f - input_min) / input_scale);
- const int quantized_input_positive_three = std::round(
+ const int quantized_input_positive_three = ::round(
std::numeric_limits<T>::min() + (3.0f - input_min) / input_scale);
for (int i = quantized_input_negative_three;
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/fixedpoint_utils.h b/tensorflow/lite/micro/kernels/xtensa_hifimini/fixedpoint_utils.h
index a1d14df135..4a9c30f0af 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/fixedpoint_utils.h
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/fixedpoint_utils.h
@@ -102,7 +102,7 @@ inline void QuantizeMultiplierForInt24(float multiplier,
// Special cased to 24bit:
const float q = std::frexp(multiplier, shift);
- auto q_fixed = static_cast<int64_t>(std::round(q * (1 << 23)));
+ auto q_fixed = static_cast<int64_t>(::round(q * (1 << 23)));
TFLITE_CHECK(q_fixed <= (1 << 23));
if (q_fixed == (1 << 23)) {
@@ -126,7 +126,7 @@ inline int CreateQConstantForInt24(int integer_bits, float f) {
const float max_bounds = static_cast<float>(INT24_MAX);
int fractional_bits = 23 - integer_bits;
- float raw = std::round(f * static_cast<float>(1 << fractional_bits));
+ float raw = ::round(f * static_cast<float>(1 << fractional_bits));
raw = std::max(raw, min_bounds);
raw = std::min(raw, max_bounds);
return static_cast<int>(raw);
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/softmax.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/softmax.cc
index 79a44e2c67..acaf8344fb 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/softmax.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/softmax.cc
@@ -136,7 +136,7 @@ TfLiteStatus CalculateSoftmaxOpData(TfLiteContext* context,
std::exp((-scaled_input) * static_cast<float>(params->beta));
float exponent_scaled =
- std::round(exp_value * static_cast<float>(1 << kExpFractionalBits));
+ ::round(exp_value * static_cast<float>(1 << kExpFractionalBits));
op_data->exp_lut[i] = static_cast<uint16_t>(exponent_scaled);
}
}
diff --git a/tensorflow/lite/toco/graph_transformations/quantization_util.cc b/tensorflow/lite/toco/graph_transformations/quantization_util.cc
index 76ead65810..ae8c3b8cf0 100644
--- a/tensorflow/lite/toco/graph_transformations/quantization_util.cc
+++ b/tensorflow/lite/toco/graph_transformations/quantization_util.cc
@@ -149,7 +149,7 @@ std::unique_ptr<GenericBuffer> QuantizeBuffer(
} else {
scaled_val = quantization_params.zero_point + inverse_scale * src_val;
}
- auto integer_val = tflite::SafeCast<DataType<A>>(std::round(scaled_val));
+ auto integer_val = tflite::SafeCast<DataType<A>>(::round(scaled_val));
// In addition to its effect on the choice of quantization params upstream
// of here, narrow_range also means nudge the min quantized value by +1,
// so e.g. uint8 values get constrained to [1, 255].
diff --git a/tensorflow/lite/toco/graph_transformations/quantize.cc b/tensorflow/lite/toco/graph_transformations/quantize.cc
index c5848f83dd..aa19dc3a7e 100644
--- a/tensorflow/lite/toco/graph_transformations/quantize.cc
+++ b/tensorflow/lite/toco/graph_transformations/quantize.cc
@@ -312,11 +312,11 @@ bool IsExactlyRepresentable(double real_value, ArrayDataType data_type,
const double scaled_value =
quantization_params.zero_point + real_value / quantization_params.scale;
const double fractional_scaled_value =
- scaled_value - std::round(scaled_value);
+ scaled_value - ::round(scaled_value);
if (std::abs(fractional_scaled_value) > 1e-12) {
return false;
}
- const double rounded_scaled_value = std::round(scaled_value);
+ const double rounded_scaled_value = ::round(scaled_value);
if (data_type == ArrayDataType::kUint8) {
if (rounded_scaled_value < 0 || rounded_scaled_value > 255) {
return false;
diff --git a/tensorflow/lite/tools/optimize/quantization_utils.cc b/tensorflow/lite/tools/optimize/quantization_utils.cc
index 81110071dc..f0c8e9b097 100644
--- a/tensorflow/lite/tools/optimize/quantization_utils.cc
+++ b/tensorflow/lite/tools/optimize/quantization_utils.cc
@@ -77,7 +77,7 @@ void GetAsymmetricQuantizationParams(
} else if (zero_point_from_min > quant_max_float) {
zero_point = static_cast<int64_t>(quant_max);
} else {
- zero_point = static_cast<int64_t>(std::round(zero_point_from_min));
+ zero_point = static_cast<int64_t>(::round(zero_point_from_min));
}
quantization_params->min = std::vector<float>(1, min);
quantization_params->max = std::vector<float>(1, max);
--
2.28.0
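Each file above is standard git format-patch output and can be applied from the TensorFlow source root with git am (or git apply). Most of this third patch is a mechanical std:: to :: rewrite; the one structural change is in cppmath.h, whose dispatch macro is hard-wired to the global namespace. That mechanism in isolation (the macro and the TfLiteRound wrapper are from the patched file; the usage comment is mine):

// cppmath sketch -- illustrative only.
#include <math.h>

#define DECLARE_STD_GLOBAL_SWITCH1(tf_name, std_name) \
  template <class T>                                  \
  inline T tf_name(const T x) {                       \
    return ::std_name(x);                             \
  }

DECLARE_STD_GLOBAL_SWITCH1(TfLiteRound, round);

// TfLiteRound(2.5f) == 3.0f: ::round rounds halfway cases away from zero.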
#if !defined(EIGEN_GPUCC)
// HIP and CUDA do not support long double.
#include <cmath>
template<>
EIGEN_DEVICE_FUNC
EIGEN_ALWAYS_INLINE long double absdiff(const long double& x, const long double& y) {
  return __builtin_fabsl(x - y);
}
#endif
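The fragment above is the matching Eigen-side tweak: the long double specialization of absdiff is compiled only for host builds, since HIP and CUDA do not support long double, and it calls the __builtin_fabsl intrinsic directly rather than going through std::. The same guarded-specialization pattern in isolation (the compiler-macro guard here is my assumption; Eigen's EIGEN_GPUCC wraps the equivalent checks):

// absdiff sketch -- illustrative only; names are not Eigen's.
template <typename T>
T absdiff(const T& x, const T& y) { return x < y ? y - x : x - y; }

#if !defined(__CUDACC__) && !defined(__HIPCC__)
template <>
long double absdiff(const long double& x, const long double& y) {
  return __builtin_fabsl(x - y);  // GCC/Clang intrinsic, no <cmath> needed
}
#endif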