adler32_benchmark_pr_1053
/*
cmake_minimum_required(VERSION 3.17)
cmake_policy(SET CMP0091 NEW)
project(adler32_benchmark CXX)
set(BENCHMARK_ENABLE_GTEST_TESTS OFF)
add_executable(adler32_benchmark)
add_subdirectory(benchmark)
target_sources(adler32_benchmark PRIVATE adler32_benchmark.cc)
target_include_directories(adler32_benchmark PRIVATE benchmark/include)
target_link_directories(adler32_benchmark PRIVATE benchmark/build/src/Release)
target_link_libraries(adler32_benchmark benchmark shlwapi)
*/
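/*
One possible build sequence (a sketch, not from the gist): it assumes google/benchmark is
checked out into ./benchmark and prebuilt into benchmark/build, matching the
target_link_directories() path above, e.g. on Windows with MSVC:

    cmake -S benchmark -B benchmark/build -DBENCHMARK_ENABLE_GTEST_TESTS=OFF
    cmake --build benchmark/build --config Release
    cmake -S . -B build
    cmake --build build --config Release
*/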
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>  /* malloc/free */
#include <string.h>  /* memset */
#include <immintrin.h>
#include <benchmark/benchmark.h>
#define BASE 65521U /* largest prime smaller than 65536 */
#define NMAX 5552
/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */
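/* Worked check of that bound (an aside): n = 5552 gives 255*5552*5553/2 + 5553*65520
   = 3930857640 + 363832560 = 4294690200 <= 4294967295, while n = 5553 gives
   3932273655 + 363898080 = 4296171735, which already exceeds 2^32-1. */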
#define DO1(sum1, sum2, buf, i) {(sum1) += buf[(i)]; (sum2) += (sum1);}
#define DO2(sum1, sum2, buf, i) {DO1(sum1, sum2, buf, i); DO1(sum1, sum2, buf, i+1);}
#define DO4(sum1, sum2, buf, i) {DO2(sum1, sum2, buf, i); DO2(sum1, sum2, buf, i+2);}
#define DO8(sum1, sum2, buf, i) {DO4(sum1, sum2, buf, i); DO4(sum1, sum2, buf, i+4);}
#define DO16(sum1, sum2, buf) {DO8(sum1, sum2, buf, 0); DO8(sum1, sum2, buf, 8);}
/* Only enable likely/unlikely if the compiler is known to support it */
#if (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__INTEL_COMPILER) || defined(__clang__)
# define LIKELY_NULL(x) __builtin_expect((x) != 0, 0)
# define LIKELY(x) __builtin_expect(!!(x), 1)
# define UNLIKELY(x) __builtin_expect(!!(x), 0)
# define PREFETCH_L1(addr) __builtin_prefetch(addr, 0, 3)
# define PREFETCH_L2(addr) __builtin_prefetch(addr, 0, 2)
# define PREFETCH_RW(addr) __builtin_prefetch(addr, 1, 2)
#elif defined(_WIN32)
# include <xmmintrin.h>
# define LIKELY_NULL(x) x
# define LIKELY(x) x
# define UNLIKELY(x) x
# define PREFETCH_L1(addr) _mm_prefetch((char *) addr, _MM_HINT_T0)
# define PREFETCH_L2(addr) _mm_prefetch((char *) addr, _MM_HINT_T1)
# define PREFETCH_RW(addr) _mm_prefetch((char *) addr, _MM_HINT_T1)
#else
# define LIKELY_NULL(x) x
# define LIKELY(x) x
# define UNLIKELY(x) x
# define PREFETCH_L1(addr) addr
# define PREFETCH_L2(addr) addr
# define PREFETCH_RW(addr) addr
#endif /* (un)likely */
#if defined(_MSC_VER)
# define ALIGNED_(x) __declspec(align(x))
#else
# if defined(__GNUC__)
# define ALIGNED_(x) __attribute__ ((aligned(x)))
# endif
#endif
static inline uint32_t adler32_len_1(uint32_t adler, const unsigned char *buf, uint32_t sum2) {
adler += buf[0];
adler %= BASE;
sum2 += adler;
sum2 %= BASE;
return adler | (sum2 << 16);
}
static inline uint32_t adler32_len_16(uint32_t adler, const unsigned char *buf, size_t len, uint32_t sum2) {
while (len) {
--len;
adler += *buf++;
sum2 += adler;
}
adler %= BASE;
sum2 %= BASE; /* only added so many BASE's */
return adler | (sum2 << 16);
}
static inline uint32_t adler32_len_64(uint32_t adler, const unsigned char *buf, size_t len, uint32_t sum2) {
while (len >= 16) {
len -= 16;
DO16(adler, sum2, buf);
buf += 16;
}
/* Process tail (len < 16). */
return adler32_len_16(adler, buf, len, sum2);
}
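/* Scalar reference (a sketch added for context, not part of PR 1053): the classic zlib
   NMAX-chunked loop built from the DO16 macro and the helpers above. Handy as a baseline for
   cross-checking the AVX2 paths below. */
static inline uint32_t adler32_scalar_ref(uint32_t adler, const unsigned char *buf, size_t len) {
    uint32_t sum2 = (adler >> 16) & 0xffff;
    adler &= 0xffff;
    if (buf == NULL)
        return 1L;
    if (len < 16)
        return adler32_len_16(adler, buf, len, sum2);
    while (len >= NMAX) {
        len -= NMAX;
        unsigned n = NMAX / 16;          /* NMAX is divisible by 16 */
        do {
            DO16(adler, sum2, buf);      /* 16 unrolled byte sums */
            buf += 16;
        } while (--n);
        adler %= BASE;
        sum2 %= BASE;
    }
    /* Remaining < NMAX bytes cannot overflow; adler32_len_64 finishes 16-byte blocks and the tail. */
    return adler32_len_64(adler, buf, len, sum2);
}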
/* 64 bit horizontal sum, adapted from Agner Fog's vector library. */
static inline uint64_t hsum(__m256i x) {
__m256i sum1 = _mm256_shuffle_epi32(x, 0x0E);
__m256i sum2 = _mm256_add_epi64(x, sum1);
__m128i sum3 = _mm256_extracti128_si256(sum2, 1);
#if defined(__x86_64__) || defined(_M_X64)
return _mm_cvtsi128_si64(_mm_add_epi64(_mm256_castsi256_si128(sum2), sum3));
#else
__m128i ret_vec = _mm_add_epi64(_mm256_castsi256_si128(sum2), sum3);
uint64_t ret_val;
_mm_storel_epi64((__m128i*)&ret_val, ret_vec);
return ret_val;
#endif
}
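/* Tiny illustration of hsum (hypothetical helper, added as an aside): summing the four
   64-bit lanes {1, 2, 3, 4} yields 10. */
static inline int hsum_selftest(void) {
    __m256i v = _mm256_setr_epi64x(1, 2, 3, 4);
    return hsum(v) == 10;
}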
uint32_t adler32_avx2_new(uint32_t adler, const unsigned char *buf, size_t len) {
uint32_t sum2;
/* split Adler-32 into component sums */
sum2 = (adler >> 16) & 0xffff;
adler &= 0xffff;
/* in case user likes doing a byte at a time, keep it fast */
if (UNLIKELY(len == 1))
return adler32_len_1(adler, buf, sum2);
/* initial Adler-32 value (deferred check for len == 1 speed) */
if (UNLIKELY(buf == NULL))
return 1L;
/* in case short lengths are provided, keep it somewhat fast */
if (UNLIKELY(len < 16))
return adler32_len_16(adler, buf, len, sum2);
const __m256i vs_mask = _mm256_setr_epi32(0, 0, 0, 0, 0, 0, 0, -1);
__m256i vs1 = _mm256_set1_epi32(adler);
__m256i vs2 = _mm256_set1_epi32(sum2);
vs1 = _mm256_and_si256(vs1, vs_mask);
vs2 = _mm256_and_si256(vs2, vs_mask);
const __m256i dot1v = _mm256_set1_epi8(1);
const __m256i dot2v = _mm256_setr_epi8(32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15,
14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1);
const __m256i dot3v = _mm256_set1_epi16(1);
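/* Worked picture of one 32-byte step in the inner loop below (illustration only):
     _mm256_maddubs_epi16(vbuf, dot1v) -> 16 shorts, each the sum of two adjacent bytes
     _mm256_madd_epi16(..., dot3v)     -> 8 int32 partial byte sums, added into vs1
     _mm256_maddubs_epi16(vbuf, dot2v) -> 16 shorts, each a pair sum weighted by 32-i
     _mm256_madd_epi16(..., dot3v)     -> 8 int32 weighted sums, added into vs2
     vs1_0 << 5                        -> 32 * previous vs1, i.e. the "sum2 += 32*adler" term */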
while (len >= 32) {
__m256i vs1_0 = vs1;
int k = (len < NMAX ? (int)len : NMAX);
k -= k % 32;
len -= k;
while (k >= 32) {
/*
vs1 = adler + sum(c[i])
vs2 = sum2 + 32 vs1 + sum( (32-i+1) c[i] )
*/
__m256i vbuf = _mm256_loadu_si256((__m256i*)buf);
buf += 32;
k -= 32;
__m256i v_short_sum1 = _mm256_maddubs_epi16(vbuf, dot1v); // multiply-add, resulting in 16 shorts.
__m256i vsum1 = _mm256_madd_epi16(v_short_sum1, dot3v); // sum pairs of shorts into 8 int32_t;
__m256i v_short_sum2 = _mm256_maddubs_epi16(vbuf, dot2v);
vs1 = _mm256_add_epi32(vsum1, vs1);
__m256i vsum2 = _mm256_madd_epi16(v_short_sum2, dot3v);
vs1_0 = _mm256_slli_epi32(vs1_0, 5);
vsum2 = _mm256_add_epi32(vsum2, vs2);
vs2 = _mm256_add_epi32(vsum2, vs1_0);
vs1_0 = vs1;
}
/* The compiler is generating the following sequence for this integer modulus
* when done the scalar way, in GPRs:
adler = (s1_unpack[0] % BASE) + (s1_unpack[1] % BASE) + (s1_unpack[2] % BASE) + (s1_unpack[3] % BASE) +
(s1_unpack[4] % BASE) + (s1_unpack[5] % BASE) + (s1_unpack[6] % BASE) + (s1_unpack[7] % BASE);
mov $0x80078071,%edi // move magic constant into 32 bit register %edi
...
vmovd %xmm1,%esi // move vector lane 0 to 32 bit register %esi
mov %rsi,%rax // zero-extend this value to 64 bit precision in %rax
imul %rdi,%rsi // do a signed multiplication with magic constant and vector element
shr $0x2f,%rsi // shift right by 47
imul $0xfff1,%esi,%esi // do a signed multiplication with value truncated to 32 bits with 0xfff1
sub %esi,%eax // subtract lower 32 bits of original vector value from modified one above
...
// repeats for each element with vpextract instructions
This is tricky with AVX2 for a number of reasons:
1.) There's no 64 bit multiplication instruction, but there is a sequence to get there
2.) There's ways to extend vectors to 64 bit precision, but no simple way to truncate
back down to 32 bit precision later (there is in AVX512)
3.) Full width integer multiplications aren't cheap
We can, however, cast up to 64 bit precision on all 8 integers at once, and do a relatively
cheap sequence for horizontal sums. Then, we simply do the integer modulus on the resulting
64 bit GPR, on a scalar value
*/
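/* For reference (an aside, not from the original comment): 0x80078071 == 2147975281 ==
   ceil(2^47 / 65521), so the multiply + shift-right-by-47 above is the standard
   reciprocal-multiplication lowering of x / 65521 for 32-bit x; multiplying the quotient by
   0xfff1 (65521) and subtracting then yields x % 65521. */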
/* Will translate to nops */
__m128i s1lo = _mm256_castsi256_si128(vs1);
__m128i s2lo = _mm256_castsi256_si128(vs2);
/* Requires vextracti128 */
__m128i s1hi = _mm256_extracti128_si256(vs1, 1);
__m128i s2hi = _mm256_extracti128_si256(vs2, 1);
/* Convert up to 64 bit precision to prevent overflow */
__m256i s1lo256 = _mm256_cvtepi32_epi64(s1lo);
__m256i s1hi256 = _mm256_cvtepi32_epi64(s1hi);
__m256i s2lo256 = _mm256_cvtepi32_epi64(s2lo);
__m256i s2hi256 = _mm256_cvtepi32_epi64(s2hi);
/* Sum vectors in existing lanes */
__m256i s1_sum = _mm256_add_epi64(s1lo256, s1hi256);
__m256i s2_sum = _mm256_add_epi64(s2lo256, s2hi256);
/* In AVX2-land, this trip through GPRs will probably be unavoidable, as there's no cheap and easy
* conversion from 64 bit integer to 32 bit. This casting to 32 bit is cheap through GPRs
* (just register aliasing), and safe, as our base is significantly smaller than UINT32_MAX */
adler = (uint32_t)(hsum(s1_sum) % BASE);
sum2 = (uint32_t)(hsum(s2_sum) % BASE);
vs1 = _mm256_set1_epi32(adler);
vs1 = _mm256_and_si256(vs1, vs_mask);
vs2 = _mm256_set1_epi32(sum2);
vs2 = _mm256_and_si256(vs2, vs_mask);
}
/* Process tail (len < 32). */
return adler32_len_16(adler, buf, len, sum2);
}
uint32_t adler32_avx2_old(uint32_t adler, const unsigned char *buf, size_t len) {
uint32_t sum2;
/* split Adler-32 into component sums */
sum2 = (adler >> 16) & 0xffff;
adler &= 0xffff;
/* in case user likes doing a byte at a time, keep it fast */
if (UNLIKELY(len == 1))
return adler32_len_1(adler, buf, sum2);
/* initial Adler-32 value (deferred check for len == 1 speed) */
if (UNLIKELY(buf == NULL))
return 1L;
/* in case short lengths are provided, keep it somewhat fast */
if (UNLIKELY(len < 16))
return adler32_len_16(adler, buf, len, sum2);
uint32_t ALIGNED_(32) s1[8], s2[8];
memset(s1, 0, sizeof(s1)); s1[7] = adler; // TODO: would a masked load be faster?
memset(s2, 0, sizeof(s2)); s2[7] = sum2;
char ALIGNED_(32) dot1[32] = \
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
__m256i dot1v = _mm256_load_si256((__m256i*)dot1);
char ALIGNED_(32) dot2[32] = \
{32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17,
16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
__m256i dot2v = _mm256_load_si256((__m256i*)dot2);
short ALIGNED_(32) dot3[16] = \
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
__m256i dot3v = _mm256_load_si256((__m256i*)dot3);
// Shift count vector: _mm256_sll_epi32 below shifts the previous vs1 left by 5 bits, i.e. multiplies it by 32 once per 32-byte block.
char ALIGNED_(32) shift[16] = {5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
__m128i shiftv = _mm_load_si128((__m128i*)shift);
while (len >= 32) {
__m256i vs1 = _mm256_load_si256((__m256i*)s1);
__m256i vs2 = _mm256_load_si256((__m256i*)s2);
__m256i vs1_0 = vs1;
int k = (len < NMAX ? (int)len : NMAX);
k -= k % 32;
len -= k;
while (k >= 32) {
/*
vs1 = adler + sum(c[i])
vs2 = sum2 + 32 vs1 + sum( (32-i+1) c[i] )
*/
__m256i vbuf = _mm256_loadu_si256((__m256i*)buf);
buf += 32;
k -= 32;
__m256i v_short_sum1 = _mm256_maddubs_epi16(vbuf, dot1v); // multiply-add, resulting in 16 shorts.
__m256i vsum1 = _mm256_madd_epi16(v_short_sum1, dot3v); // sum pairs of shorts into 8 int32_t;
__m256i v_short_sum2 = _mm256_maddubs_epi16(vbuf, dot2v);
vs1 = _mm256_add_epi32(vsum1, vs1);
__m256i vsum2 = _mm256_madd_epi16(v_short_sum2, dot3v);
vs1_0 = _mm256_sll_epi32(vs1_0, shiftv);
vsum2 = _mm256_add_epi32(vsum2, vs2);
vs2 = _mm256_add_epi32(vsum2, vs1_0);
vs1_0 = vs1;
}
// At this point, we have partial sums stored in vs1 and vs2. There are AVX512 instructions that
// would allow us to sum these quickly (VP4DPWSSD). For now, just unpack and move on.
uint32_t ALIGNED_(32) s1_unpack[8];
uint32_t ALIGNED_(32) s2_unpack[8];
_mm256_store_si256((__m256i*)s1_unpack, vs1);
_mm256_store_si256((__m256i*)s2_unpack, vs2);
adler = (s1_unpack[0] % BASE) + (s1_unpack[1] % BASE) + (s1_unpack[2] % BASE) + (s1_unpack[3] % BASE) +
(s1_unpack[4] % BASE) + (s1_unpack[5] % BASE) + (s1_unpack[6] % BASE) + (s1_unpack[7] % BASE);
adler %= BASE;
s1[7] = adler;
sum2 = (s2_unpack[0] % BASE) + (s2_unpack[1] % BASE) + (s2_unpack[2] % BASE) + (s2_unpack[3] % BASE) +
(s2_unpack[4] % BASE) + (s2_unpack[5] % BASE) + (s2_unpack[6] % BASE) + (s2_unpack[7] % BASE);
sum2 %= BASE;
s2[7] = sum2;
}
/* Process tail (len < 32). */
return adler32_len_16(adler, buf, len, sum2);
}
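/* Consistency sketch (an addition for illustration; the function name is made up here and is not
   part of the gist): both AVX2 paths should return identical checksums for any input, otherwise
   the timing comparison is meaningless. It could be called on `window` from main() before the
   benchmarks run. The length list is an arbitrary pick covering the scalar, 16-byte and 32-byte
   code paths. */
int adler32_consistency_check(const unsigned char *data, size_t max_len) {
    static const size_t lens[] = {0, 1, 7, 15, 16, 31, 32, 33, 255, 4096, 65536};
    for (size_t i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
        size_t len = lens[i] < max_len ? lens[i] : max_len;
        uint32_t a_old = adler32_avx2_old(1, data, len);
        uint32_t a_new = adler32_avx2_new(1, data, len);
        if (a_old != a_new) {
            fprintf(stderr, "adler32 mismatch at len=%zu: old=%08x new=%08x\n",
                    len, (unsigned)a_old, (unsigned)a_new);
            return 0;
        }
    }
    return 1;
}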
static unsigned char * window;
static int32_t window_len;
static int32_t scan_pos;
static void adler32_avx2_old_bench(benchmark::State& state) {
uint32_t adler = 1;
scan_pos = 0;
while (state.KeepRunning()) {
adler = adler32_avx2_old(adler, window + scan_pos, window_len - scan_pos);
scan_pos++;
if (scan_pos > window_len) {
scan_pos = 0;
}
benchmark::DoNotOptimize(adler);
}
}
BENCHMARK(adler32_avx2_old_bench);
static void adler32_avx2_new_bench(benchmark::State& state) {
uint32_t adler = 1;
scan_pos = 0;
while (state.KeepRunning()) {
adler = adler32_avx2_new(adler, window + scan_pos, window_len - scan_pos);
scan_pos++;
if (scan_pos > window_len) {
scan_pos = 0;
}
benchmark::DoNotOptimize(adler);
}
}
BENCHMARK(adler32_avx2_new_bench);
int main(int argc, char** argv)
{
window_len = 1024*1024;
window = (unsigned char *)malloc(window_len);
memset(window, 0xa5, window_len); /* fixed pattern so the input is deterministic across runs */
scan_pos = 0;
::benchmark::Initialize(&argc, argv);
::benchmark::RunSpecifiedBenchmarks();
free(window);
return 0;
}