// Hash prospector, Windows port. Gist by @benanil, last active May 21, 2023.
#include <math.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include "Timer.hpp"
// #include <dlfcn.h>
// #include <unistd.h>
// #include <sys/mman.h>
// #include <sys/time.h>
#define ABI
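/* Note: ABI is left empty and unused here. In the upstream prospector it marks
 * the hash function-pointer type with the System V calling convention (e.g.
 * __attribute__((sysv_abi))), because the JIT-compiled code below expects its
 * argument in edi/rdi and scratches rdi, whereas the native Windows x64
 * convention passes the first argument in ecx/rcx and treats rdi as
 * callee-saved. If building with GCC/Clang on Windows, defining ABI that way
 * and applying it to the function-pointer casts in main() is one way to
 * reconcile the two; this is an assumption about the build, not something the
 * gist itself does. */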
#define countof(a) ((int)(sizeof(a) / sizeof(0[a])))
static uint64_t xoroshiro128plus(uint64_t s[2])
{
uint64_t s0 = s[0];
uint64_t s1 = s[1];
uint64_t result = s0 + s1;
s1 ^= s0;
s[0] = ((s0 << 24) | (s0 >> 40)) ^ s1 ^ (s1 << 16);
s[1] = (s1 << 37) | (s1 >> 27);
return result;
}
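/* xoroshiro128+ supplies all randomness below: operation selection, constants,
 * and the inputs sampled by estimate_bias32/64. Its two-word state must not be
 * all zero; main() reseeds it from the RDSEED instruction. */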
enum hf_type : int {
/* 32 bits */
HF32_XOR, // x ^= const32
HF32_MUL, // x *= const32 (odd)
HF32_ADD, // x += const32
HF32_ROT, // x = (x << const5) | (x >> (32 - const5))
HF32_NOT, // x = ~x
HF32_BSWAP,// x = bswap32(x)
HF32_XORL, // x ^= x << const5
HF32_XORR, // x ^= x >> const5
HF32_ADDL, // x += x << const5
HF32_SUBL, // x -= x << const5
/* 64 bits */
HF64_XOR,
HF64_MUL,
HF64_ADD,
HF64_ROT,
HF64_NOT,
HF64_BSWAP,
HF64_XORL,
HF64_XORR,
HF64_ADDL,
HF64_SUBL,
};
static const char hf_names[][8] = {
"32xor",
"32mul",
"32add",
"32rot",
"32not",
"32bswap",
"32xorl",
"32xorr",
"32addl",
"32subl",
"64xor",
"64mul",
"64add",
"64rot",
"64not",
"64bswap",
"64xorl",
"64xorr",
"64addl",
"64subl",
};
#define FOP_LOCKED (1 << 0)
struct hf_op {
hf_type type;
uint64_t constant;
int flags;
};
/* Randomize the constants of the given hash operation.
*/
static void hf_randomize(struct hf_op* op, uint64_t s[2])
{
uint64_t r = xoroshiro128plus(s);
switch (op->type) {
case HF32_NOT:
case HF64_NOT:
case HF32_BSWAP:
case HF64_BSWAP:
op->constant = 0;
break;
case HF32_XOR:
case HF32_ADD:
op->constant = (uint32_t)r;
break;
case HF32_MUL:
op->constant = (uint32_t)r | 1;
break;
case HF32_ROT:
case HF32_XORL:
case HF32_XORR:
case HF32_ADDL:
case HF32_SUBL:
op->constant = 1 + r % 31;
break;
case HF64_XOR:
case HF64_ADD:
op->constant = r;
break;
case HF64_MUL:
op->constant = r | 1;
break;
case HF64_ROT:
case HF64_XORL:
case HF64_XORR:
case HF64_ADDL:
case HF64_SUBL:
op->constant = 1 + r % 63;
break;
}
}
#define F_U64 (8)       // generate 64-bit operations; unset means 32-bit
#define F_TINY (1 << 1) // don't use big constants
static void hf_gen(hf_op* op, uint64_t s[2], int flags)
{
uint64_t r = xoroshiro128plus(s);
int min = flags & F_TINY ? HF32_ROT : HF32_XOR; // F_TINY skips the ops that need big constants
op->type = (hf_type)(min + r % (HF32_SUBL + 1 - min) + (flags & F_U64 ? HF64_XOR : 0));
op->flags = 0;
hf_randomize(op, s);
}
// Return 1 if these operations may be adjacent
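// (e.g. two consecutive "x ^= const" or "x *= const" steps collapse into a
// single equivalent operation, so identical neighbors are rejected; the
// shift-and-combine ops may repeat back to back because their shift amounts
// generally differ)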
static int hf_type_valid(hf_type a, hf_type b)
{
switch (a) {
case HF32_NOT:
case HF32_BSWAP:
case HF32_XOR:
case HF32_MUL:
case HF32_ADD:
case HF32_ROT:
case HF64_NOT:
case HF64_BSWAP:
case HF64_XOR:
case HF64_MUL:
case HF64_ADD:
case HF64_ROT:
return a != b;
case HF32_XORL:
case HF32_XORR:
case HF32_ADDL:
case HF32_SUBL:
case HF64_XORL:
case HF64_XORR:
case HF64_ADDL:
case HF64_SUBL:
return 1;
}
abort();
}
static void hf_genfunc(struct hf_op* ops, int n, int flags, uint64_t s[2])
{
hf_gen(ops, s, flags);
for (int i = 1; i < n; i++) {
do {
hf_gen(ops + i, s, flags);
} while (!hf_type_valid(ops[i - 1].type, ops[i].type));
}
}
/* Randomize the parameters of the given function (skipping locked operations).
*/
static void hf_randfunc(struct hf_op* ops, int n, uint64_t s[2])
{
for (int i = 0; i < n; i++)
if (!(ops[i].flags & FOP_LOCKED))
hf_randomize(ops + i, s);
}
static void hf_print(const struct hf_op* op, char* buf)
{
unsigned long long c = op->constant;
switch (op->type) {
case HF32_NOT:
case HF64_NOT:
sprintf(buf, "x = ~x;");
break;
case HF32_BSWAP:
sprintf(buf, "x = __builtin_bswap32(x);");
break;
case HF64_BSWAP:
sprintf(buf, "x = __builtin_bswap64(x);");
break;
case HF32_XOR:
sprintf(buf, "x ^= 0x%08llx;", c);
break;
case HF32_MUL:
sprintf(buf, "x *= 0x%08llx;", c);
break;
case HF32_ADD:
sprintf(buf, "x += 0x%08llx;", c);
break;
case HF32_ROT:
sprintf(buf, "x = (x << %llu) | (x >> %lld);", c, 32 - c);
break;
case HF32_XORL:
sprintf(buf, "x ^= x << %llu;", c);
break;
case HF32_XORR:
sprintf(buf, "x ^= x >> %llu;", c);
break;
case HF32_ADDL:
sprintf(buf, "x += x << %llu;", c);
break;
case HF32_SUBL:
sprintf(buf, "x -= x << %llu;", c);
break;
case HF64_XOR:
sprintf(buf, "x ^= 0x%016llx;", c);
break;
case HF64_MUL:
sprintf(buf, "x *= 0x%016llx;", c);
break;
case HF64_ADD:
sprintf(buf, "x += 0x%016llx;", c);
break;
case HF64_ROT:
sprintf(buf, "x = (x << %llu) | (x >> %lld);", c, 64 - c);
break;
case HF64_XORL:
sprintf(buf, "x ^= x << %llu;", c);
break;
case HF64_XORR:
sprintf(buf, "x ^= x >> %llu;", c);
break;
case HF64_ADDL:
sprintf(buf, "x += x << %llu;", c);
break;
case HF64_SUBL:
sprintf(buf, "x -= x << %llu;", c);
break;
}
}
static void hf_printfunc(const struct hf_op* ops, int n, FILE* f)
{
if (ops[0].type <= HF32_SUBL)
fprintf(f, "uint32_t\nhash(uint32_t x)\n{\n");
else
fprintf(f, "uint64_t\nhash(uint64_t x)\n{\n");
for (int i = 0; i < n; i++) {
char buf[64];
hf_print(ops + i, buf);
fprintf(f, " %s\n", buf);
}
fprintf(f, " return x;\n}\n");
}
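/* Example: with the default template "xorr:15,mul,xorr:14,mul,xorr:15" from
 * main(), hf_printfunc emits something of this shape (the two multiplier
 * constants below are placeholders; real runs substitute random odd 32-bit
 * values drawn from the PRNG):
 *
 * uint32_t
 * hash(uint32_t x)
 * {
 *  x ^= x >> 15;
 *  x *= 0x0a1b2c3d;
 *  x ^= x >> 14;
 *  x *= 0x11223345;
 *  x ^= x >> 15;
 *  return x;
 * }
 */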
static unsigned char* hf_compile(const struct hf_op* ops, int n, unsigned char* buf)
{
if (ops[0].type <= HF32_SUBL) {
/* mov eax, edi*/
*buf++ = 0x89;
*buf++ = 0xf8;
}
else {
/* mov rax, rdi*/
*buf++ = 0x48;
*buf++ = 0x89;
*buf++ = 0xf8;
}
for (int i = 0; i < n; i++) {
switch (ops[i].type) {
case HF32_NOT:
/* not eax */
*buf++ = 0xf7;
*buf++ = 0xd0;
break;
case HF32_BSWAP:
/* bswap eax */
*buf++ = 0x0f;
*buf++ = 0xc8;
break;
case HF32_XOR:
/* xor eax, imm32 */
*buf++ = 0x35;
*buf++ = ops[i].constant >> 0;
*buf++ = ops[i].constant >> 8;
*buf++ = ops[i].constant >> 16;
*buf++ = ops[i].constant >> 24;
break;
case HF32_MUL:
/* imul eax, eax, imm32 */
*buf++ = 0x69;
*buf++ = 0xc0;
*buf++ = ops[i].constant >> 0;
*buf++ = ops[i].constant >> 8;
*buf++ = ops[i].constant >> 16;
*buf++ = ops[i].constant >> 24;
break;
case HF32_ADD:
/* add eax, imm32 */
*buf++ = 0x05;
*buf++ = ops[i].constant >> 0;
*buf++ = ops[i].constant >> 8;
*buf++ = ops[i].constant >> 16;
*buf++ = ops[i].constant >> 24;
break;
case HF32_ROT:
/* rol eax, imm8 */
*buf++ = 0xc1;
*buf++ = 0xc0;
*buf++ = ops[i].constant;
break;
case HF32_XORL:
/* mov edi, eax */
*buf++ = 0x89;
*buf++ = 0xc7;
/* shl edi, imm8 */
*buf++ = 0xc1;
*buf++ = 0xe7;
*buf++ = ops[i].constant;
/* xor eax, edi */
*buf++ = 0x31;
*buf++ = 0xf8;
break;
case HF32_XORR:
/* mov edi, eax */
*buf++ = 0x89;
*buf++ = 0xc7;
/* shr edi, imm8 */
*buf++ = 0xc1;
*buf++ = 0xef;
*buf++ = ops[i].constant;
/* xor eax, edi */
*buf++ = 0x31;
*buf++ = 0xf8;
break;
case HF32_ADDL:
/* mov edi, eax */
*buf++ = 0x89;
*buf++ = 0xc7;
/* shl edi, imm8 */
*buf++ = 0xc1;
*buf++ = 0xe7;
*buf++ = ops[i].constant;
/* add eax, edi */
*buf++ = 0x01;
*buf++ = 0xf8;
break;
case HF32_SUBL:
/* mov edi, eax */
*buf++ = 0x89;
*buf++ = 0xc7;
/* shl edi, imm8 */
*buf++ = 0xc1;
*buf++ = 0xe7;
*buf++ = ops[i].constant;
/* sub eax, edi */
*buf++ = 0x29;
*buf++ = 0xf8;
break;
case HF64_NOT:
/* not rax */
*buf++ = 0x48;
*buf++ = 0xf7;
*buf++ = 0xd0;
break;
case HF64_BSWAP:
/* bswap rax */
*buf++ = 0x48;
*buf++ = 0x0f;
*buf++ = 0xc8;
break;
case HF64_XOR:
/* mov rdi, imm64 */
*buf++ = 0x48;
*buf++ = 0xbf;
*buf++ = ops[i].constant >> 0;
*buf++ = ops[i].constant >> 8;
*buf++ = ops[i].constant >> 16;
*buf++ = ops[i].constant >> 24;
*buf++ = ops[i].constant >> 32;
*buf++ = ops[i].constant >> 40;
*buf++ = ops[i].constant >> 48;
*buf++ = ops[i].constant >> 56;
/* xor rax, rdi */
*buf++ = 0x48;
*buf++ = 0x31;
*buf++ = 0xf8;
break;
case HF64_MUL:
/* mov rdi, imm64 */
*buf++ = 0x48;
*buf++ = 0xbf;
*buf++ = ops[i].constant >> 0;
*buf++ = ops[i].constant >> 8;
*buf++ = ops[i].constant >> 16;
*buf++ = ops[i].constant >> 24;
*buf++ = ops[i].constant >> 32;
*buf++ = ops[i].constant >> 40;
*buf++ = ops[i].constant >> 48;
*buf++ = ops[i].constant >> 56;
/* imul rax, rdi */
*buf++ = 0x48;
*buf++ = 0x0f;
*buf++ = 0xaf;
*buf++ = 0xc7;
break;
case HF64_ADD:
/* mov rdi, imm64 */
*buf++ = 0x48;
*buf++ = 0xbf;
*buf++ = ops[i].constant >> 0;
*buf++ = ops[i].constant >> 8;
*buf++ = ops[i].constant >> 16;
*buf++ = ops[i].constant >> 24;
*buf++ = ops[i].constant >> 32;
*buf++ = ops[i].constant >> 40;
*buf++ = ops[i].constant >> 48;
*buf++ = ops[i].constant >> 56;
/* add rax, rdi */
*buf++ = 0x48;
*buf++ = 0x01;
*buf++ = 0xf8;
break;
case HF64_ROT:
/* rol rax, imm8 */
*buf++ = 0x48;
*buf++ = 0xc1;
*buf++ = 0xc0;
*buf++ = ops[i].constant;
break;
case HF64_XORL:
/* mov rdi, rax */
*buf++ = 0x48;
*buf++ = 0x89;
*buf++ = 0xc7;
/* shl rdi, imm8 */
*buf++ = 0x48;
*buf++ = 0xc1;
*buf++ = 0xe7;
*buf++ = ops[i].constant;
/* xor rax, rdi */
*buf++ = 0x48;
*buf++ = 0x31;
*buf++ = 0xf8;
break;
case HF64_XORR:
/* mov rdi, rax */
*buf++ = 0x48;
*buf++ = 0x89;
*buf++ = 0xc7;
/* shr rdi, imm8 */
*buf++ = 0x48;
*buf++ = 0xc1;
*buf++ = 0xef;
*buf++ = ops[i].constant;
/* xor rax, rdi */
*buf++ = 0x48;
*buf++ = 0x31;
*buf++ = 0xf8;
break;
case HF64_ADDL:
/* mov rdi, rax */
*buf++ = 0x48;
*buf++ = 0x89;
*buf++ = 0xc7;
/* shl rdi, imm8 */
*buf++ = 0x48;
*buf++ = 0xc1;
*buf++ = 0xe7;
*buf++ = ops[i].constant;
/* add rax, rdi */
*buf++ = 0x48;
*buf++ = 0x01;
*buf++ = 0xf8;
break;
case HF64_SUBL:
/* mov rdi, rax */
*buf++ = 0x48;
*buf++ = 0x89;
*buf++ = 0xc7;
/* shl rdi, imm8 */
*buf++ = 0x48;
*buf++ = 0xc1;
*buf++ = 0xe7;
*buf++ = ops[i].constant;
/* sub rax, rdi */
*buf++ = 0x48;
*buf++ = 0x29;
*buf++ = 0xf8;
break;
}
}
/* ret */
*buf++ = 0xc3;
return buf;
}
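/* Worked example (read straight off the tables above): the single 32-bit op
 * "x ^= x >> 15" (HF32_XORR, constant 15) is emitted as
 *   89 c7        mov edi, eax
 *   c1 ef 0f     shr edi, 15
 *   31 f8        xor eax, edi
 * Every 32-bit function starts with 89 f8 (mov eax, edi) and ends with c3
 * (ret), so the buffer holds one complete callable routine. */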
/* Higher quality is slower but has more consistent results. */
static int score_quality = 18;
static uint64_t rng[2] = { 0x2a2bc037b59ff989, 0x6d7db86fa2f632ca };
/* Measures how each input bit affects each output bit. This measures
* both bias and avalanche.
*/
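/* For each sample x and each input bit j, bins[j][k] counts how often output
 * bit k flips when bit j of x is toggled. A hash with ideal avalanche flips
 * every output bit with probability 1/2, so the ideal count is n/2; the score
 * is the RMS deviation from that, scaled by 1000 (lower is better). */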
static double estimate_bias32(uint32_t (*f)(uint32_t))
{
long n = 1L << score_quality;
long bins[32][32] = { {0} };
for (long i = 0; i < n; i++) {
uint32_t x = xoroshiro128plus(rng);
uint32_t h0 = f(x);
for (int j = 0; j < 32; j++) {
uint32_t bit = UINT32_C(1) << j;
uint32_t h1 = f(x ^ bit);
uint32_t set = h0 ^ h1;
for (int k = 0; k < 32; k++)
bins[j][k] += (set >> k) & 1;
}
}
double mean = 0;
for (int j = 0; j < 32; j++) {
for (int k = 0; k < 32; k++) {
/* FIXME: normalize this somehow */
double diff = (bins[j][k] - n / 2) / (n / 2.0);
mean += (diff * diff) / (32 * 32);
}
}
return sqrt(mean) * 1000.0;
}
static double estimate_bias64(uint64_t (*f)(uint64_t))
{
long n = 1L << score_quality;
long bins[64][64] = { {0} };
for (long i = 0; i < n; i++) {
uint64_t x = xoroshiro128plus(rng);
uint64_t h0 = f(x);
for (int j = 0; j < 64; j++) {
uint64_t bit = UINT64_C(1) << j;
uint64_t h1 = f(x ^ bit);
uint64_t set = h0 ^ h1;
for (int k = 0; k < 64; k++)
bins[j][k] += (set >> k) & 1;
}
}
double mean = 0;
for (int j = 0; j < 64; j++) {
for (int k = 0; k < 64; k++) {
/* FIXME: normalize this somehow */
double diff = (bins[j][k] - n / 2) / (n / 2.0);
mean += (diff * diff) / (64 * 64);
}
}
return sqrt(mean) * 1000.0;
}
#define EXACT_SPLIT 32 // must be power of two
static double exact_bias32(uint32_t (*f)(uint32_t))
{
long long bins[32][32] = { {0} };
static const uint64_t range = (UINT64_C(1) << 32) / EXACT_SPLIT;
#pragma omp parallel for
for (int i = 0; i < EXACT_SPLIT; i++) {
long long b[32][32] = { {0} };
for (uint64_t x = i * range; x < (i + 1) * range; x++) {
uint32_t h0 = f(x);
for (int j = 0; j < 32; j++) {
uint32_t bit = UINT32_C(1) << j;
uint32_t h1 = f(x ^ bit);
uint32_t set = h0 ^ h1;
for (int k = 0; k < 32; k++)
b[j][k] += (set >> k) & 1;
}
}
#pragma omp critical
for (int j = 0; j < 32; j++)
for (int k = 0; k < 32; k++)
bins[j][k] += b[j][k];
}
double mean = 0.0;
for (int j = 0; j < 32; j++) {
for (int k = 0; k < 32; k++) {
double diff = (bins[j][k] - 2147483648LL) / 2147483648.0; /* ideal: half of all 2^32 inputs flip the bit */
mean += (diff * diff) / (32 * 32);
}
}
return sqrt(mean) * 1000.0;
}
static void usage(FILE* f)
{
printf("usage: prospector [-E|L|S] [-4|-8] [-ehs] [-l lib] [-p pattern] [-r n:m] [-t x]\n");
printf(" -4 Generate 32-bit hash functions (default)\n");
printf(" -8 Generate 64-bit hash functions\n");
printf(" -e Measure bias exactly (requires -E)\n");
printf(" -q n Score quality knob (12-30, default: 18)\n");
printf(" -s Don't use large constants\n");
printf(" -t x Initial score threshold [10.0]\n");
printf(" -E Single evaluation mode (requires -p or -l)\n");
printf(" -S Hash function search mode (default)\n");
printf(" -L Enumerate output mode (requires -p or -l)\n");
// fprintf(f, " -h Print this help message\n");
// fprintf(f, " -l ./lib.so Load hash() from a shared object\n");
// fprintf(f, " -p pattern Search only a given pattern\n");
// fprintf(f, " -r n:m Use between n and m operations [3:6]\n");
}
static int parse_operand(struct hf_op* op, char* buf)
{
op->flags |= FOP_LOCKED;
switch (op->type) {
case HF32_NOT: case HF64_NOT: case HF32_BSWAP: case HF64_BSWAP:
return 0;
case HF32_XOR: case HF32_MUL: case HF32_ADD: case HF64_XOR: case HF64_MUL: case HF64_ADD:
op->constant = strtoull(buf, 0, 16);
return 1;
case HF32_ROT: case HF32_XORL: case HF32_XORR: case HF32_ADDL:
case HF32_SUBL: case HF64_ROT: case HF64_XORL: case HF64_XORR:
case HF64_ADDL: case HF64_SUBL:
op->constant = atoi(buf);
return 1;
}
return 0;
}
static int parse_template(struct hf_op* ops, int n, char* templte, int flags)
{
int c = 0;
int offset = flags & F_U64 ? HF64_XOR : 0;
for (char* tok = strtok(templte, ","); tok; tok = strtok(0, ",")) {
if (c == n) return 0;
int found = 0;
size_t operand = strcspn(tok, ":");
int sep = tok[operand];
tok[operand] = 0;
ops[c].flags = 0;
for (int i = 0; i < countof(hf_names); i++) {
if (!strcmp(hf_names[i] + 2, tok)) {
found = 1;
ops[c].type = (hf_type)(i + offset);
break;
}
}
if (!found)
return 0;
if (sep == ':' && !parse_operand(ops + c, tok + operand + 1))
return 0;
c++;
}
return c;
}
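/* A template is a comma-separated list of operation names (without the leading
 * "32"/"64" width prefix from hf_names), each optionally followed by
 * ":operand". An explicit operand locks that step (FOP_LOCKED) so hf_randfunc
 * leaves it alone, while unadorned steps get fresh random constants every
 * round. For example, the default pattern used in main(),
 *   "xorr:15,mul,xorr:14,mul,xorr:15"
 * pins the xorshift distances at 15/14/15 and lets the search explore only the
 * two multiplier constants. */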
#define NOMINMAX
#define WIN32_LEAN_AND_MEAN
#include <Windows.h>
void* execbuf_alloc()
{
return VirtualAlloc(NULL, 4096, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
}
static void execbuf_lock(void* buf, DWORD& oldProtect)
{
BOOL result;
// Determine the current protection state
MEMORY_BASIC_INFORMATION memInfo;
VirtualQuery(buf, &memInfo, sizeof(memInfo));
DWORD oldFlags = memInfo.Protect;
switch (oldFlags)
{
case PAGE_EXECUTE_READWRITE:
// No action required, already writable and executable
break;
case PAGE_READWRITE:
// Set the memory protection to writable and executable
result = VirtualProtect(buf, 4096, PAGE_EXECUTE_READWRITE, &oldProtect);
if (!result)
{
fprintf(stderr, "prospector: VirtualProtect(PAGE_EXECUTE_READWRITE) failed: %u\n", GetLastError());
exit(EXIT_FAILURE);
}
break;
case PAGE_EXECUTE_WRITECOPY:
case PAGE_READONLY:
case PAGE_EXECUTE_READ:
case PAGE_NOACCESS:
// Set the memory protection to writable and executable
result = VirtualProtect(buf, 4096, PAGE_EXECUTE_READWRITE, &oldProtect);
if (!result)
{
fprintf(stderr, "prospector: VirtualProtect(PAGE_EXECUTE_READWRITE) failed: %u\n", GetLastError());
exit(EXIT_FAILURE);
}
break;
}
}
static void execbuf_unlock(void* buf, DWORD& oldProtect)
{
// Set the memory protection back to readable and writable
VirtualProtect(buf, 4096, PAGE_READWRITE, &oldProtect);
}
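/* Lifecycle used by main(): execbuf_alloc() hands out a PAGE_READWRITE buffer,
 * hf_compile() writes the machine code into it, execbuf_lock() makes it
 * executable before the function pointer is called, and in the search loop
 * execbuf_unlock() returns it to PAGE_READWRITE before the next compile. */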
#include <intrin.h>
int main(int argc, char** argv)
{
int nops = 0;
int min = 3; // operation-count range from the original "-r n:m" option; unused in this port
int max = 6;
int flags = 0;
int use_exact = 0;
double best = 100.0;
char templ[40]{};
const char* pattern = "xorr:15,mul,xorr:14,mul,xorr:15";
memcpy(templ, pattern, strlen(pattern));
hf_op ops[32];
void* buf = execbuf_alloc();
DWORD protectionState = PAGE_READWRITE;
_rdseed64_step(rng);
_rdseed64_step(rng + 1);
enum { MODE_SEARCH, MODE_EVAL, MODE_LIST } mode = MODE_SEARCH;
int option = 0;
flags = 4;
// printf("is 4 bit or 8 bit?\n");
// scanf("%i", &option);
// flags &= option;
//
// printf("mode? search:0 eval:1 or list:2\n");
// scanf("%i", &mode);
// printf("score quality between 12 and 30?\n");
// scanf("%i", &score_quality);
// printf("dont use large constants?\n");
// scanf("%i", &option);
// flags |= F_TINY * option;
// printf("best? default 100.0: ");
// scanf("%d", &best);
/* Get a unique seed */
// rng[0] = rng[1] =
nops = parse_template(ops, countof(ops), templ, flags);
if (!nops) {
VirtualFree(buf, 0, MEM_RELEASE);
fprintf(stderr, "prospector: invalid template\n");
exit(EXIT_FAILURE);
}
if (mode == MODE_EVAL) {
double bias;
void* hashptr = 0;
hf_randfunc(ops, nops, rng);
hf_compile(ops, nops, (unsigned char*)buf);
execbuf_lock(buf, protectionState);
hashptr = buf;
CSTIMER("hash speed: ");
uint64_t nhash; // hash calls made below; unused here (upstream normalized its speed report with it)
if (flags & F_U64) {
uint64_t (*hash)(uint64_t) = (uint64_t (*)(uint64_t))hashptr;
bias = estimate_bias64(hash);
nhash = (1ULL << score_quality) * 65; // estimate_bias64 makes 1 + 64 hash calls per sample
}
else {
uint32_t (*hash)(uint32_t) = (uint32_t(*)(uint32_t))hashptr;
bias = estimate_bias32(hash);
nhash = (1ULL << score_quality) * 33; // estimate_bias32 makes 1 + 32 hash calls per sample
}
hf_printfunc(ops, nops, stdout);
printf("bias = %.17g\n", bias);
VirtualFree(buf, 0, MEM_RELEASE);
return 0;
}
if (mode == MODE_LIST)
{
void* hashptr = 0;
hf_randfunc(ops, nops, rng);
hf_compile(ops, nops, (unsigned char*)buf);
execbuf_lock(buf, protectionState);
hashptr = buf;
if (flags & F_U64) {
uint64_t (*hash)(uint64_t) = (uint64_t(*)(uint64_t))hashptr;
uint64_t i = 0;
do
printf("%016llx %016llx\n",
(unsigned long long)i,
(unsigned long long)hash(i));
while (++i < 500);
}
else {
uint32_t (*hash)(uint32_t) = (uint32_t(*)(uint32_t))hashptr;
uint32_t i = 0;
do
printf("%08lx %08lx\n",
(unsigned long)i,
(unsigned long)hash(i));
while (++i < 500);
}
VirtualFree(buf, 0, MEM_RELEASE);
return 0;
}
for (;;) {
/* Generate */
_rdseed64_step(rng);
_rdseed64_step(rng + 1);
hf_randfunc(ops, nops, rng);
/* Evaluate */
double score;
hf_compile(ops, nops, (unsigned char*)buf);
execbuf_lock(buf, protectionState);
if (flags & F_U64) {
uint64_t (*hash)(uint64_t) = (uint64_t(*)(uint64_t))buf;
score = estimate_bias64(hash);
}
else {
uint32_t (*hash)(uint32_t) = (uint32_t(*)(uint32_t))buf;
score = estimate_bias32(hash);
}
execbuf_unlock(buf, protectionState);
/* Compare */
if (score < best) {
printf("// score = %.17g\n", score);
hf_printfunc(ops, nops, stdout);
fflush(stdout);
best = score;
}
}
VirtualFree(buf, 0, MEM_RELEASE);
}
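/* Build sketch (untested; assumes Timer.hpp from the author's project provides
 * the CSTIMER scope-timer macro used above), e.g. with MSVC:
 *   cl /O2 /EHsc /openmp prospector.cpp
 * The CPU must support RDSEED for _rdseed64_step; /openmp only affects the
 * exact_bias32 path. */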