@baiyanhuang
Created January 31, 2019 13:36
#pragma once
#include <atomic>
#include <algorithm>   // std::max
#include <cstddef>     // std::size_t
/*
 * Techniques demonstrated:
 * 1. release-acquire memory ordering
 * 2. false sharing and cache lines
 * 3. alignas
 */
/*
 * No matter where the allocation starts, the structure below keeps _lock as
 * the only data in its cache line, so there is no false sharing: with a full
 * cache line of padding on each side, any 64-byte line that contains _lock
 * contains nothing but padding.
 *
 *   | _padding_before | _lock | _padding_after |
 *   |-------64--------|       |-------64-------|
 */
template <size_t N>
struct DoublePadding
{
    // Pad with at least one full cache line (and never less than the flag
    // itself) on each side of the lock.
    constexpr static size_t paddingSize() { return std::max(N, sizeof(std::atomic_flag)); }

    char _padding_before[paddingSize()];
    std::atomic_flag _lock = ATOMIC_FLAG_INIT;
    char _padding_after[paddingSize()];
};
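/*
 * A compile-time sanity check (an illustrative sketch added here, not part of
 * the original gist): with a full cache line of padding on each side, the
 * structure spans at least two padding regions plus the flag, so no
 * neighboring object can share a line with _lock.
 */
static_assert(sizeof(DoublePadding<64>) >= 2 * 64 + sizeof(std::atomic_flag),
              "DoublePadding<64> must pad a full cache line on each side of _lock");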
/*
 * Alternative: align the whole structure to a cache-line boundary (here with
 * the standard alignas specifier, replacing the gist's AlignAs<N> helper so
 * the header is self-contained), then pad out the rest of the line after the
 * lock.
 */
template <size_t N>
struct alignas(N) AlignPadding
{
    constexpr static size_t paddingSize() { return std::max(N, sizeof(std::atomic_flag)); }

    std::atomic_flag _lock = ATOMIC_FLAG_INIT;
    char _padding[paddingSize()];
};
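/*
 * Likewise for the alignas variant (again a sketch, assuming N = 64): the
 * structure itself is cache-line aligned, and its size is rounded up to a
 * multiple of the alignment, so the line holding _lock belongs to this
 * structure alone.
 */
static_assert(alignof(AlignPadding<64>) == 64,
              "AlignPadding<64> must start on a cache-line boundary");
static_assert(sizeof(AlignPadding<64>) % 64 == 0,
              "AlignPadding<64> must occupy whole cache lines");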
template <class PaddingPolicy>
class SpinLockT : public PaddingPolicy
{
public:
    /*
     * test_and_set() returns the previous value of the flag:
     *   false - the lock was free and we have just acquired it, so return;
     *   true  - the lock is already held, so keep spinning.
     */
    void lock() { while (this->_lock.test_and_set(std::memory_order_acquire)); }
    void unlock() { this->_lock.clear(std::memory_order_release); }
};
constexpr size_t CACHELINE_SIZE = 64;
using SpinLock = SpinLockT<AlignPadding<CACHELINE_SIZE>>;
using SpinLock2 = SpinLockT<DoublePadding<CACHELINE_SIZE>>;
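/*
 * Usage sketch (illustrative, not from the original gist): SpinLock meets the
 * BasicLockable requirements (lock()/unlock()), so it composes with
 * std::lock_guard. The acquire in lock() pairs with the release in unlock(),
 * which is what makes the counter increments below visible across threads.
 */
#ifdef SPINLOCK_DEMO
#include <cstdio>
#include <mutex>    // std::lock_guard
#include <thread>
#include <vector>

inline void spinlock_demo()
{
    SpinLock lock;
    long counter = 0;

    std::vector<std::thread> threads;
    for (int i = 0; i < 4; ++i) {
        threads.emplace_back([&] {
            for (int j = 0; j < 100000; ++j) {
                std::lock_guard<SpinLock> guard(lock);  // spins until acquired
                ++counter;                              // protected by the lock
            }
        });
    }
    for (auto& t : threads)
        t.join();

    std::printf("counter = %ld (expected 400000)\n", counter);
}
#endif // SPINLOCK_DEMO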