@district10
Last active May 18, 2023 14:13
Patch for building abseil-cpp on QNX, based on release 20200225.2 (commit c51510d1). When linking the resulting static libraries, don't forget to wrap them in `-Wl,--start-group` … `-Wl,--end-group` so the linker can resolve the circular dependencies among the Abseil archives. Related: https://github.com/abseil/abseil-cpp/issues/566#issuecomment-1013611434
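For example, a minimal consumer-side CMake sketch of the link step (the target name my_app, the install prefix ABSL_ROOT, and the glob over the installed archives are all placeholders for illustration, not part of this gist):

    # Sketch only: link every installed Abseil static archive inside one
    # --start-group/--end-group pair. my_app and ABSL_ROOT are hypothetical.
    set(ABSL_ROOT "/opt/absl-qnx" CACHE PATH "Abseil install prefix")
    file(GLOB ABSL_STATIC_LIBS "${ABSL_ROOT}/lib/libabsl_*.a")

    add_executable(my_app main.cc)
    target_include_directories(my_app PRIVATE "${ABSL_ROOT}/include")

    # CMake forwards items beginning with '-' to the linker verbatim and
    # preserves their order, so the group brackets exactly the archives.
    target_link_libraries(my_app PRIVATE
      -Wl,--start-group
      ${ABSL_STATIC_LIBS}
      -Wl,--end-group)

Without the group, ld makes a single pass over the archive list, so a symbol needed by an earlier archive but defined in a later one can go unresolved; the group makes the linker rescan the archives until no new symbols are pulled in.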
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 48cb6eb..f730711 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -20,6 +20,15 @@
# (https://fedoraproject.org/wiki/EPEL#Extra_Packages_for_Enterprise_Linux_.28EPEL.29)
# and then issuing `yum install cmake3` on the command line.
cmake_minimum_required(VERSION 3.5)
+set(CMAKE_CXX_STANDARD 11)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+if (CMAKE_SYSTEM_NAME STREQUAL "QNX")
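+ # Enable QNX and X/Open extensions, and define fallbacks for the
+ # signal-stack constants (SIGSTKSZ, SA_ONSTACK) that this QNX
+ # toolchain's headers appear to lack.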
+ add_definitions(-D_QNX_SOURCE)
+ # add_definitions(-D_XOPEN_SOURCE)
+ add_definitions(-D_XOPEN_SOURCE=500)
+ add_definitions(-DSIGSTKSZ=65536)
+ add_definitions(-DSA_ONSTACK=0x08000000)
+endif()
# Compiler id for Apple Clang is now AppleClang.
cmake_policy(SET CMP0025 NEW)
diff --git a/absl/base/config.h b/absl/base/config.h
index ee99f94..a7d7bd9 100644
--- a/absl/base/config.h
+++ b/absl/base/config.h
@@ -237,6 +237,10 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#else // !defined(__APPLE__)
#define ABSL_HAVE_THREAD_LOCAL 1
#endif
+#if defined(__QNX__)
+#undef ABSL_HAVE_THREAD_LOCAL
+#define ABSL_HAVE_THREAD_LOCAL 0
+#endif
// There are platforms for which TLS should not be used even though the compiler
// makes it seem like it's supported (Android NDK < r12b for example).
@@ -370,6 +374,10 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
defined(__ASYLO__)
#define ABSL_HAVE_MMAP 1
#endif
+#if defined(__QNX__)
+#undef ABSL_HAVE_MMAP
+#define ABSL_HAVE_MMAP 0
+#endif
// ABSL_HAVE_PTHREAD_GETSCHEDPARAM
//
@@ -381,6 +389,10 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
defined(__ros__)
#define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1
#endif
+#if defined(__QNX__)
+#undef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
+#define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 0
+#endif
// ABSL_HAVE_SCHED_YIELD
//
@@ -391,6 +403,10 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#elif defined(__linux__) || defined(__ros__) || defined(__native_client__)
#define ABSL_HAVE_SCHED_YIELD 1
#endif
+#if defined(__QNX__)
+#undef ABSL_HAVE_SCHED_YIELD
+#define ABSL_HAVE_SCHED_YIELD 0
+#endif
// ABSL_HAVE_SEMAPHORE_H
//
@@ -406,6 +422,10 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#elif defined(__linux__) || defined(__ros__)
#define ABSL_HAVE_SEMAPHORE_H 1
#endif
+#if defined(__QNX__)
+#undef ABSL_HAVE_SEMAPHORE_H
+#define ABSL_HAVE_SEMAPHORE_H 0
+#endif
// ABSL_HAVE_ALARM
//
@@ -435,6 +455,10 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
// other standard libraries
#define ABSL_HAVE_ALARM 1
#endif
+#if defined(__QNX__)
+#undef ABSL_HAVE_ALARM
+#define ABSL_HAVE_ALARM 0
+#endif
// ABSL_IS_LITTLE_ENDIAN
// ABSL_IS_BIG_ENDIAN
diff --git a/absl/base/internal/low_level_alloc.h b/absl/base/internal/low_level_alloc.h
index db91951..8dd837f 100644
--- a/absl/base/internal/low_level_alloc.h
+++ b/absl/base/internal/low_level_alloc.h
@@ -39,6 +39,10 @@
#elif !defined(ABSL_HAVE_MMAP) && !defined(_WIN32)
#define ABSL_LOW_LEVEL_ALLOC_MISSING 1
#endif
+#if defined(__QNX__)
+#undef ABSL_LOW_LEVEL_ALLOC_MISSING
+#define ABSL_LOW_LEVEL_ALLOC_MISSING 1
+#endif
// Using LowLevelAlloc with kAsyncSignalSafe isn't supported on Windows or
// asm.js / WebAssembly.
@@ -49,6 +53,10 @@
#elif defined(_WIN32) || defined(__asmjs__) || defined(__wasm__)
#define ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING 1
#endif
+#if defined(__QNX__)
+#undef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+#define ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING 1
+#endif
#include <cstddef>
diff --git a/absl/debugging/failure_signal_handler.cc b/absl/debugging/failure_signal_handler.cc
index 1f69bfa..f860605 100644
--- a/absl/debugging/failure_signal_handler.cc
+++ b/absl/debugging/failure_signal_handler.cc
@@ -165,9 +165,13 @@ static bool SetupAlternateStackOnce() {
}
#endif
+#if defined(__QNX__)
+ // ABSL_RAW_LOG(FATAL, "sigaltstack() not supported");
+#else
if (sigaltstack(&sigstk, nullptr) != 0) {
ABSL_RAW_LOG(FATAL, "sigaltstack() failed with errno=%d", errno);
}
+#endif
return true;
}
diff --git a/absl/debugging/internal/elf_mem_image.h b/absl/debugging/internal/elf_mem_image.h
index 46bfade..d236b7b 100644
--- a/absl/debugging/internal/elf_mem_image.h
+++ b/absl/debugging/internal/elf_mem_image.h
@@ -38,7 +38,11 @@
#ifdef ABSL_HAVE_ELF_MEM_IMAGE
+#if defined(__QNX__)
+#include <sys/link.h>
+#else
#include <link.h> // for ElfW
+#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
diff --git a/absl/synchronization/internal/mutex_nonprod.cc b/absl/synchronization/internal/mutex_nonprod.cc
index 4590b98..a99eb9b 100644
--- a/absl/synchronization/internal/mutex_nonprod.cc
+++ b/absl/synchronization/internal/mutex_nonprod.cc
@@ -163,10 +163,12 @@ void CondVarImpl::Wait(MutexImpl* mu) {
bool CondVarImpl::WaitWithDeadline(MutexImpl* mu, absl::Time deadline) {
mu->released_.SignalAll();
+#if (!defined(__QNX__))
struct timespec ts = ToTimespec(deadline);
int rc = pthread_cond_timedwait(&pthread_cv_, &mu->pthread_mutex_, &ts);
if (rc == ETIMEDOUT) return true;
ABSL_RAW_CHECK(rc == 0, "pthread error");
+#endif
return false;
}
diff --git a/absl/synchronization/internal/mutex_nonprod.inc b/absl/synchronization/internal/mutex_nonprod.inc
index a1502e7..7b5f0af 100644
--- a/absl/synchronization/internal/mutex_nonprod.inc
+++ b/absl/synchronization/internal/mutex_nonprod.inc
@@ -216,7 +216,11 @@ class SynchronizationStorage {
explicit SynchronizationStorage(base_internal::LinkerInitialized) {}
constexpr explicit SynchronizationStorage(absl::ConstInitType)
+#if defined(__QNX__)
+ : is_dynamic_(false), once_(), space_{0} {}
+#else
: is_dynamic_(false), once_(), space_{{0}} {}
+#endif
SynchronizationStorage(SynchronizationStorage&) = delete;
SynchronizationStorage& operator=(SynchronizationStorage&) = delete;
diff --git a/absl/synchronization/internal/waiter.cc b/absl/synchronization/internal/waiter.cc
index 2949f5a..328aaee 100644
--- a/absl/synchronization/internal/waiter.cc
+++ b/absl/synchronization/internal/waiter.cc
@@ -250,6 +250,9 @@ bool Waiter::Wait(KernelTimeout t) {
ABSL_RAW_LOG(FATAL, "pthread_cond_wait failed: %d", err);
}
} else {
+#if defined(__QNX__)
+ return false;
+#else
const int err = pthread_cond_timedwait(&cv_, &mu_, &abs_timeout);
if (err == ETIMEDOUT) {
--waiter_count_;
@@ -258,6 +261,7 @@ bool Waiter::Wait(KernelTimeout t) {
if (err != 0) {
ABSL_RAW_LOG(FATAL, "pthread_cond_timedwait failed: %d", err);
}
+#endif
}
first_pass = false;
}
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index e0879b0..3e1848f 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -545,6 +545,7 @@ static SynchLocksHeld *Synch_GetAllLocks() {
return s->all_locks;
}
+#if (!defined(__QNX__))
// Post on "w"'s associated PerThreadSem.
inline void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
if (mu) {
@@ -569,6 +570,7 @@ bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
}
return res;
}
+#endif
// We're in a fatal signal handler that hopes to use Mutex and to get
// lucky by not deadlocking. We try to improve its chances of success
@@ -715,10 +717,12 @@ static bool DebugOnlyIsExiting() {
}
Mutex::~Mutex() {
+#if (!defined(__QNX__))
intptr_t v = mu_.load(std::memory_order_relaxed);
if ((v & kMuEvent) != 0 && !DebugOnlyIsExiting()) {
ForgetSynchEvent(&this->mu_, kMuEvent, kMuSpin);
}
+#endif
if (kDebugMode) {
this->ForgetDeadlockInfo();
}
@@ -726,9 +730,11 @@ Mutex::~Mutex() {
}
void Mutex::EnableDebugLog(const char *name) {
+#if (!defined(__QNX__))
SynchEvent *e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
e->log = true;
UnrefSynchEvent(e);
+#endif
}
void EnableMutexInvariantDebugging(bool enabled) {
@@ -737,6 +743,7 @@ void EnableMutexInvariantDebugging(bool enabled) {
void Mutex::EnableInvariantDebugging(void (*invariant)(void *),
void *arg) {
+#if (!defined(__QNX__))
if (synch_check_invariants.load(std::memory_order_acquire) &&
invariant != nullptr) {
SynchEvent *e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
@@ -744,6 +751,7 @@ void Mutex::EnableInvariantDebugging(void (*invariant)(void *),
e->arg = arg;
UnrefSynchEvent(e);
}
+#endif
}
void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
@@ -1054,6 +1062,7 @@ static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
// Try to remove thread s from the list of waiters on this mutex.
// Does nothing if s is not on the waiter list.
+#if (!defined(__QNX__))
void Mutex::TryRemove(PerThreadSynch *s) {
intptr_t v = mu_.load(std::memory_order_relaxed);
// acquire spinlock & lock
@@ -1185,6 +1194,7 @@ static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
held_locks->locks[i].count++;
}
}
+#endif
// Record a lock release. Each call to LockEnter(mu, id, x) should be
// eventually followed by a call to LockLeave(mu, id, x) by the same thread.
@@ -1225,6 +1235,7 @@ static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
}
}
+#if (!defined(__QNX__))
// Call LockEnter() if in debug mode and deadlock detection is enabled.
static inline void DebugOnlyLockEnter(Mutex *mu) {
if (kDebugMode) {
@@ -1254,6 +1265,7 @@ static inline void DebugOnlyLockLeave(Mutex *mu) {
}
}
}
+#endif
static char *StackString(void **pcs, int n, char *buf, int maxlen,
bool symbolize) {
@@ -1310,6 +1322,9 @@ int GetStack(void** stack, int max_depth) {
// Called in debug mode when a thread is about to acquire a lock in a way that
// may block.
static GraphId DeadlockCheck(Mutex *mu) {
+#if defined(__QNX__)
+ return InvalidGraphId();
+#else
if (synch_deadlock_detection.load(std::memory_order_acquire) ==
OnDeadlockCycle::kIgnore) {
return InvalidGraphId();
@@ -1391,6 +1406,7 @@ static GraphId DeadlockCheck(Mutex *mu) {
}
return mu_id;
+#endif
}
// Invoke DeadlockCheck() iff we're in debug mode and
@@ -1418,6 +1434,7 @@ void Mutex::ForgetDeadlockInfo() {
void Mutex::AssertNotHeld() const {
// We have the data to allow this check only if in debug mode and deadlock
// detection is enabled.
+#if (!defined(__QNX__))
if (kDebugMode &&
(mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&
synch_deadlock_detection.load(std::memory_order_acquire) !=
@@ -1433,6 +1450,7 @@ void Mutex::AssertNotHeld() const {
}
}
}
+#endif
}
// Attempt to acquire *mu, and return whether successful. The implementation
@@ -1458,6 +1476,7 @@ static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
ABSL_XRAY_LOG_ARGS(1) void Mutex::Lock() {
ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
GraphId id = DebugOnlyDeadlockCheck(this);
+#if (!defined(__QNX__))
intptr_t v = mu_.load(std::memory_order_relaxed);
// try fast acquire, then spin loop
if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 ||
@@ -1471,9 +1490,11 @@ ABSL_XRAY_LOG_ARGS(1) void Mutex::Lock() {
}
DebugOnlyLockEnter(this, id);
ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+#endif
}
ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderLock() {
+#if (!defined(__QNX__))
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
GraphId id = DebugOnlyDeadlockCheck(this);
intptr_t v = mu_.load(std::memory_order_relaxed);
@@ -1486,16 +1507,20 @@ ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderLock() {
}
DebugOnlyLockEnter(this, id);
ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
+#endif
}
void Mutex::LockWhen(const Condition &cond) {
+#if (!defined(__QNX__))
ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
GraphId id = DebugOnlyDeadlockCheck(this);
this->LockSlow(kExclusive, &cond, 0);
DebugOnlyLockEnter(this, id);
ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+#endif
}
+#if (!defined(__QNX__))
bool Mutex::LockWhenWithTimeout(const Condition &cond, absl::Duration timeout) {
return LockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
}
@@ -1532,8 +1557,10 @@ bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
return res;
}
+#endif
void Mutex::Await(const Condition &cond) {
+#if (!defined(__QNX__))
if (cond.Eval()) { // condition already true; nothing to do
if (kDebugMode) {
this->AssertReaderHeld();
@@ -1542,6 +1569,7 @@ void Mutex::Await(const Condition &cond) {
ABSL_RAW_CHECK(this->AwaitCommon(cond, KernelTimeout::Never()),
"condition untrue on return from Await");
}
+#endif
}
bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) {
@@ -1549,6 +1577,9 @@ bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) {
}
bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
+#if defined(__QNX__)
+ return true;
+#else
if (cond.Eval()) { // condition already true; nothing to do
if (kDebugMode) {
this->AssertReaderHeld();
@@ -1561,8 +1592,10 @@ bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
ABSL_RAW_CHECK(res || t.has_timeout(),
"condition untrue on return from Await");
return res;
+#endif
}
+#if (!defined(__QNX__))
bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) {
this->AssertReaderHeld();
MuHow how =
@@ -1585,8 +1618,10 @@ bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) {
ABSL_TSAN_MUTEX_POST_LOCK(this, TsanFlags(how), 0);
return res;
}
+#endif
ABSL_XRAY_LOG_ARGS(1) bool Mutex::TryLock() {
+#if (!defined(__QNX__))
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
intptr_t v = mu_.load(std::memory_order_relaxed);
if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 && // try fast acquire
@@ -1613,9 +1648,11 @@ ABSL_XRAY_LOG_ARGS(1) bool Mutex::TryLock() {
ABSL_TSAN_MUTEX_POST_LOCK(
this, __tsan_mutex_try_lock | __tsan_mutex_try_lock_failed, 0);
return false;
+#endif
}
ABSL_XRAY_LOG_ARGS(1) bool Mutex::ReaderTryLock() {
+#if (!defined(__QNX__))
ABSL_TSAN_MUTEX_PRE_LOCK(this,
__tsan_mutex_read_lock | __tsan_mutex_try_lock);
intptr_t v = mu_.load(std::memory_order_relaxed);
@@ -1659,9 +1696,11 @@ ABSL_XRAY_LOG_ARGS(1) bool Mutex::ReaderTryLock() {
__tsan_mutex_try_lock_failed,
0);
return false;
+#endif
}
ABSL_XRAY_LOG_ARGS(1) void Mutex::Unlock() {
+#if (!defined(__QNX__))
ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
DebugOnlyLockLeave(this);
intptr_t v = mu_.load(std::memory_order_relaxed);
@@ -1700,6 +1739,7 @@ ABSL_XRAY_LOG_ARGS(1) void Mutex::Unlock() {
this->UnlockSlow(nullptr /*no waitp*/); // take slow path
}
ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
+#endif
}
// Requires v to represent a reader-locked state.
@@ -1714,6 +1754,7 @@ static bool ExactlyOneReader(intptr_t v) {
}
ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderUnlock() {
+#if (!defined(__QNX__))
ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
DebugOnlyLockLeave(this);
intptr_t v = mu_.load(std::memory_order_relaxed);
@@ -1730,6 +1771,7 @@ ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderUnlock() {
}
this->UnlockSlow(nullptr /*no waitp*/); // take slow path
ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
+#endif
}
// The zap_desig_waker bitmask is used to clear the designated waker flag in
@@ -1750,12 +1792,14 @@ static const intptr_t ignore_waiting_writers[] = {
kMuWrWait) // blocked; pretend there are no waiting writers
};
+#if (!defined(__QNX__))
// Internal version of LockWhen(). See LockSlowWithDeadline()
void Mutex::LockSlow(MuHow how, const Condition *cond, int flags) {
ABSL_RAW_CHECK(
this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
"condition untrue on return from LockSlow");
}
+#endif
// Compute cond->Eval() and tell race detectors that we do it under mutex mu.
static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
@@ -1833,6 +1877,7 @@ static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
// obstruct this call
// - kMuIsCond indicates that this is a conditional acquire (condition variable,
// Await, LockWhen) so contention profiling should be suppressed.
+#if (!defined(__QNX__))
bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
KernelTimeout t, int flags) {
intptr_t v = mu_.load(std::memory_order_relaxed);
@@ -1864,6 +1909,7 @@ bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
cond == nullptr ||
EvalConditionAnnotated(cond, this, true, false, how == kShared);
}
+#endif
// RAW_CHECK_FMT() takes a condition, a printf-style format string, and
// the printf-style argument list. The format string must be a literal.
@@ -1898,6 +1944,7 @@ static void CheckForMutexCorruption(intptr_t v, const char* label) {
assert(false);
}
+#if (!defined(__QNX__))
void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
int c = 0;
intptr_t v = mu_.load(std::memory_order_relaxed);
@@ -2011,12 +2058,15 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
SYNCH_EV_READERLOCK_RETURNING);
}
}
+#endif
// Unlock this mutex, which is held by the current thread.
// If waitp is non-zero, it must be the wait parameters for the current thread
// which holds the lock but is not runnable because its condition is false
// or it is in the process of blocking on a condition variable; it must requeue
// itself on the mutex/condvar to wait for its condition to become true.
+
+#if (!defined(__QNX__))
void Mutex::UnlockSlow(SynchWaitParams *waitp) {
intptr_t v = mu_.load(std::memory_order_relaxed);
this->AssertReaderHeld();
@@ -2314,6 +2364,7 @@ void Mutex::UnlockSlow(SynchWaitParams *waitp) {
}
}
}
+#endif
// Used by CondVar implementation to reacquire mutex after waking from
// condition variable. This routine is used instead of Lock() because the
@@ -2324,14 +2375,17 @@ void Mutex::UnlockSlow(SynchWaitParams *waitp) {
// in fact it has just been woken from the mutex. Thus, it must enter the slow
// path of the mutex in the same state as if it had just woken from the mutex.
// That is, it must ensure to clear kMuDesig (INV1b).
+#if (!defined(__QNX__))
void Mutex::Trans(MuHow how) {
this->LockSlow(how, nullptr, kMuHasBlocked | kMuIsCond);
}
+#endif
// Used by CondVar implementation to effectively wake thread w from the
// condition variable. If this mutex is free, we simply wake the thread.
// It will later acquire the mutex with high probability. Otherwise, we
// enqueue thread w on this mutex.
+#if (!defined(__QNX__))
void Mutex::Fer(PerThreadSynch *w) {
int c = 0;
ABSL_RAW_CHECK(w->waitp->cond == nullptr,
@@ -2385,23 +2439,28 @@ void Mutex::Fer(PerThreadSynch *w) {
c = Delay(c, GENTLE);
}
}
+#endif
void Mutex::AssertHeld() const {
+#if (!defined(__QNX__))
if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
SynchEvent *e = GetSynchEvent(this);
ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",
static_cast<const void *>(this),
(e == nullptr ? "" : e->name));
}
+#endif
}
void Mutex::AssertReaderHeld() const {
+#if (!defined(__QNX__))
if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) {
SynchEvent *e = GetSynchEvent(this);
ABSL_RAW_LOG(
FATAL, "thread should hold at least a read lock on Mutex %p %s",
static_cast<const void *>(this), (e == nullptr ? "" : e->name));
}
+#endif
}
// -------------------------------- condition variables
@@ -2417,18 +2476,23 @@ static_assert(PerThreadSynch::kAlignment > kCvLow,
"PerThreadSynch::kAlignment must be greater than kCvLow");
void CondVar::EnableDebugLog(const char *name) {
+#if (!defined(__QNX__))
SynchEvent *e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
e->log = true;
UnrefSynchEvent(e);
+#endif
}
CondVar::~CondVar() {
+#if (!defined(__QNX__))
if ((cv_.load(std::memory_order_relaxed) & kCvEvent) != 0) {
ForgetSynchEvent(&this->cv_, kCvEvent, kCvSpin);
}
+#endif
}
+#if (!defined(__QNX__))
// Remove thread s from the list of waiters on this condition variable.
void CondVar::Remove(PerThreadSynch *s) {
intptr_t v;
@@ -2463,6 +2527,7 @@ void CondVar::Remove(PerThreadSynch *s) {
}
}
}
+#endif
// Queue thread waitp->thread on condition variable word cv_word using
// wait parameters waitp.
@@ -2510,6 +2575,7 @@ static void CondVarEnqueue(SynchWaitParams *waitp) {
std::memory_order_release);
}
+#if (!defined(__QNX__))
bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
bool rc = false; // return value; true iff we timed-out
@@ -2560,23 +2626,31 @@ bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
ABSL_TSAN_MUTEX_POST_LOCK(mutex, TsanFlags(mutex_how), 0);
return rc;
}
+#endif
bool CondVar::WaitWithTimeout(Mutex *mu, absl::Duration timeout) {
-  return WaitWithDeadline(mu, DeadlineFromTimeout(timeout));
+#if defined(__QNX__)
+  // Timed waits are not supported by this QNX port; report "no timeout"
+  // instead of falling off the end of a value-returning function.
+  return false;
+#else
+  return WaitWithDeadline(mu, DeadlineFromTimeout(timeout));
+#endif
}
bool CondVar::WaitWithDeadline(Mutex *mu, absl::Time deadline) {
+#if defined(__QNX__)
+ return false;
+#else
return WaitCommon(mu, KernelTimeout(deadline));
+#endif
}
void CondVar::Wait(Mutex *mu) {
+#if (!defined(__QNX__))
WaitCommon(mu, KernelTimeout::Never());
+#endif
}
// Wake thread w
// If it was a timed wait, w will be waiting on w->cv
// Otherwise, if it was not a Mutex mutex, w will be waiting on w->sem
// Otherwise, w is transferred to the Mutex mutex via Mutex::Fer().
+#if (!defined(__QNX__))
void CondVar::Wakeup(PerThreadSynch *w) {
if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) {
// The waiting thread only needs to observe "w->state == kAvailable" to be
@@ -2589,8 +2663,10 @@ void CondVar::Wakeup(PerThreadSynch *w) {
w->waitp->cvmu->Fer(w);
}
}
+#endif
void CondVar::Signal() {
+#if (!defined(__QNX__))
ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
intptr_t v;
int c = 0;
@@ -2627,9 +2703,11 @@ void CondVar::Signal() {
}
}
ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
+#endif
}
void CondVar::SignalAll () {
+#if (!defined(__QNX__))
ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
intptr_t v;
int c = 0;
@@ -2664,6 +2742,7 @@ void CondVar::SignalAll () {
}
}
ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
+#endif
}
void ReleasableMutexLock::Release() {
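
To automate applying the patch, one possible setup is CMake's ExternalProject module. This is a sketch under stated assumptions: the patch filename absl-qnx.patch and the QNX_TOOLCHAIN_FILE variable are hypothetical names, not part of this gist:

    # Sketch: fetch the 20200225.2 tag and apply this gist's patch before
    # configuring. absl-qnx.patch and QNX_TOOLCHAIN_FILE are placeholders.
    include(ExternalProject)
    ExternalProject_Add(abseil_qnx
      GIT_REPOSITORY https://github.com/abseil/abseil-cpp.git
      GIT_TAG        20200225.2
      PATCH_COMMAND  git apply "${CMAKE_CURRENT_SOURCE_DIR}/absl-qnx.patch"
      CMAKE_ARGS     -DCMAKE_INSTALL_PREFIX=<INSTALL_DIR>
                     -DCMAKE_TOOLCHAIN_FILE=${QNX_TOOLCHAIN_FILE}
    )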
(The remaining files in this gist are too large to display here.)