@jasonLaster
Created March 30, 2021 02:09
diff --git a/base/BUILD.gn b/base/BUILD.gn
index 7be3ca12455f..e81a29c97627 100644
--- a/base/BUILD.gn
+++ b/base/BUILD.gn
@@ -236,6 +236,7 @@ component("base") {
"debug/task_trace.h",
"deferred_sequenced_task_runner.cc",
"deferred_sequenced_task_runner.h",
+ "deterministic_containers.h",
"enterprise_util.h",
"environment.cc",
"environment.h",
@@ -498,6 +499,8 @@ component("base") {
"ranges/algorithm.h",
"ranges/functional.h",
"ranges/ranges.h",
+ "record_replay.cc",
+ "record_replay.h",
"run_loop.cc",
"run_loop.h",
"sampling_heap_profiler/lock_free_address_hash_set.cc",
diff --git a/base/allocator/allocator_interception_mac.mm b/base/allocator/allocator_interception_mac.mm
index 3d7299c06738..a7af31a6edab 100644
--- a/base/allocator/allocator_interception_mac.mm
+++ b/base/allocator/allocator_interception_mac.mm
@@ -365,6 +365,12 @@ void InterceptAllocationsMac() {
g_oom_killer_enabled = true;
+ // Don't alter memory allocation behavior when recording/replaying.
+ if (getenv("RECORD_REPLAY_DRIVER")) {
+ g_replaced_default_zone = true;
+ return;
+ }
+
// === C malloc/calloc/valloc/realloc/posix_memalign ===
// This approach is not perfect, as requests for amounts of memory larger than
diff --git a/base/allocator/allocator_shim.cc b/base/allocator/allocator_shim.cc
index e085adfe5962..a32566754d0a 100644
--- a/base/allocator/allocator_shim.cc
+++ b/base/allocator/allocator_shim.cc
@@ -372,6 +372,11 @@ ALWAYS_INLINE void ShimAlignedFree(void* address, void* context) {
namespace base {
namespace allocator {
void InitializeAllocatorShim() {
+ // Don't alter memory allocation behavior when recording/replaying.
+ if (getenv("RECORD_REPLAY_DRIVER")) {
+ return;
+ }
+
// Prepares the default dispatch. After the intercepted malloc calls have
// traversed the shim this will route them to the default malloc zone.
InitializeDefaultDispatchToMacAllocator();
diff --git a/base/debug/debugger_posix.cc b/base/debug/debugger_posix.cc
index b2e8ae3ba63d..5a2bacd64c38 100644
--- a/base/debug/debugger_posix.cc
+++ b/base/debug/debugger_posix.cc
@@ -137,6 +137,8 @@ bool BeingDebugged() {
}
void VerifyDebugger() {
+ return;
+ /*
#if BUILDFLAG(ENABLE_LLDBINIT_WARNING)
if (Environment::Create()->HasVar("CHROMIUM_LLDBINIT_SOURCED"))
return;
@@ -151,6 +153,7 @@ void VerifyDebugger() {
"To continue anyway, type 'continue' in lldb. To always skip this "
"check, define an environment variable CHROMIUM_LLDBINIT_SOURCED=1";
#endif
+ */
}
#elif defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_ANDROID) || \
diff --git a/base/deterministic_containers.h b/base/deterministic_containers.h
new file mode 100644
index 000000000000..3d1f70d7aaa8
--- /dev/null
+++ b/base/deterministic_containers.h
@@ -0,0 +1,206 @@
+// Copyright (c) 2021 Record Replay Inc.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DETERMINISTIC_CONTAINERS_H_
+#define BASE_DETERMINISTIC_CONTAINERS_H_
+
+#include <functional>
+#include <iterator>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "base/check.h"
+#include "base/optional.h"
+
+// When recording/replaying, some containers must be iterated in the same
+// order while replaying as they were while recording for behavior to remain
+// the same. STL unordered containers (i.e. hash tables) do not guarantee
+// this. This file defines replacements for those containers that have the
+// same interface but always iterate their contents in insertion order.
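+//
+// A minimal usage sketch (illustrative only, not part of the patched code):
+//
+//   base::deterministic_unordered_map<int, const char*> m;
+//   m.insert(2, "two");
+//   m.insert(1, "one");
+//   m.erase(2);
+//   // Visits only the surviving entry (1, "one"), in insertion order rather
+//   // than hash order.
+//   for (const auto& entry : m) {
+//     UseEntry(entry.first, entry.second);  // UseEntry is a placeholder.
+//   }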
+
+namespace base {
+
+template <typename Key,
+ typename T,
+ typename Hash = std::hash<Key>,
+ typename EqualTo = std::equal_to<Key>>
+class deterministic_unordered_map {
+ // All entries in the map in insertion order. Entries which have been erased
+ // are empty. If the map has many erasures over time then this vector will
+ // grow without bound. It would be nice to occasionally clean out these old
+ // entries.
+ typedef std::vector<base::Optional<std::pair<Key, T>>> InnerVector;
+ InnerVector vector_;
+
+ // Map all keys in the map to indexes in vector_.
+ typedef std::unordered_map<Key, size_t, Hash, EqualTo> InnerMap;
+ InnerMap map_;
+
+ public:
+ struct iterator : std::iterator<std::forward_iterator_tag,
+ std::pair<Key, T>,
+ long,
+ std::pair<Key, T>*,
+ std::pair<Key, T>&> {
+ size_t index_;
+ InnerVector& vector_;
+ iterator(size_t index, InnerVector& vector)
+ : index_(index), vector_(vector) {}
+
+ iterator& operator++() {
+ index_++;
+ while (index_ < vector_.size() && !vector_[index_].has_value()) {
+ index_++;
+ }
+ return *this;
+ }
+
+ iterator operator++(int) {
+ iterator tmp = *this;
+ ++(*this);
+ return tmp;
+ }
+
+ bool operator !=(const iterator& o) const {
+ CHECK(&vector_ == &o.vector_);
+ return index_ != o.index_;
+ }
+
+ bool operator ==(const iterator& o) const {
+ CHECK(&vector_ == &o.vector_);
+ return index_ == o.index_;
+ }
+
+ std::pair<Key, T>& operator *() const {
+ return vector_[index_].value();
+ }
+
+ std::pair<Key, T>* operator ->() const {
+ return &vector_[index_].value();
+ }
+ };
+
+ struct const_iterator : std::iterator<std::forward_iterator_tag,
+ std::pair<Key, T>,
+ long,
+ const std::pair<Key, T>*,
+ const std::pair<Key, T>&> {
+ size_t index_;
+ const InnerVector& vector_;
+ const_iterator(size_t index, const InnerVector& vector)
+ : index_(index), vector_(vector) {}
+
+ const_iterator& operator++() {
+ index_++;
+ while (index_ < vector_.size() && !vector_[index_].has_value()) {
+ index_++;
+ }
+ return *this;
+ }
+
+ const_iterator operator++(int) {
+ const_iterator tmp = *this;
+ ++(*this);
+ return tmp;
+ }
+
+ bool operator !=(const const_iterator& o) const {
+ CHECK(&vector_ == &o.vector_);
+ return index_ != o.index_;
+ }
+
+ bool operator ==(const const_iterator& o) const {
+ CHECK(&vector_ == &o.vector_);
+ return index_ == o.index_;
+ }
+
+ const std::pair<Key, T>& operator *() const {
+ return vector_[index_].value();
+ }
+
+ const std::pair<Key, T>* operator ->() const {
+ return &vector_[index_].value();
+ }
+ };
+
+ iterator find(const Key& k) {
+ auto iter = map_.find(k);
+ return iter != map_.end() ? iterator(iter->second, vector_) : end();
+ }
+
+ const_iterator find(const Key& k) const {
+ auto iter = map_.find(k);
+ return iter != map_.end() ? const_iterator(iter->second, vector_) : end();
+ }
+
+ iterator begin() {
+ for (size_t i = 0; i < vector_.size(); i++) {
+ if (vector_[i].has_value()) {
+ return iterator(i, vector_);
+ }
+ }
+ return end();
+ }
+
+ const_iterator begin() const {
+ for (size_t i = 0; i < vector_.size(); i++) {
+ if (vector_[i].has_value()) {
+ return const_iterator(i, vector_);
+ }
+ }
+ return end();
+ }
+
+ iterator end() { return iterator(vector_.size(), vector_); }
+ const_iterator end() const { return const_iterator(vector_.size(), vector_); }
+
+ T& operator[](const Key& k) {
+ auto iter = find(k);
+ return (iter != end()) ? iter->second : insert(k, T()).first->second;
+ }
+
+ std::pair<iterator, bool> insert(const Key& k, const T& v) {
+ auto iter = find(k);
+ if (iter != end()) {
+ return { iter, false };
+ }
+ size_t index = vector_.size();
+ vector_.emplace_back();
+ vector_.back().emplace(k, v);
+ map_[k] = index;
+ return { iterator(vector_.size() - 1, vector_), true };
+ }
+
+ size_t erase(const Key& k) {
+ auto iter = map_.find(k);
+ if (iter == map_.end()) {
+ return 0;
+ }
+ CHECK(iter->second < vector_.size() && vector_[iter->second].has_value());
+ vector_[iter->second].reset();
+ map_.erase(iter);
+ return 1;
+ }
+
+ size_t size() const {
+ return map_.size();
+ }
+};
+
+} // namespace base
+
+#endif // BASE_DETERMINISTIC_CONTAINERS_H_
diff --git a/base/logging.cc b/base/logging.cc
index 1eaa2ff69136..c8a0a9541a05 100644
--- a/base/logging.cc
+++ b/base/logging.cc
@@ -26,6 +26,10 @@
#include "base/trace_event/base_tracing.h"
#include "build/build_config.h"
+#ifdef OS_MAC
+#include <dlfcn.h>
+#endif
+
#if defined(OS_WIN)
#include <io.h>
#include <windows.h>
@@ -885,8 +889,31 @@ LogMessage::~LogMessage() {
}
}
+#ifdef OS_MAC
+static void (*gRecordReplayPrintFn)(const char*, va_list);
+#endif
+
+static void RecordReplayPrint(const char* aFormat, ...) {
+#ifdef OS_MAC
+ if (!gRecordReplayPrintFn) {
+ void* fnptr = dlsym(RTLD_DEFAULT, "RecordReplayPrint");
+ if (!fnptr) {
+ return;
+ }
+ gRecordReplayPrintFn = reinterpret_cast<void(*)(const char*, va_list)>(fnptr);
+ }
+
+ va_list ap;
+ va_start(ap, aFormat);
+ gRecordReplayPrintFn(aFormat, ap);
+ va_end(ap);
+#endif
+}
+
// writes the common header info to the stream
void LogMessage::Init(const char* file, int line) {
+ RecordReplayPrint("LogMessage::Init %s:%d", file, line);
+
base::StringPiece filename(file);
size_t last_slash_pos = filename.find_last_of("\\/");
if (last_slash_pos != base::StringPiece::npos)
diff --git a/base/memory/platform_shared_memory_region_mac.cc b/base/memory/platform_shared_memory_region_mac.cc
index e3754f8815d3..eb03955864e7 100644
--- a/base/memory/platform_shared_memory_region_mac.cc
+++ b/base/memory/platform_shared_memory_region_mac.cc
@@ -10,6 +10,7 @@
#include "base/mac/scoped_mach_vm.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
+#include "base/record_replay.h"
#include "build/build_config.h"
#if defined(OS_IOS)
@@ -209,7 +210,11 @@ bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
bool is_read_only = kr == KERN_INVALID_RIGHT;
bool expected_read_only = mode == Mode::kReadOnly;
- if (is_read_only != expected_read_only) {
+ // mach_vm_map doesn't behave identically when replaying, so we manually
+ // enforce that the match result is consistent.
+ bool mismatch = recordreplay::RecordReplayValue("CheckPlatformHandlePermissionsCorrespondToMode",
+ is_read_only != expected_read_only);
+ if (mismatch) {
// TODO(crbug.com/838365): convert to DLOG when bug fixed.
LOG(ERROR) << "VM region has a wrong protection mask: it is"
<< (is_read_only ? " " : " not ") << "read-only but it should"
diff --git a/base/memory/read_only_shared_memory_region.cc b/base/memory/read_only_shared_memory_region.cc
index 07cd1249f0d2..12684b511927 100644
--- a/base/memory/read_only_shared_memory_region.cc
+++ b/base/memory/read_only_shared_memory_region.cc
@@ -6,6 +6,7 @@
#include <utility>
+#include "base/record_replay.h"
#include "build/build_config.h"
namespace base {
@@ -82,6 +83,10 @@ ReadOnlySharedMemoryMapping ReadOnlySharedMemoryRegion::MapAt(
if (!handle_.MapAt(offset, size, &memory, &mapped_size))
return {};
+#ifdef OS_MAC
+ recordreplay::RecordReplayBytes("ReadOnlySharedMemoryRegion::MapAt", memory, size);
+#endif
+
return ReadOnlySharedMemoryMapping(memory, size, mapped_size,
handle_.GetGUID());
}
diff --git a/base/message_loop/message_pump_kqueue.cc b/base/message_loop/message_pump_kqueue.cc
index 9a0be64d50a8..63ea116dec6d 100644
--- a/base/message_loop/message_pump_kqueue.cc
+++ b/base/message_loop/message_pump_kqueue.cc
@@ -12,6 +12,7 @@
#include "base/mac/mach_logging.h"
#include "base/mac/scoped_nsautorelease_pool.h"
#include "base/posix/eintr_wrapper.h"
+#include "base/record_replay.h"
namespace base {
@@ -145,15 +146,28 @@ MessagePumpKqueue::~MessagePumpKqueue() {}
void MessagePumpKqueue::Run(Delegate* delegate) {
AutoReset<bool> reset_keep_running(&keep_running_, true);
+ recordreplay::Assert("MessagePumpKqueue::Run Start %d", keep_running_);
+
while (keep_running_) {
mac::ScopedNSAutoreleasePool pool;
+ recordreplay::Assert("MessagePumpKqueue::Run #1");
+
bool do_more_work = DoInternalWork(nullptr);
+
+ recordreplay::Assert("MessagePumpKqueue::Run #2 %d %d", do_more_work, keep_running_);
+
if (!keep_running_)
break;
Delegate::NextWorkInfo next_work_info = delegate->DoWork();
+
+ recordreplay::Assert("MessagePumpKqueue::Run #2.0 %d", next_work_info.is_immediate());
+
do_more_work |= next_work_info.is_immediate();
+
+ recordreplay::Assert("MessagePumpKqueue::Run #2.1 %d %d", do_more_work, keep_running_);
+
if (!keep_running_)
break;
@@ -161,12 +175,17 @@ void MessagePumpKqueue::Run(Delegate* delegate) {
continue;
do_more_work |= delegate->DoIdleWork();
+
+ recordreplay::Assert("MessagePumpKqueue::Run #2.2 %d %d", do_more_work, keep_running_);
+
if (!keep_running_)
break;
if (do_more_work)
continue;
+ recordreplay::Assert("MessagePumpKqueue::Run #3");
+
DoInternalWork(&next_work_info);
}
}
@@ -389,6 +408,7 @@ bool MessagePumpKqueue::DoInternalWork(Delegate::NextWorkInfo* next_work_info) {
}
bool MessagePumpKqueue::ProcessEvents(int count) {
+ recordreplay::Assert("MessagePumpKqueue::ProcessEvents Start");
bool did_work = false;
for (int i = 0; i < count; ++i) {
@@ -443,13 +463,16 @@ bool MessagePumpKqueue::ProcessEvents(int count) {
// The controller could have been removed by some other work callout
// before this event could be processed.
if (controller) {
+ recordreplay::Assert("MessagePumpKqueue::ProcessEvents #1");
controller->watcher()->OnMachMessageReceived(port);
+ recordreplay::Assert("MessagePumpKqueue::ProcessEvents #2");
}
} else {
NOTREACHED() << "Unexpected event for filter " << event->filter;
}
}
+ recordreplay::Assert("MessagePumpKqueue::ProcessEvents Done %d", did_work);
return did_work;
}
diff --git a/base/message_loop/message_pump_mac.mm b/base/message_loop/message_pump_mac.mm
index 1d0ac66a9174..29dd0b2c0e32 100644
--- a/base/message_loop/message_pump_mac.mm
+++ b/base/message_loop/message_pump_mac.mm
@@ -16,6 +16,7 @@
#include "base/mac/scoped_cftyperef.h"
#include "base/message_loop/timer_slack.h"
#include "base/notreached.h"
+#include "base/record_replay.h"
#include "base/run_loop.h"
#include "base/stl_util.h"
#include "base/time/time.h"
@@ -181,6 +182,7 @@ void MessagePumpCFRunLoopBase::ScheduleDelayedWorkImpl(TimeDelta delta) {
} else {
CFRunLoopTimerSetTolerance(delayed_work_timer_, 0);
}
+ recordreplay::Assert("ScheduleDelayedWorkImpl %.2f", delta.InSecondsF());
CFRunLoopTimerSetNextFireDate(
delayed_work_timer_, CFAbsoluteTimeGetCurrent() + delta.InSecondsF());
}
@@ -330,6 +332,8 @@ void MessagePumpCFRunLoopBase::RunDelayedWorkTimer(CFRunLoopTimerRef timer,
// Called from the run loop.
// static
void MessagePumpCFRunLoopBase::RunWorkSource(void* info) {
+ recordreplay::NewCheckpoint();
+
MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
base::mac::CallWithEHFrame(^{
self->RunWork();
@@ -599,11 +603,14 @@ MessagePumpNSRunLoop::~MessagePumpNSRunLoop() {
}
void MessagePumpNSRunLoop::DoRun(Delegate* delegate) {
+ recordreplay::Assert("MessagePumpNSRunLoop::DoRun Start");
while (keep_running()) {
// NSRunLoop manages autorelease pools itself.
+ recordreplay::Assert("MessagePumpNSRunLoop::DoRun #1");
[[NSRunLoop currentRunLoop] runMode:NSDefaultRunLoopMode
beforeDate:[NSDate distantFuture]];
}
+ recordreplay::Assert("MessagePumpNSRunLoop::Done");
}
bool MessagePumpNSRunLoop::DoQuit() {
diff --git a/base/observer_list_threadsafe.h b/base/observer_list_threadsafe.h
index 0148efe63c95..94808fd06755 100644
--- a/base/observer_list_threadsafe.h
+++ b/base/observer_list_threadsafe.h
@@ -12,6 +12,7 @@
#include "base/base_export.h"
#include "base/bind.h"
#include "base/check_op.h"
+#include "base/deterministic_containers.h"
#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/memory/ref_counted.h"
@@ -94,9 +95,9 @@ class BASE_EXPORT ObserverListThreadSafeBase
template <class ObserverType>
class ObserverListThreadSafe : public internal::ObserverListThreadSafeBase {
public:
- ObserverListThreadSafe() = default;
+ ObserverListThreadSafe() : lock_("ObserverListThreadSafe.lock_") {}
explicit ObserverListThreadSafe(ObserverListPolicy policy)
- : policy_(policy) {}
+ : policy_(policy), lock_("ObserverListThreadSafe.lock_") {}
ObserverListThreadSafe(const ObserverListThreadSafe&) = delete;
ObserverListThreadSafe& operator=(const ObserverListThreadSafe&) = delete;
@@ -261,7 +262,7 @@ class ObserverListThreadSafe : public internal::ObserverListThreadSafeBase {
// Keys are observers. Values are the SequencedTaskRunners on which they must
// be notified.
- std::unordered_map<ObserverType*, scoped_refptr<SequencedTaskRunner>>
+ deterministic_unordered_map<ObserverType*, scoped_refptr<SequencedTaskRunner>>
observers_ GUARDED_BY(lock_);
};
diff --git a/base/process/process_posix.cc b/base/process/process_posix.cc
index 79235e08d750..1c60859bfd65 100644
--- a/base/process/process_posix.cc
+++ b/base/process/process_posix.cc
@@ -272,11 +272,14 @@ bool Process::CanBackgroundProcesses() {
#endif // !defined(OS_LINUX) && !defined(OS_CHROMEOS) && !defined(OS_MAC) &&
// !defined(OS_AIX)
+extern "C" void V8RecordReplayFinishRecording();
+
// static
void Process::TerminateCurrentProcessImmediately(int exit_code) {
#if BUILDFLAG(CLANG_PROFILING)
WriteClangProfilingProfile();
#endif
+ V8RecordReplayFinishRecording();
_exit(exit_code);
}
diff --git a/base/profiler/module_cache_mac.cc b/base/profiler/module_cache_mac.cc
index 30568aa2f8df..ae2cda233da8 100644
--- a/base/profiler/module_cache_mac.cc
+++ b/base/profiler/module_cache_mac.cc
@@ -140,6 +140,10 @@ class MacModule : public ModuleCache::Module {
// static
std::unique_ptr<const ModuleCache::Module> ModuleCache::CreateModuleForAddress(
uintptr_t address) {
+ if (getenv("RECORD_REPLAY_DRIVER")) {
+ // When recording/replaying, dladdr doesn't behave as expected.
+ return nullptr;
+ }
Dl_info info;
if (!dladdr(reinterpret_cast<const void*>(address), &info))
return nullptr;
diff --git a/base/profiler/stack_sampler_mac.cc b/base/profiler/stack_sampler_mac.cc
index f1a7df2eb5ea..cf0519ba4a35 100644
--- a/base/profiler/stack_sampler_mac.cc
+++ b/base/profiler/stack_sampler_mac.cc
@@ -32,6 +32,11 @@ std::unique_ptr<StackSampler> StackSampler::Create(
RepeatingClosure record_sample_callback,
StackSamplerTestDelegate* test_delegate) {
DCHECK(!core_unwinders_factory);
+ if (getenv("RECORD_REPLAY_DRIVER")) {
+ // Stack sampling is disabled when recording/replaying; the APIs used to
+ // inspect thread state are not currently supported when replaying.
+ return nullptr;
+ }
return std::make_unique<StackSamplerImpl>(
std::make_unique<StackCopierSuspend>(
std::make_unique<SuspendableThreadDelegateMac>(thread_token)),
diff --git a/base/record_replay.cc b/base/record_replay.cc
new file mode 100644
index 000000000000..8e69d538d1ab
--- /dev/null
+++ b/base/record_replay.cc
@@ -0,0 +1,125 @@
+// Copyright (c) 2021 Record Replay Inc.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/record_replay.h"
+
+#include <stdarg.h>
+
+namespace recordreplay {
+
+// Watch out for build environments where we aren't linked to V8.
+#ifndef NACL_TC_REV
+#define OP(RR) RR
+#define OP2(RR, NORR) RR
+#else
+#define OP(RR)
+#define OP2(RR, NORR) NORR
+#endif
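+
+// OP(expr) compiles to |expr| when we are linked against the record/replay
+// aware V8, and to nothing otherwise; OP2(expr, fallback) compiles to |expr|
+// or to |fallback| under the same condition.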
+
+extern "C" bool V8IsRecordingOrReplaying();
+extern "C" bool V8IsRecording();
+extern "C" bool V8IsReplaying();
+extern "C" void V8RecordReplayAssertVA(const char* format, va_list args);
+extern "C" void V8RecordReplayPrintVA(const char* format, va_list args);
+extern "C" uintptr_t V8RecordReplayValue(const char* why, uintptr_t value);
+extern "C" void V8RecordReplayBytes(const char* why, void* buf, size_t size);
+extern "C" size_t V8RecordReplayCreateOrderedLock(const char* name);
+extern "C" void V8RecordReplayOrderedLock(int lock);
+extern "C" void V8RecordReplayOrderedUnlock(int lock);
+extern "C" void V8RecordReplayNewCheckpoint();
+extern "C" void V8RecordReplayBeginPassThroughEvents();
+extern "C" void V8RecordReplayEndPassThroughEvents();
+extern "C" bool V8RecordReplayHasDivergedFromRecording();
+extern "C" void V8RecordReplayRegisterPointer(void* ptr);
+extern "C" void V8RecordReplayUnregisterPointer(void* ptr);
+extern "C" int V8RecordReplayPointerId(void* ptr);
+extern "C" void* V8RecordReplayIdPointer(int id);
+
+bool IsRecordingOrReplaying() {
+ return OP2(V8IsRecordingOrReplaying(), false);
+}
+
+bool IsRecording() {
+ return OP2(V8IsRecording(), false);
+}
+
+bool IsReplaying() {
+ return OP2(V8IsReplaying(), false);
+}
+
+void Assert(const char* format, ...) {
+#ifndef NACL_TC_REV
+ va_list ap;
+ va_start(ap, format);
+ V8RecordReplayAssertVA(format, ap);
+ va_end(ap);
+#endif
+}
+
+void Print(const char* format, ...) {
+#ifndef NACL_TC_REV
+ va_list ap;
+ va_start(ap, format);
+ V8RecordReplayPrintVA(format, ap);
+ va_end(ap);
+#endif
+}
+
+uintptr_t RecordReplayValue(const char* why, uintptr_t v) {
+ return OP2(V8RecordReplayValue(why, v), v);
+}
+
+void RecordReplayBytes(const char* why, void* buf, size_t size) {
+ OP(V8RecordReplayBytes(why, buf, size));
+}
+
+size_t CreateOrderedLock(const char* name) {
+ return OP2(V8RecordReplayCreateOrderedLock(name), 0);
+}
+
+void OrderedLock(int lock) {
+ OP(V8RecordReplayOrderedLock(lock));
+}
+
+void OrderedUnlock(int lock) {
+ OP(V8RecordReplayOrderedUnlock(lock));
+}
+
+void NewCheckpoint() {
+ OP(V8RecordReplayNewCheckpoint());
+}
+
+void BeginPassThroughEvents() {
+ OP(V8RecordReplayBeginPassThroughEvents());
+}
+
+void EndPassThroughEvents() {
+ OP(V8RecordReplayEndPassThroughEvents());
+}
+
+bool HasDivergedFromRecording() {
+ return OP2(V8RecordReplayHasDivergedFromRecording(), false);
+}
+
+void RegisterPointer(void* ptr) {
+ OP(V8RecordReplayRegisterPointer(ptr));
+}
+
+void UnregisterPointer(void* ptr) {
+ OP(V8RecordReplayUnregisterPointer(ptr));
+}
+
+int PointerId(void* ptr) {
+ return OP2(V8RecordReplayPointerId(ptr), 0);
+}
+
+void* IdPointer(int id) {
+ return OP2(V8RecordReplayIdPointer(id), nullptr);
+}
+
+} // namespace recordreplay
diff --git a/base/record_replay.h b/base/record_replay.h
new file mode 100644
index 000000000000..b8849e92366b
--- /dev/null
+++ b/base/record_replay.h
@@ -0,0 +1,99 @@
+// Copyright (c) 2021 Record Replay Inc.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// API for interacting with the record/replay driver.
+
+#ifndef BASE_RECORD_REPLAY_H_
+#define BASE_RECORD_REPLAY_H_
+
+#include "base/check.h"
+
+namespace recordreplay {
+
+bool IsRecordingOrReplaying();
+bool IsRecording();
+bool IsReplaying();
+
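+// Print() and Diagnostic() route messages through the record/replay driver.
+// Assert() and AssertBytes() add markers to the recording which are checked
+// for consistency when replaying, making it easier to pinpoint where a replay
+// diverges.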
+void Print(const char* format, ...);
+void Diagnostic(const char* format, ...);
+void Assert(const char* format, ...);
+void AssertBytes(const char* why, const void* buf, size_t size);
+
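+// Record a value or a range of bytes while recording, and substitute the
+// recorded contents while replaying, so that callers observe the same result
+// in both runs.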
+uintptr_t RecordReplayValue(const char* why, uintptr_t v);
+void RecordReplayBytes(const char* why, void* buf, size_t size);
+
+size_t CreateOrderedLock(const char* name);
+void OrderedLock(int lock);
+void OrderedUnlock(int lock);
+
+struct AutoOrderedLock {
+ AutoOrderedLock(int id) : id_(id) { OrderedLock(id_); }
+ ~AutoOrderedLock() { OrderedUnlock(id_); }
+ int id_;
+};
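+//
+// Illustrative use (not part of this patch): create a named ordered lock once
+// and scope critical sections with it so that threads pass through them in
+// the same order when replaying as when recording:
+//   static const int lock_id = (int)CreateOrderedLock("MyComponent.lock");
+//   AutoOrderedLock ordered(lock_id);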
+
+void InvalidateRecording(const char* why);
+void NewCheckpoint();
+
+bool AreEventsDisallowed();
+void BeginPassThroughEvents();
+void EndPassThroughEvents();
+void BeginDisallowEvents();
+void EndDisallowEvents();
+
+struct AutoPassThroughEvents {
+ AutoPassThroughEvents() { BeginPassThroughEvents(); }
+ ~AutoPassThroughEvents() { EndPassThroughEvents(); }
+};
+
+struct AutoDisallowEvents {
+ AutoDisallowEvents() { BeginDisallowEvents(); }
+ ~AutoDisallowEvents() { EndDisallowEvents(); }
+};
+
+bool HasDivergedFromRecording();
+
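+// Register a pointer with the driver so that it is assigned an ID which is
+// stable between recording and replaying; PointerId()/IdPointer() convert
+// between registered pointers and their IDs.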
+void RegisterPointer(void* ptr);
+void UnregisterPointer(void* ptr);
+int PointerId(void* ptr);
+void* IdPointer(int id);
+
+// STL comparator that uses pointer IDs to compare elements when
+// recording/replaying, giving a deterministic sort order.
+struct CompareByPointerId {
+ template <typename T>
+ bool operator()(const T* a, const T* b) const {
+ if (recordreplay::IsRecordingOrReplaying()) {
+ int ida = recordreplay::PointerId((void*)a);
+ int idb = recordreplay::PointerId((void*)b);
+ CHECK(ida && idb);
+ return ida < idb;
+ }
+ return (uintptr_t)a < (uintptr_t)b;
+ }
+};
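+//
+// Illustrative use (mirrors sequence_manager_impl.h in this patch):
+//   std::set<TimeDomain*, recordreplay::CompareByPointerId> time_domains;
+// Elements must be registered via RegisterPointer() while recording/replaying.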
+
+} // namespace recordreplay
+
+#endif // BASE_RECORD_REPLAY_H_
diff --git a/base/synchronization/lock.cc b/base/synchronization/lock.cc
index 03297ada52fb..f824b64c8f23 100644
--- a/base/synchronization/lock.cc
+++ b/base/synchronization/lock.cc
@@ -12,7 +12,7 @@
namespace base {
-Lock::Lock() : lock_() {
+Lock::Lock(const char* ordered_name) : lock_(ordered_name) {
}
Lock::~Lock() {
diff --git a/base/synchronization/lock.h b/base/synchronization/lock.h
index d9cfbb758f13..6db1f77b260a 100644
--- a/base/synchronization/lock.h
+++ b/base/synchronization/lock.h
@@ -22,7 +22,7 @@ class LOCKABLE BASE_EXPORT Lock {
public:
#if !DCHECK_IS_ON()
// Optimized wrapper implementation
- Lock() : lock_() {}
+ Lock(const char* ordered_name = nullptr) : lock_(ordered_name) {}
~Lock() {}
void Acquire() EXCLUSIVE_LOCK_FUNCTION() { lock_.Lock(); }
@@ -37,7 +37,7 @@ class LOCKABLE BASE_EXPORT Lock {
// Null implementation if not debug.
void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {}
#else
- Lock();
+ Lock(const char* ordered_name = nullptr);
~Lock();
// NOTE: We do not permit recursive locks and will commonly fire a DCHECK() if
diff --git a/base/synchronization/lock_impl.h b/base/synchronization/lock_impl.h
index cfe53db2f52a..4917365e244e 100644
--- a/base/synchronization/lock_impl.h
+++ b/base/synchronization/lock_impl.h
@@ -47,7 +47,7 @@ class BASE_EXPORT LockImpl {
using NativeHandle = pthread_mutex_t;
#endif
- LockImpl();
+ LockImpl(const char* ordered_name = nullptr);
~LockImpl();
// If the lock is not held, take it and return true. If the lock is already
diff --git a/base/synchronization/lock_impl_posix.cc b/base/synchronization/lock_impl_posix.cc
index 0793661d62b5..dfb24198a622 100644
--- a/base/synchronization/lock_impl_posix.cc
+++ b/base/synchronization/lock_impl_posix.cc
@@ -14,6 +14,10 @@
#include "base/synchronization/synchronization_buildflags.h"
#include "build/build_config.h"
+#ifdef OS_MAC
+#include <dlfcn.h>
+#endif
+
namespace base {
namespace internal {
@@ -54,7 +58,31 @@ std::string SystemErrorCodeToString(int error_code) {
#define PRIORITY_INHERITANCE_LOCKS_POSSIBLE() 1
#endif
-LockImpl::LockImpl() {
+#ifdef OS_MAC
+
+static void (*gAddOrderedPthreadMutexFn)(const char*, pthread_mutex_t*);
+
+static void RecordReplayAddOrderedPthreadMutex(const char* name,
+ pthread_mutex_t* mutex) {
+ if (!gAddOrderedPthreadMutexFn) {
+ void* fnptr = dlsym(RTLD_DEFAULT, "RecordReplayAddOrderedPthreadMutex");
+ if (!fnptr) {
+ return;
+ }
+ gAddOrderedPthreadMutexFn = reinterpret_cast<void(*)(const char*, pthread_mutex_t*)>(fnptr);
+ }
+
+ gAddOrderedPthreadMutexFn(name, mutex);
+}
+
+#else
+
+static void RecordReplayAddOrderedPthreadMutex(const char* name,
+ pthread_mutex_t* mutex) {}
+
+#endif
+
+LockImpl::LockImpl(const char* ordered_name) {
pthread_mutexattr_t mta;
int rv = pthread_mutexattr_init(&mta);
DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
@@ -73,6 +101,10 @@ LockImpl::LockImpl() {
DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
rv = pthread_mutexattr_destroy(&mta);
DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
+
+ if (ordered_name) {
+ RecordReplayAddOrderedPthreadMutex(ordered_name, &native_handle_);
+ }
}
LockImpl::~LockImpl() {
diff --git a/base/synchronization/waitable_event.h b/base/synchronization/waitable_event.h
index 4a10f3973c1b..4a07680cf846 100644
--- a/base/synchronization/waitable_event.h
+++ b/base/synchronization/waitable_event.h
@@ -283,6 +283,9 @@ class BASE_EXPORT WaitableEvent {
// blocked as opposed to idle (and potentially replaced if part of a pool).
bool waiting_is_blocking_ = true;
+ // When recording/replaying, this is used to order wait/signal calls.
+ int record_replay_ordered_lock_id_ = 0;
+
DISALLOW_COPY_AND_ASSIGN(WaitableEvent);
};
diff --git a/base/synchronization/waitable_event_mac.cc b/base/synchronization/waitable_event_mac.cc
index 3e1d57cad051..d8e497ebf58f 100644
--- a/base/synchronization/waitable_event_mac.cc
+++ b/base/synchronization/waitable_event_mac.cc
@@ -16,6 +16,7 @@
#include "base/mac/scoped_dispatch_object.h"
#include "base/optional.h"
#include "base/posix/eintr_wrapper.h"
+#include "base/record_replay.h"
#include "base/threading/scoped_blocking_call.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
@@ -24,9 +25,22 @@
namespace base {
+// The record/replay driver does not order mach_msg calls when they are used
+// for inter-thread communication. We use an ordered lock to ensure that
+// threads do not return from event waits until after the event has actually
+// been signaled.
+static inline void RecordReplayEnsureOrdered(int lock_id) {
+ if (lock_id) {
+ recordreplay::OrderedLock(lock_id);
+ recordreplay::OrderedUnlock(lock_id);
+ }
+}
+
WaitableEvent::WaitableEvent(ResetPolicy reset_policy,
InitialState initial_state)
: policy_(reset_policy) {
+ record_replay_ordered_lock_id_ = (int)recordreplay::CreateOrderedLock("WaitableEvent");
+
mach_port_options_t options{};
options.flags = MPO_INSERT_SEND_RIGHT;
options.mpl.mpl_qlimit = 1;
@@ -50,6 +64,8 @@ void WaitableEvent::Reset() {
// NO_THREAD_SAFETY_ANALYSIS: Runtime dependent locking.
void WaitableEvent::Signal() NO_THREAD_SAFETY_ANALYSIS {
+ RecordReplayEnsureOrdered(record_replay_ordered_lock_id_);
+
// If using the slow watch-list, copy the watchers to a local. After
// mach_msg(), the event object may be deleted by an awoken thread.
const bool use_slow_path = UseSlowWatchList(policy_);
@@ -111,8 +127,10 @@ void WaitableEvent::Wait() {
}
bool WaitableEvent::TimedWait(const TimeDelta& wait_delta) {
- if (wait_delta <= TimeDelta())
+ if (wait_delta <= TimeDelta()) {
+ RecordReplayEnsureOrdered(record_replay_ordered_lock_id_);
return IsSignaled();
+ }
// Record the event that this thread is blocking upon (for hang diagnosis) and
// consider blocked for scheduling purposes. Ignore this for non-blocking
@@ -169,6 +187,8 @@ bool WaitableEvent::TimedWait(const TimeDelta& wait_delta) {
timeout, MACH_PORT_NULL);
}
+ RecordReplayEnsureOrdered(record_replay_ordered_lock_id_);
+
if (kr == KERN_SUCCESS) {
return true;
} else if (rcv_size == 0 && kr == MACH_RCV_TOO_LARGE) {
diff --git a/base/task/common/checked_lock.h b/base/task/common/checked_lock.h
index 4399ec477a54..a696428776ea 100644
--- a/base/task/common/checked_lock.h
+++ b/base/task/common/checked_lock.h
@@ -79,6 +79,7 @@ class LOCKABLE CheckedLock : public CheckedLockImpl {
class LOCKABLE CheckedLock : public Lock {
public:
CheckedLock() = default;
+ CheckedLock(const char* ordered_name) : Lock(ordered_name) {}
explicit CheckedLock(const CheckedLock*) {}
explicit CheckedLock(UniversalPredecessor) {}
explicit CheckedLock(UniversalSuccessor) {}
diff --git a/base/task/common/task_annotator.cc b/base/task/common/task_annotator.cc
index 528105d5bd38..9c6639f46016 100644
--- a/base/task/common/task_annotator.cc
+++ b/base/task/common/task_annotator.cc
@@ -12,6 +12,7 @@
#include "base/hash/md5.h"
#include "base/no_destructor.h"
#include "base/ranges/algorithm.h"
+#include "base/record_replay.h"
#include "base/sys_byteorder.h"
#include "base/threading/thread_local.h"
#include "base/trace_event/base_tracing.h"
@@ -112,6 +113,8 @@ void TaskAnnotator::WillQueueTask(const char* trace_event_name,
void TaskAnnotator::RunTask(const char* trace_event_name,
PendingTask* pending_task) {
+ recordreplay::Assert("TaskAnnotator::RunTask Start");
+
DCHECK(trace_event_name);
DCHECK(pending_task);
@@ -172,6 +175,8 @@ void TaskAnnotator::RunTask(const char* trace_event_name,
task_backtrace.front() = nullptr;
task_backtrace.back() = nullptr;
debug::Alias(&task_backtrace);
+
+ recordreplay::Assert("TaskAnnotator::RunTask Done");
}
uint64_t TaskAnnotator::GetTaskTraceID(const PendingTask& task) const {
diff --git a/base/task/sequence_manager/atomic_flag_set.cc b/base/task/sequence_manager/atomic_flag_set.cc
index 67a149f44593..f9c87e49f231 100644
--- a/base/task/sequence_manager/atomic_flag_set.cc
+++ b/base/task/sequence_manager/atomic_flag_set.cc
@@ -9,6 +9,7 @@
#include "base/bits.h"
#include "base/callback.h"
#include "base/check_op.h"
+#include "base/record_replay.h"
namespace base {
namespace sequence_manager {
@@ -42,6 +43,7 @@ AtomicFlagSet::AtomicFlag::AtomicFlag(AtomicFlag&& other)
void AtomicFlagSet::AtomicFlag::SetActive(bool active) {
DCHECK(group_);
+ recordreplay::AutoOrderedLock lock(group_->ordered_lock_id_);
if (active) {
// Release semantics are required to ensure that all memory accesses made on
// this thread happen-before any others done on the thread running the
@@ -107,12 +109,22 @@ AtomicFlagSet::AtomicFlag AtomicFlagSet::AddFlag(RepeatingClosure callback) {
void AtomicFlagSet::RunActiveCallbacks() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
+
+ recordreplay::Assert("AtomicFlagSet::RunActiveCallbacks Start");
+
for (Group* iter = alloc_list_head_.get(); iter; iter = iter->next.get()) {
// Acquire semantics are required to guarantee that all memory side-effects
// made by other threads that were allowed to perform operations are
// synchronized with this thread before it returns from this method.
- size_t active_flags = std::atomic_exchange_explicit(
+ size_t active_flags;
+ {
+ recordreplay::AutoOrderedLock lock(iter->ordered_lock_id_);
+ active_flags = std::atomic_exchange_explicit(
&iter->flags, size_t{0}, std::memory_order_acquire);
+ }
+
+ recordreplay::Assert("AtomicFlagSet::RunActiveCallbacks #1 %lu", active_flags);
+
// This is O(number of bits set).
while (active_flags) {
int index = Group::IndexOfFirstFlagSet(active_flags);
@@ -120,10 +132,13 @@ void AtomicFlagSet::RunActiveCallbacks() const {
active_flags ^= size_t{1} << index;
iter->flag_callbacks[index].Run();
}
+
+ recordreplay::Assert("AtomicFlagSet::RunActiveCallbacks #2");
}
}
-AtomicFlagSet::Group::Group() = default;
+AtomicFlagSet::Group::Group()
+ : ordered_lock_id_(recordreplay::CreateOrderedLock("AtomicFlagSet::Group")) {}
AtomicFlagSet::Group::~Group() {
DCHECK_EQ(allocated_flags, 0u);
diff --git a/base/task/sequence_manager/atomic_flag_set.h b/base/task/sequence_manager/atomic_flag_set.h
index 42dd2b1bb34f..26b09579a29c 100644
--- a/base/task/sequence_manager/atomic_flag_set.h
+++ b/base/task/sequence_manager/atomic_flag_set.h
@@ -103,6 +103,7 @@ class BASE_EXPORT AtomicFlagSet {
std::unique_ptr<Group> next;
Group* partially_free_list_prev = nullptr;
Group* partially_free_list_next = nullptr;
+ int ordered_lock_id_ = 0;
bool IsFull() const;
diff --git a/base/task/sequence_manager/real_time_domain.cc b/base/task/sequence_manager/real_time_domain.cc
index 2215c10eb893..33381d8f3f2a 100644
--- a/base/task/sequence_manager/real_time_domain.cc
+++ b/base/task/sequence_manager/real_time_domain.cc
@@ -4,6 +4,7 @@
#include "base/task/sequence_manager/real_time_domain.h"
+#include "base/record_replay.h"
#include "base/task/sequence_manager/sequence_manager_impl.h"
namespace base {
@@ -25,19 +26,25 @@ TimeTicks RealTimeDomain::Now() const {
}
Optional<TimeDelta> RealTimeDomain::DelayTillNextTask(LazyNow* lazy_now) {
+ recordreplay::Assert("RealTimeDomain::DelayTillNextTask Start");
+
Optional<TimeTicks> next_run_time = NextScheduledRunTime();
- if (!next_run_time)
+ if (!next_run_time) {
+ recordreplay::Assert("RealTimeDomain::DelayTillNextTask #1");
return nullopt;
+ }
TimeTicks now = lazy_now->Now();
if (now >= next_run_time) {
// Overdue work needs to be run immediately.
+ recordreplay::Assert("RealTimeDomain::DelayTillNextTask #2");
return TimeDelta();
}
TimeDelta delay = *next_run_time - now;
TRACE_EVENT1("sequence_manager", "RealTimeDomain::DelayTillNextTask",
"delay_ms", delay.InMillisecondsF());
+ recordreplay::Assert("RealTimeDomain::DelayTillNextTask Done %.2f", delay.InSecondsF());
return delay;
}
diff --git a/base/task/sequence_manager/sequence_manager_impl.cc b/base/task/sequence_manager/sequence_manager_impl.cc
index a77e4ae15c07..8656f7bb13e0 100644
--- a/base/task/sequence_manager/sequence_manager_impl.cc
+++ b/base/task/sequence_manager/sequence_manager_impl.cc
@@ -19,6 +19,7 @@
#include "base/optional.h"
#include "base/rand_util.h"
#include "base/ranges/algorithm.h"
+#include "base/record_replay.h"
#include "base/task/sequence_manager/real_time_domain.h"
#include "base/task/sequence_manager/task_time_observer.h"
#include "base/task/sequence_manager/thread_controller_impl.h"
@@ -591,11 +592,16 @@ void SequenceManagerImpl::LogTaskDebugInfo(
Task* SequenceManagerImpl::SelectNextTaskImpl(SelectTaskOption option) {
CHECK(Validate());
+ recordreplay::Assert("SequenceManagerImpl::SelectNextTaskImpl Start");
+
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
"SequenceManagerImpl::SelectNextTask");
ReloadEmptyWorkQueues();
+
+ recordreplay::Assert("SequenceManagerImpl::SelectNextTaskImpl #1");
+
LazyNow lazy_now(controller_->GetClock());
MoveReadyDelayedTasksToWorkQueues(&lazy_now);
@@ -607,20 +613,29 @@ Task* SequenceManagerImpl::SelectNextTaskImpl(SelectTaskOption option) {
}
while (true) {
+ recordreplay::Assert("SequenceManagerImpl::SelectNextTaskImpl #2");
+
internal::WorkQueue* work_queue =
main_thread_only().selector.SelectWorkQueueToService(option);
+
+ recordreplay::Assert("SequenceManagerImpl::SelectNextTaskImpl #3");
+
TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
TRACE_DISABLED_BY_DEFAULT("sequence_manager.debug"), "SequenceManager",
this,
AsValueWithSelectorResultForTracing(work_queue,
/* force_verbose */ false));
- if (!work_queue)
+ if (!work_queue) {
+ recordreplay::Assert("SequenceManagerImpl::SelectNextTaskImpl #4");
return nullptr;
+ }
// If the head task was canceled, remove it and run the selector again.
- if (UNLIKELY(work_queue->RemoveAllCanceledTasksFromFront()))
+ if (UNLIKELY(work_queue->RemoveAllCanceledTasksFromFront())) {
+ recordreplay::Assert("SequenceManagerImpl::SelectNextTaskImpl #5");
continue;
+ }
if (UNLIKELY(work_queue->GetFrontTask()->nestable ==
Nestable::kNonNestable &&
@@ -629,6 +644,7 @@ Task* SequenceManagerImpl::SelectNextTaskImpl(SelectTaskOption option) {
// the additional delay should not be a problem.
// Note because we don't delete queues while nested, it's perfectly OK to
// store the raw pointer for |queue| here.
+ recordreplay::Assert("SequenceManagerImpl::SelectNextTaskImpl #6");
internal::TaskQueueImpl::DeferredNonNestableTask deferred_task{
work_queue->TakeTaskFromWorkQueue(), work_queue->task_queue(),
work_queue->queue_type()};
@@ -639,6 +655,7 @@ Task* SequenceManagerImpl::SelectNextTaskImpl(SelectTaskOption option) {
if (UNLIKELY(!ShouldRunTaskOfPriority(
work_queue->task_queue()->GetQueuePriority()))) {
+ recordreplay::Assert("SequenceManagerImpl::SelectNextTaskImpl #7");
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
"SequenceManager.YieldToNative");
return nullptr;
@@ -648,6 +665,8 @@ Task* SequenceManagerImpl::SelectNextTaskImpl(SelectTaskOption option) {
LogTaskDebugInfo(work_queue);
#endif // DCHECK_IS_ON() && !defined(OS_NACL)
+ recordreplay::Assert("SequenceManagerImpl::SelectNextTaskImpl #8");
+
main_thread_only().task_execution_stack.emplace_back(
work_queue->TakeTaskFromWorkQueue(), work_queue->task_queue(),
InitializeTaskTiming(work_queue->task_queue()));
@@ -686,13 +705,18 @@ TimeDelta SequenceManagerImpl::DelayTillNextTask(
SelectTaskOption option) const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
+ recordreplay::Assert("SequenceManagerImpl::DelayTillNextTask Start");
+
if (auto priority =
main_thread_only().selector.GetHighestPendingPriority(option)) {
// If the selector has non-empty queues we trivially know there is immediate
// work to be done. However we may want to yield to native work if it is
// more important.
- if (UNLIKELY(!ShouldRunTaskOfPriority(*priority)))
+ if (UNLIKELY(!ShouldRunTaskOfPriority(*priority))) {
+ recordreplay::Assert("SequenceManagerImpl::DelayTillNextTask #1");
return GetDelayTillNextDelayedTask(lazy_now, option);
+ }
+ recordreplay::Assert("SequenceManagerImpl::DelayTillNextTask #2");
return TimeDelta();
}
@@ -703,8 +727,11 @@ TimeDelta SequenceManagerImpl::DelayTillNextTask(
if (auto priority =
main_thread_only().selector.GetHighestPendingPriority(option)) {
- if (UNLIKELY(!ShouldRunTaskOfPriority(*priority)))
+ if (UNLIKELY(!ShouldRunTaskOfPriority(*priority))) {
+ recordreplay::Assert("SequenceManagerImpl::DelayTillNextTask #3");
return GetDelayTillNextDelayedTask(lazy_now, option);
+ }
+ recordreplay::Assert("SequenceManagerImpl::DelayTillNextTask #4");
return TimeDelta();
}
@@ -712,7 +739,9 @@ TimeDelta SequenceManagerImpl::DelayTillNextTask(
// call MoveReadyDelayedTasksToWorkQueues because it's assumed
// DelayTillNextTask will return TimeDelta>() if the delayed task is due to
// run now.
- return GetDelayTillNextDelayedTask(lazy_now, option);
+ TimeDelta rv = GetDelayTillNextDelayedTask(lazy_now, option);
+ recordreplay::Assert("SequenceManagerImpl::DelayTillNextTask #5 %.2f", rv.InSecondsF());
+ return rv;
}
TimeDelta SequenceManagerImpl::GetDelayTillNextDelayedTask(
@@ -933,7 +962,15 @@ bool SequenceManagerImpl::GetAndClearSystemIsQuiescentBit() {
}
EnqueueOrder SequenceManagerImpl::GetNextSequenceNumber() {
- return enqueue_order_generator_.GenerateNext();
+ EnqueueOrder rv = enqueue_order_generator_.GenerateNext();
+
+ // EnqueueOrders need to be the same when replaying as when recording,
+ // because they affect the order in which tasks will run. We could use
+ // an ordered lock here, but it's more efficient to just record/replay
+ // the EnqueueOrders which were created when recording.
+ recordreplay::RecordReplayBytes("GetNextSequenceNumber", &rv, sizeof(rv));
+
+ return rv;
}
std::unique_ptr<trace_event::ConvertableToTraceFormat>
diff --git a/base/task/sequence_manager/sequence_manager_impl.h b/base/task/sequence_manager/sequence_manager_impl.h
index 225dd4a69a58..6187763ca3f2 100644
--- a/base/task/sequence_manager/sequence_manager_impl.h
+++ b/base/task/sequence_manager/sequence_manager_impl.h
@@ -23,6 +23,7 @@
#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_pump_type.h"
#include "base/pending_task.h"
+#include "base/record_replay.h"
#include "base/run_loop.h"
#include "base/sequenced_task_runner.h"
#include "base/single_thread_task_runner.h"
@@ -278,7 +279,7 @@ class BASE_EXPORT SequenceManagerImpl
internal::TaskQueueSelector selector;
ObserverList<TaskObserver>::Unchecked task_observers;
ObserverList<TaskTimeObserver>::Unchecked task_time_observers;
- std::set<TimeDomain*> time_domains;
+ std::set<TimeDomain*, recordreplay::CompareByPointerId> time_domains;
std::unique_ptr<internal::RealTimeDomain> real_time_domain;
// If true MaybeReclaimMemory will attempt to reclaim memory.
diff --git a/base/task/sequence_manager/task_queue_impl.cc b/base/task/sequence_manager/task_queue_impl.cc
index be02224503f0..cd9b8ab4bbe9 100644
--- a/base/task/sequence_manager/task_queue_impl.cc
+++ b/base/task/sequence_manager/task_queue_impl.cc
@@ -11,6 +11,7 @@
#include "base/logging.h"
#include "base/ranges/algorithm.h"
+#include "base/record_replay.h"
#include "base/strings/stringprintf.h"
#include "base/task/common/scoped_defer_task_posting.h"
#include "base/task/sequence_manager/sequence_manager_impl.h"
@@ -120,6 +121,7 @@ TaskQueueImpl::TaskQueueImpl(SequenceManagerImpl* sequence_manager,
? sequence_manager->associated_thread()
: AssociatedThreadId::CreateBound()),
task_poster_(MakeRefCounted<GuardedTaskPoster>(this)),
+ any_thread_lock_("TaskQueueImpl.any_thread_lock_"),
any_thread_(time_domain),
main_thread_only_(this, time_domain),
empty_queues_to_reload_handle_(
@@ -129,6 +131,7 @@ TaskQueueImpl::TaskQueueImpl(SequenceManagerImpl* sequence_manager,
should_monitor_quiescence_(spec.should_monitor_quiescence),
should_notify_observers_(spec.should_notify_observers),
delayed_fence_allowed_(spec.delayed_fence_allowed) {
+ recordreplay::RegisterPointer(this);
DCHECK(time_domain);
UpdateCrossThreadQueueStateLocked();
// SequenceManager can't be set later, so we need to prevent task runners
@@ -138,6 +141,7 @@ TaskQueueImpl::TaskQueueImpl(SequenceManagerImpl* sequence_manager,
}
TaskQueueImpl::~TaskQueueImpl() {
+ recordreplay::UnregisterPointer(this);
#if DCHECK_IS_ON()
base::internal::CheckedAutoLock lock(any_thread_lock_);
// NOTE this check shouldn't fire because |SequenceManagerImpl::queues_|
@@ -291,6 +295,10 @@ void TaskQueueImpl::PostImmediateTaskImpl(PostedTask task,
// risk breaking the assumption that sequence numbers increase monotonically
// within a queue.
EnqueueOrder sequence_number = sequence_manager_->GetNextSequenceNumber();
+
+ recordreplay::Assert("TaskQueueImpl::PostImmediateTaskImpl %lu",
+ (size_t)sequence_number);
+
bool was_immediate_incoming_queue_empty =
any_thread_.immediate_incoming_queue.empty();
// Delayed run time is null for an immediate task.
@@ -362,6 +370,9 @@ void TaskQueueImpl::PostDelayedTaskImpl(PostedTask task,
// Lock-free fast path for delayed tasks posted from the main thread.
EnqueueOrder sequence_number = sequence_manager_->GetNextSequenceNumber();
+ recordreplay::Assert("TaskQueueImpl::PostDelayedTaskImpl #1 %lu",
+ (size_t)sequence_number);
+
TimeTicks time_domain_now = main_thread_only().time_domain->Now();
TimeTicks time_domain_delayed_run_time = time_domain_now + task.delay;
if (sequence_manager_->GetAddQueueTimeToTasks())
@@ -378,6 +389,9 @@ void TaskQueueImpl::PostDelayedTaskImpl(PostedTask task,
// assumption prove to be false in future, we may need to revisit this.
EnqueueOrder sequence_number = sequence_manager_->GetNextSequenceNumber();
+ recordreplay::Assert("TaskQueueImpl::PostDelayedTaskImpl #2 %lu",
+ (size_t)sequence_number);
+
TimeTicks time_domain_now;
{
base::internal::CheckedAutoLock lock(any_thread_lock_);
@@ -456,6 +470,8 @@ void TaskQueueImpl::ScheduleDelayedWorkTask(Task pending_task) {
}
void TaskQueueImpl::ReloadEmptyImmediateWorkQueue() {
+ recordreplay::Assert("TaskQueueImpl::ReloadEmptyImmediateWorkQueue");
+
DCHECK(main_thread_only().immediate_work_queue->Empty());
main_thread_only().immediate_work_queue->TakeImmediateIncomingQueueTasks();
@@ -570,6 +586,9 @@ Optional<TimeTicks> TaskQueueImpl::GetNextScheduledWakeUp() {
}
void TaskQueueImpl::MoveReadyDelayedTasksToWorkQueue(LazyNow* lazy_now) {
+ recordreplay::Assert("TaskQueueImpl::MoveReadyDelayedTasksToWorkQueue %lu",
+ recordreplay::PointerId(this));
+
// Enqueue all delayed tasks that should be running now, skipping any that
// have been canceled.
WorkQueue::TaskPusher delayed_work_queue_task_pusher(
@@ -595,6 +614,9 @@ void TaskQueueImpl::MoveReadyDelayedTasksToWorkQueue(LazyNow* lazy_now) {
DCHECK(!task->enqueue_order_set());
task->set_enqueue_order(sequence_manager_->GetNextSequenceNumber());
+ recordreplay::Assert("TaskQueueImpl::MoveReadyDelayedTasksToWorkQueue %lu",
+ task->enqueue_order());
+
delayed_work_queue_task_pusher.Push(task);
main_thread_only().delayed_incoming_queue.pop();
}
@@ -657,6 +679,9 @@ void TaskQueueImpl::SetQueuePriority(TaskQueue::QueuePriority priority) {
main_thread_only()
.enqueue_order_at_which_we_became_unblocked_with_normal_priority =
sequence_manager_->GetNextSequenceNumber();
+
+ recordreplay::Assert("TaskQueueImpl::SetQueuePriority %lu",
+ main_thread_only().enqueue_order_at_which_we_became_unblocked_with_normal_priority);
}
}
@@ -808,6 +833,9 @@ void TaskQueueImpl::InsertFence(TaskQueue::InsertFencePosition position) {
? sequence_manager_->GetNextSequenceNumber()
: EnqueueOrder::blocking_fence();
+ recordreplay::Assert("TaskQueueImpl::InsertFence %d %lu",
+ position, (size_t)current_fence);
+
// Tasks posted after this point will have a strictly higher enqueue order
// and will be blocked from running.
main_thread_only().current_fence = current_fence;
@@ -1365,6 +1393,9 @@ void TaskQueueImpl::OnQueueUnblocked() {
main_thread_only().enqueue_order_at_which_we_became_unblocked =
sequence_manager_->GetNextSequenceNumber();
+ recordreplay::Assert("TaskQueueImpl::OnQueueUnblocked %lu",
+ main_thread_only().enqueue_order_at_which_we_became_unblocked);
+
static_assert(TaskQueue::QueuePriority::kLowPriority >
TaskQueue::QueuePriority::kNormalPriority,
"Priorities are not ordered as expected");
diff --git a/base/task/sequence_manager/task_queue_selector.cc b/base/task/sequence_manager/task_queue_selector.cc
index 3bacdc49e084..0b83fb9e49df 100644
--- a/base/task/sequence_manager/task_queue_selector.cc
+++ b/base/task/sequence_manager/task_queue_selector.cc
@@ -8,6 +8,7 @@
#include "base/bits.h"
#include "base/check_op.h"
+#include "base/record_replay.h"
#include "base/task/sequence_manager/associated_thread_id.h"
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/task/sequence_manager/work_queue.h"
@@ -171,9 +172,13 @@ WorkQueue* TaskQueueSelector::SelectWorkQueueToService(
SelectTaskOption option) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
+ recordreplay::Assert("TaskQueueSelector::SelectWorkQueueToService Start");
+
auto highest_priority = GetHighestPendingPriority(option);
- if (!highest_priority.has_value())
+ if (!highest_priority.has_value()) {
+ recordreplay::Assert("TaskQueueSelector::SelectWorkQueueToService #1");
return nullptr;
+ }
// Select the priority from which we will select a task. Usually this is
// the highest priority for which we have work, unless we are starving a lower
@@ -193,6 +198,8 @@ WorkQueue* TaskQueueSelector::SelectWorkQueueToService(
:
#endif
ChooseImmediateOnlyWithPriority<SetOperationOldest>(priority);
+ recordreplay::Assert("TaskQueueSelector::SelectWorkQueueToService #2 %lu",
+ recordreplay::PointerId(queue));
return queue;
}
@@ -211,6 +218,9 @@ WorkQueue* TaskQueueSelector::SelectWorkQueueToService(
} else {
immediate_starvation_count_ = 0;
}
+
+ recordreplay::Assert("TaskQueueSelector::SelectWorkQueueToService #3 %lu",
+ recordreplay::PointerId(queue));
return queue;
}
@@ -227,23 +237,31 @@ void TaskQueueSelector::SetTaskQueueSelectorObserver(Observer* observer) {
Optional<TaskQueue::QueuePriority> TaskQueueSelector::GetHighestPendingPriority(
SelectTaskOption option) const {
+ recordreplay::Assert("TaskQueueSelector::GetHighestPendingPriority Start %d", option);
+
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
- if (!active_priority_tracker_.HasActivePriority())
+ if (!active_priority_tracker_.HasActivePriority()) {
+ recordreplay::Assert("TaskQueueSelector::GetHighestPendingPriority #1");
return nullopt;
+ }
TaskQueue::QueuePriority highest_priority =
active_priority_tracker_.HighestActivePriority();
- if (option != SelectTaskOption::kSkipDelayedTask)
+ if (option != SelectTaskOption::kSkipDelayedTask) {
+ recordreplay::Assert("TaskQueueSelector::GetHighestPendingPriority #2 %d", highest_priority);
return highest_priority;
+ }
for (; highest_priority != TaskQueue::kQueuePriorityCount;
highest_priority = NextPriority(highest_priority)) {
if (active_priority_tracker_.IsActive(highest_priority) &&
!immediate_work_queue_sets_.IsSetEmpty(highest_priority)) {
+ recordreplay::Assert("TaskQueueSelector::GetHighestPendingPriority #3 %d", highest_priority);
return highest_priority;
}
}
+ recordreplay::Assert("TaskQueueSelector::GetHighestPendingPriority #4");
return nullopt;
}
@@ -263,6 +281,8 @@ TaskQueueSelector::ActivePriorityTracker::ActivePriorityTracker() = default;
void TaskQueueSelector::ActivePriorityTracker::SetActive(
TaskQueue::QueuePriority priority,
bool is_active) {
+ recordreplay::Assert("ActivePriorityTracker::SetActive %d %d", priority, is_active);
+
DCHECK_LT(priority, TaskQueue::QueuePriority::kQueuePriorityCount);
DCHECK_NE(IsActive(priority), is_active);
if (is_active) {
diff --git a/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc b/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc
index e2322c8cf03e..e7f77e9cdf5e 100644
--- a/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc
+++ b/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc
@@ -11,6 +11,7 @@
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_pump.h"
+#include "base/record_replay.h"
#include "base/threading/hang_watcher.h"
#include "base/time/tick_clock.h"
#include "base/trace_event/base_tracing.h"
@@ -257,6 +258,8 @@ void ThreadControllerWithMessagePumpImpl::BeforeWait() {
MessagePump::Delegate::NextWorkInfo
ThreadControllerWithMessagePumpImpl::DoWork() {
+ recordreplay::Assert("ThreadControllerWithMessagePumpImpl::DoWork Start");
+
MaybeStartHangWatchScopeEnabled();
work_deduplicator_.OnWorkStarted();
@@ -270,6 +273,7 @@ ThreadControllerWithMessagePumpImpl::DoWork() {
ShouldScheduleWork::kScheduleImmediate) {
// Need to run new work immediately, but due to the contract of DoWork
// we only need to return a null TimeTicks to ensure that happens.
+ recordreplay::Assert("ThreadControllerWithMessagePumpImpl::DoWork #1");
return MessagePump::Delegate::NextWorkInfo();
}
@@ -277,6 +281,7 @@ ThreadControllerWithMessagePumpImpl::DoWork() {
// special-casing here avoids unnecessarily sampling Now() when out of work.
if (delay_till_next_task.is_max()) {
main_thread_only().next_delayed_do_work = TimeTicks::Max();
+ recordreplay::Assert("ThreadControllerWithMessagePumpImpl::DoWork #2");
return {TimeTicks::Max()};
}
@@ -293,10 +298,13 @@ ThreadControllerWithMessagePumpImpl::DoWork() {
main_thread_only().next_delayed_do_work =
main_thread_only().quit_runloop_after;
// If we've passed |quit_runloop_after| there's no more work to do.
- if (continuation_lazy_now.Now() >= main_thread_only().quit_runloop_after)
+ if (continuation_lazy_now.Now() >= main_thread_only().quit_runloop_after) {
+ recordreplay::Assert("ThreadControllerWithMessagePumpImpl::DoWork #3");
return {TimeTicks::Max()};
+ }
}
+ recordreplay::Assert("ThreadControllerWithMessagePumpImpl::DoWork #4");
return {CapAtOneDay(main_thread_only().next_delayed_do_work,
&continuation_lazy_now),
continuation_lazy_now.Now()};
@@ -307,24 +315,31 @@ TimeDelta ThreadControllerWithMessagePumpImpl::DoWorkImpl(
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
"ThreadControllerImpl::DoWork");
+ recordreplay::Assert("ThreadControllerWithMessagePumpImpl::DoWorkImpl Start");
+
if (!main_thread_only().task_execution_allowed) {
// Broadcast in a trace event that application tasks were disallowed. This
// helps spot nested loops that intentionally starve application tasks.
TRACE_EVENT0("base", "ThreadController: application tasks disallowed");
- if (main_thread_only().quit_runloop_after == TimeTicks::Max())
+ if (main_thread_only().quit_runloop_after == TimeTicks::Max()) {
+ recordreplay::Assert("ThreadControllerWithMessagePumpImpl::DoWorkImpl #1");
return TimeDelta::Max();
+ }
+ recordreplay::Assert("ThreadControllerWithMessagePumpImpl::DoWorkImpl #2");
return main_thread_only().quit_runloop_after - continuation_lazy_now->Now();
}
DCHECK(main_thread_only().task_source);
for (int i = 0; i < main_thread_only().work_batch_size; i++) {
+ recordreplay::Assert("ThreadControllerWithMessagePumpImpl::DoWorkImpl #3");
const SequencedTaskSource::SelectTaskOption select_task_option =
power_monitor_.IsProcessInPowerSuspendState()
? SequencedTaskSource::SelectTaskOption::kSkipDelayedTask
: SequencedTaskSource::SelectTaskOption::kDefault;
Task* task =
main_thread_only().task_source->SelectNextTask(select_task_option);
+ recordreplay::Assert("ThreadControllerWithMessagePumpImpl::DoWorkImpl #4 %d", !!task);
if (!task)
break;
@@ -344,6 +359,8 @@ TimeDelta ThreadControllerWithMessagePumpImpl::DoWorkImpl(
// See https://crbug.com/681863 and https://crbug.com/874982
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"), "RunTask");
+ recordreplay::Assert("ThreadControllerWithMessagePumpImpl::DoWorkImpl #5");
+
{
// Trace events should finish before we call DidRunTask to ensure that
// SequenceManager trace events do not interfere with them.
@@ -351,6 +368,8 @@ TimeDelta ThreadControllerWithMessagePumpImpl::DoWorkImpl(
task_annotator_.RunTask("SequenceManager RunTask", task);
}
+ recordreplay::Assert("ThreadControllerWithMessagePumpImpl::DoWorkImpl #6");
+
// This processes microtasks, hence all scoped operations above must end
// after it.
main_thread_only().task_source->DidRunTask();
@@ -359,12 +378,16 @@ TimeDelta ThreadControllerWithMessagePumpImpl::DoWorkImpl(
// When Quit() is called we must stop running the batch because the caller
// expects per-task granularity.
- if (main_thread_only().quit_pending)
+ if (main_thread_only().quit_pending) {
+ recordreplay::Assert("ThreadControllerWithMessagePumpImpl::DoWorkImpl #7");
break;
+ }
}
- if (main_thread_only().quit_pending)
+ if (main_thread_only().quit_pending) {
+ recordreplay::Assert("ThreadControllerWithMessagePumpImpl::DoWorkImpl #8");
return TimeDelta::Max();
+ }
work_deduplicator_.WillCheckForMoreWork();
@@ -374,9 +397,16 @@ TimeDelta ThreadControllerWithMessagePumpImpl::DoWorkImpl(
power_monitor_.IsProcessInPowerSuspendState()
? SequencedTaskSource::SelectTaskOption::kSkipDelayedTask
: SequencedTaskSource::SelectTaskOption::kDefault;
+
+ recordreplay::Assert("ThreadControllerWithMessagePumpImpl::DoWorkImpl 8.1 %d",
+ select_task_option);
+
TimeDelta do_work_delay = main_thread_only().task_source->DelayTillNextTask(
continuation_lazy_now, select_task_option);
DCHECK_GE(do_work_delay, TimeDelta());
+
+ recordreplay::Assert("ThreadControllerWithMessagePumpImpl::DoWorkImpl Done %.2f",
+ do_work_delay.InSecondsF());
return do_work_delay;
}
diff --git a/base/task/sequence_manager/time_domain.cc b/base/task/sequence_manager/time_domain.cc
index f6c8e6abcae7..1269f60c1235 100644
--- a/base/task/sequence_manager/time_domain.cc
+++ b/base/task/sequence_manager/time_domain.cc
@@ -4,6 +4,7 @@
#include "base/task/sequence_manager/time_domain.h"
+#include "base/record_replay.h"
#include "base/task/sequence_manager/associated_thread_id.h"
#include "base/task/sequence_manager/sequence_manager_impl.h"
#include "base/task/sequence_manager/task_queue_impl.h"
@@ -15,9 +16,13 @@ namespace sequence_manager {
TimeDomain::TimeDomain()
: sequence_manager_(nullptr),
- associated_thread_(MakeRefCounted<internal::AssociatedThreadId>()) {}
+ associated_thread_(MakeRefCounted<internal::AssociatedThreadId>()) {
+ // TimeDomains can be compared based on their pointer IDs; see sequence_manager_impl.h.
+ recordreplay::RegisterPointer(this);
+}
TimeDomain::~TimeDomain() {
+ recordreplay::UnregisterPointer(this);
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
}
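The RegisterPointer / UnregisterPointer / PointerId calls above, and the matching ones added to WorkQueue and WorkerThread later in this patch, follow one recurring idiom: an object registers itself on construction so that recordreplay::Assert can log a stable ID for it rather than a raw address, which would differ between recording and replay. A minimal sketch of that idiom, using only the recordreplay helpers that appear in this patch (the class and method names are illustrative, not part of the patch):

#include "base/record_replay.h"

// Sketch: any object whose identity appears in replay asserts registers itself
// on construction and unregisters on destruction, so PointerId() maps it to
// the same small integer when recording and when replaying.
class ReplayTrackedObject {
 public:
  ReplayTrackedObject() { recordreplay::RegisterPointer(this); }
  ~ReplayTrackedObject() { recordreplay::UnregisterPointer(this); }

  void DoSomething() {
    // Log the registered ID, not the raw pointer value.
    recordreplay::Assert("ReplayTrackedObject::DoSomething %lu",
                         recordreplay::PointerId(this));
  }
};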
diff --git a/base/task/sequence_manager/work_deduplicator.cc b/base/task/sequence_manager/work_deduplicator.cc
index f72eb7c7d88a..bcd9c3dae3d9 100644
--- a/base/task/sequence_manager/work_deduplicator.cc
+++ b/base/task/sequence_manager/work_deduplicator.cc
@@ -6,6 +6,7 @@
#include <utility>
#include "base/check_op.h"
+#include "base/record_replay.h"
namespace base {
namespace sequence_manager {
@@ -13,12 +14,14 @@ namespace internal {
WorkDeduplicator::WorkDeduplicator(
scoped_refptr<AssociatedThreadId> associated_thread)
- : associated_thread_(std::move(associated_thread)) {}
+ : state_ordered_lock_id_(recordreplay::CreateOrderedLock("WorkDeduplicator.state_"))
+ , associated_thread_(std::move(associated_thread)) {}
WorkDeduplicator::~WorkDeduplicator() = default;
WorkDeduplicator::ShouldScheduleWork WorkDeduplicator::BindToCurrentThread() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
+ recordreplay::AutoOrderedLock ordered(state_ordered_lock_id_);
int previous_flags = state_.fetch_or(kBoundFlag);
DCHECK_EQ(previous_flags & kBoundFlag, 0) << "Can't bind twice!";
return previous_flags & kPendingDoWorkFlag
@@ -28,6 +31,7 @@ WorkDeduplicator::ShouldScheduleWork WorkDeduplicator::BindToCurrentThread() {
WorkDeduplicator::ShouldScheduleWork WorkDeduplicator::OnWorkRequested() {
// Set kPendingDoWorkFlag and return true if we were previously kIdle.
+ recordreplay::AutoOrderedLock ordered(state_ordered_lock_id_);
return state_.fetch_or(kPendingDoWorkFlag) == State::kIdle
? ShouldScheduleWork::kScheduleImmediate
: ShouldScheduleWork::kNotNeeded;
@@ -42,16 +46,20 @@ WorkDeduplicator::ShouldScheduleWork WorkDeduplicator::OnDelayedWorkRequested()
}
void WorkDeduplicator::OnWorkStarted() {
+ recordreplay::Assert("WorkDeduplicator::OnWorkStarted");
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
DCHECK_EQ(state_.load() & kBoundFlag, kBoundFlag);
// Clear kPendingDoWorkFlag and mark us as in a DoWork.
+ recordreplay::AutoOrderedLock ordered(state_ordered_lock_id_);
state_.store(State::kInDoWork);
}
void WorkDeduplicator::WillCheckForMoreWork() {
+ recordreplay::Assert("WorkDeduplicator::WillCheckForMoreWork");
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
DCHECK_EQ(state_.load() & kBoundFlag, kBoundFlag);
// Clear kPendingDoWorkFlag if it was set.
+ recordreplay::AutoOrderedLock ordered(state_ordered_lock_id_);
state_.store(State::kInDoWork);
}
@@ -59,6 +67,8 @@ WorkDeduplicator::ShouldScheduleWork WorkDeduplicator::DidCheckForMoreWork(
NextTask next_task) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
DCHECK_EQ(state_.load() & kBoundFlag, kBoundFlag);
+ recordreplay::Assert("WorkDeduplicator::DidCheckForMoreWork %d",
+ next_task == NextTask::kIsImmediate);
if (next_task == NextTask::kIsImmediate) {
state_.store(State::kDoWorkPending);
return ShouldScheduleWork::kScheduleImmediate;
@@ -67,7 +77,10 @@ WorkDeduplicator::ShouldScheduleWork WorkDeduplicator::DidCheckForMoreWork(
// OnWorkRequested() was invoked racily from another thread just after this
// thread determined that the next task wasn't immediate. In that case, that
// other thread relies on us to return kScheduleImmediate.
- return (state_.fetch_and(~kInDoWorkFlag) & kPendingDoWorkFlag)
+ recordreplay::AutoOrderedLock ordered(state_ordered_lock_id_);
+ bool v = (state_.fetch_and(~kInDoWorkFlag) & kPendingDoWorkFlag);
+ recordreplay::Assert("WorkDeduplicator::DidCheckForMoreWork #1 %d", v);
+ return v
? ShouldScheduleWork::kScheduleImmediate
: ShouldScheduleWork::kNotNeeded;
}
diff --git a/base/task/sequence_manager/work_deduplicator.h b/base/task/sequence_manager/work_deduplicator.h
index 5a21f3a696b4..7bc150bc1da4 100644
--- a/base/task/sequence_manager/work_deduplicator.h
+++ b/base/task/sequence_manager/work_deduplicator.h
@@ -134,6 +134,7 @@ class BASE_EXPORT WorkDeduplicator {
};
std::atomic<int> state_{State::kUnbound};
+ int state_ordered_lock_id_ = 0;
scoped_refptr<AssociatedThreadId> associated_thread_;
};
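The WorkDeduplicator changes above show the other recurring idiom in this patch: racy atomic transitions are bracketed by a named ordered lock so that threads reach them in the same order during replay as during recording. A minimal sketch of that idiom, again assuming only the recordreplay helpers this patch relies on (CreateOrderedLock, AutoOrderedLock); the class itself is illustrative, not part of the patch:

#include <atomic>

#include "base/record_replay.h"

// Sketch: the ordered-lock id is created once with a stable name, and every
// atomic transition that other threads can race with takes an AutoOrderedLock
// first, mirroring how state_ is guarded in WorkDeduplicator above.
class ReplayOrderedFlag {
 public:
  ReplayOrderedFlag()
      : lock_id_(recordreplay::CreateOrderedLock("ReplayOrderedFlag.state_")) {}

  bool TestAndSet() {
    recordreplay::AutoOrderedLock ordered(lock_id_);
    return (state_.fetch_or(1) & 1) != 0;
  }

 private:
  int lock_id_ = 0;
  std::atomic<int> state_{0};
};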
diff --git a/base/task/sequence_manager/work_queue.cc b/base/task/sequence_manager/work_queue.cc
index b3667285ad1a..4e1a182a29bf 100644
--- a/base/task/sequence_manager/work_queue.cc
+++ b/base/task/sequence_manager/work_queue.cc
@@ -5,6 +5,7 @@
#include "base/task/sequence_manager/work_queue.h"
#include "base/debug/alias.h"
+#include "base/record_replay.h"
#include "base/task/sequence_manager/sequence_manager_impl.h"
#include "base/task/sequence_manager/work_queue_sets.h"
#include "build/build_config.h"
@@ -16,7 +17,10 @@ namespace internal {
WorkQueue::WorkQueue(TaskQueueImpl* task_queue,
const char* name,
QueueType queue_type)
- : task_queue_(task_queue), name_(name), queue_type_(queue_type) {}
+ : task_queue_(task_queue), name_(name), queue_type_(queue_type) {
+ recordreplay::RegisterPointer(this);
+ recordreplay::Assert("WorkQueue::WorkQueue %lu", recordreplay::PointerId(this));
+}
Value WorkQueue::AsValue(TimeTicks now) const {
Value state(Value::Type::LIST);
@@ -26,6 +30,7 @@ Value WorkQueue::AsValue(TimeTicks now) const {
}
WorkQueue::~WorkQueue() {
+ recordreplay::UnregisterPointer(this);
DCHECK(!work_queue_sets_) << task_queue_->GetName() << " : "
<< work_queue_sets_->GetName() << " : " << name_;
}
@@ -53,17 +58,25 @@ bool WorkQueue::BlockedByFence() const {
}
bool WorkQueue::GetFrontTaskEnqueueOrder(EnqueueOrder* enqueue_order) const {
- if (tasks_.empty() || BlockedByFence())
+ if (tasks_.empty() || BlockedByFence()) {
+ recordreplay::Assert("WorkQueue::GetFrontTaskEnqueueOrder #1 %lu",
+ recordreplay::PointerId((void*)this));
return false;
+ }
// Quick sanity check.
DCHECK_LE(tasks_.front().enqueue_order(), tasks_.back().enqueue_order())
<< task_queue_->GetName() << " : " << work_queue_sets_->GetName() << " : "
<< name_;
*enqueue_order = tasks_.front().enqueue_order();
+ recordreplay::Assert("WorkQueue::GetFrontTaskEnqueueOrder #2 %lu %lu",
+ recordreplay::PointerId((void*)this), (size_t)*enqueue_order);
return true;
}
void WorkQueue::Push(Task task) {
+ recordreplay::Assert("WorkQueue::Push %lu %lu",
+ recordreplay::PointerId(this), (size_t)task.enqueue_order());
+
bool was_empty = tasks_.empty();
#ifndef NDEBUG
DCHECK(task.enqueue_order_set());
@@ -92,6 +105,10 @@ WorkQueue::TaskPusher::TaskPusher(TaskPusher&& other)
}
void WorkQueue::TaskPusher::Push(Task* task) {
+ recordreplay::Assert("WorkQueue::TaskPusher::Push %lu %lu",
+ recordreplay::PointerId(work_queue_),
+ (size_t)task->enqueue_order());
+
DCHECK(work_queue_);
#ifndef NDEBUG
@@ -116,10 +133,16 @@ WorkQueue::TaskPusher::~TaskPusher() {
}
WorkQueue::TaskPusher WorkQueue::CreateTaskPusher() {
+ recordreplay::Assert("WorkQueue::CreateTaskPusher %lu",
+ recordreplay::PointerId(this));
return TaskPusher(this);
}
void WorkQueue::PushNonNestableTaskToFront(Task task) {
+ recordreplay::Assert("WorkQueue::PushNonNestableTaskToFront %lu %lu",
+ recordreplay::PointerId(this),
+ (size_t)task.enqueue_order());
+
DCHECK(task.nestable == Nestable::kNonNestable);
bool was_empty = tasks_.empty();
@@ -169,11 +192,16 @@ Task WorkQueue::TakeTaskFromWorkQueue() {
DCHECK(work_queue_sets_);
DCHECK(!tasks_.empty());
+ recordreplay::Assert("WorkQueue::TakeTaskFromWorkQueue Start %lu",
+ recordreplay::PointerId(this));
+
Task pending_task = std::move(tasks_.front());
tasks_.pop_front();
// NB immediate tasks have a different pipeline to delayed ones.
+ recordreplay::Assert("WorkQueue::TakeTaskFromWorkQueue #0 %d", tasks_.empty());
if (tasks_.empty()) {
// NB delayed tasks are inserted via Push, no don't need to reload those.
+ recordreplay::Assert("WorkQueue::TakeTaskFromWorkQueue #1 %d", queue_type_);
if (queue_type_ == QueueType::kImmediate) {
// Short-circuit the queue reload so that OnPopMinQueueInSet does the
// right thing.
@@ -195,6 +223,8 @@ Task WorkQueue::TakeTaskFromWorkQueue() {
work_queue_sets_->OnPopMinQueueInSet(this);
#endif
task_queue_->TraceQueueSize();
+
+ recordreplay::Assert("WorkQueue::TakeTaskFromWorkQueue Done");
return pending_task;
}
@@ -229,6 +259,7 @@ bool WorkQueue::RemoveAllCanceledTasksFromFront() {
break;
tasks_.pop_front();
task_removed = true;
+ recordreplay::Assert("WorkQueue::RemoveAllCanceledTasksFromFront #1");
}
if (task_removed) {
if (tasks_.empty()) {
diff --git a/base/task/sequence_manager/work_queue_sets.cc b/base/task/sequence_manager/work_queue_sets.cc
index 937b84b23d60..c583b9164c8e 100644
--- a/base/task/sequence_manager/work_queue_sets.cc
+++ b/base/task/sequence_manager/work_queue_sets.cc
@@ -5,6 +5,7 @@
#include "base/task/sequence_manager/work_queue_sets.h"
#include "base/check_op.h"
+#include "base/record_replay.h"
namespace base {
namespace sequence_manager {
@@ -23,6 +24,8 @@ WorkQueueSets::WorkQueueSets(const char* name,
WorkQueueSets::~WorkQueueSets() = default;
void WorkQueueSets::AddQueue(WorkQueue* work_queue, size_t set_index) {
+ recordreplay::Assert("WorkQueueSets::AddQueue %lu %lu",
+ recordreplay::PointerId(work_queue), set_index);
DCHECK(!work_queue->work_queue_sets());
DCHECK_LT(set_index, work_queue_heaps_.size());
DCHECK(!work_queue->heap_handle().IsValid());
@@ -39,6 +42,8 @@ void WorkQueueSets::AddQueue(WorkQueue* work_queue, size_t set_index) {
}
void WorkQueueSets::RemoveQueue(WorkQueue* work_queue) {
+ recordreplay::Assert("WorkQueueSets::RemoveQueue %lu",
+ recordreplay::PointerId(work_queue));
DCHECK_EQ(this, work_queue->work_queue_sets());
work_queue->AssignToWorkQueueSets(nullptr);
if (!work_queue->heap_handle().IsValid())
@@ -52,6 +57,8 @@ void WorkQueueSets::RemoveQueue(WorkQueue* work_queue) {
}
void WorkQueueSets::ChangeSetIndex(WorkQueue* work_queue, size_t set_index) {
+ recordreplay::Assert("WorkQueueSets::ChangeSetIndex %lu %lu",
+ recordreplay::PointerId(work_queue), set_index);
DCHECK_EQ(this, work_queue->work_queue_sets());
DCHECK_LT(set_index, work_queue_heaps_.size());
EnqueueOrder enqueue_order;
@@ -73,6 +80,8 @@ void WorkQueueSets::ChangeSetIndex(WorkQueue* work_queue, size_t set_index) {
}
void WorkQueueSets::OnQueuesFrontTaskChanged(WorkQueue* work_queue) {
+ recordreplay::Assert("WorkQueueSets::OnQueuesFrontTaskChanged %lu",
+ recordreplay::PointerId(work_queue));
EnqueueOrder enqueue_order;
size_t set_index = work_queue->work_queue_set_index();
DCHECK_EQ(this, work_queue->work_queue_sets());
@@ -93,6 +102,8 @@ void WorkQueueSets::OnQueuesFrontTaskChanged(WorkQueue* work_queue) {
}
void WorkQueueSets::OnTaskPushedToEmptyQueue(WorkQueue* work_queue) {
+ recordreplay::Assert("WorkQueueSets::OnTaskPushedToEmptyQueue %lu",
+ recordreplay::PointerId(work_queue));
// NOTE if this function changes, we need to keep |WorkQueueSets::AddQueue| in
// sync.
DCHECK_EQ(this, work_queue->work_queue_sets());
@@ -111,6 +122,8 @@ void WorkQueueSets::OnTaskPushedToEmptyQueue(WorkQueue* work_queue) {
}
void WorkQueueSets::OnPopMinQueueInSet(WorkQueue* work_queue) {
+ recordreplay::Assert("WorkQueueSets::OnPopMinQueueInSet %lu",
+ recordreplay::PointerId(work_queue));
// Assume that |work_queue| contains the lowest enqueue_order.
size_t set_index = work_queue->work_queue_set_index();
DCHECK_EQ(this, work_queue->work_queue_sets());
@@ -136,6 +149,8 @@ void WorkQueueSets::OnPopMinQueueInSet(WorkQueue* work_queue) {
}
void WorkQueueSets::OnQueueBlocked(WorkQueue* work_queue) {
+ recordreplay::Assert("WorkQueueSets::OnQueueBlocked %lu",
+ recordreplay::PointerId(work_queue));
DCHECK_EQ(this, work_queue->work_queue_sets());
base::internal::HeapHandle heap_handle = work_queue->heap_handle();
if (!heap_handle.IsValid())
@@ -148,18 +163,22 @@ void WorkQueueSets::OnQueueBlocked(WorkQueue* work_queue) {
}
WorkQueue* WorkQueueSets::GetOldestQueueInSet(size_t set_index) const {
+ recordreplay::Assert("WorkQueueSets::GetOldestQueueInSet Start %lu", set_index);
DCHECK_LT(set_index, work_queue_heaps_.size());
if (work_queue_heaps_[set_index].empty())
return nullptr;
WorkQueue* queue = work_queue_heaps_[set_index].Min().value;
DCHECK_EQ(set_index, queue->work_queue_set_index());
DCHECK(queue->heap_handle().IsValid());
+ recordreplay::Assert("WorkQueueSets::GetOldestQueueInSet Done %lu",
+ recordreplay::PointerId(queue));
return queue;
}
WorkQueue* WorkQueueSets::GetOldestQueueAndEnqueueOrderInSet(
size_t set_index,
EnqueueOrder* out_enqueue_order) const {
+ recordreplay::Assert("WorkQueueSets::GetOldestQueueAndEnqueueOrderInSet Start %lu", set_index);
DCHECK_LT(set_index, work_queue_heaps_.size());
if (work_queue_heaps_[set_index].empty())
return nullptr;
@@ -169,6 +188,8 @@ WorkQueue* WorkQueueSets::GetOldestQueueAndEnqueueOrderInSet(
EnqueueOrder enqueue_order;
DCHECK(oldest.value->GetFrontTaskEnqueueOrder(&enqueue_order) &&
oldest.key == enqueue_order);
+ recordreplay::Assert("WorkQueueSets::GetOldestQueueAndEnqueueOrderInSet Done %lu",
+ recordreplay::PointerId(oldest.value));
return oldest.value;
}
@@ -240,6 +261,8 @@ bool WorkQueueSets::ContainsWorkQueueForTest(
void WorkQueueSets::CollectSkippedOverLowerPriorityTasks(
const internal::WorkQueue* selected_work_queue,
std::vector<const Task*>* result) const {
+ recordreplay::Assert("WorkQueueSets::CollectSkippedOverLowerPriorityTasks %lu",
+ recordreplay::PointerId((void*)selected_work_queue));
EnqueueOrder selected_enqueue_order;
CHECK(selected_work_queue->GetFrontTaskEnqueueOrder(&selected_enqueue_order));
for (size_t priority = selected_work_queue->work_queue_set_index() + 1;
diff --git a/base/task/thread_pool/task_tracker.cc b/base/task/thread_pool/task_tracker.cc
index 06edee9b13bf..9baaa1d09f7a 100644
--- a/base/task/thread_pool/task_tracker.cc
+++ b/base/task/thread_pool/task_tracker.cc
@@ -18,6 +18,7 @@
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram_macros.h"
#include "base/optional.h"
+#include "base/record_replay.h"
#include "base/sequence_token.h"
#include "base/strings/string_util.h"
#include "base/synchronization/condition_variable.h"
@@ -405,6 +406,8 @@ RegisteredTaskSource TaskTracker::RunAndPopNextTask(
RegisteredTaskSource task_source) {
DCHECK(task_source);
+ recordreplay::Assert("TaskTracker::RunAndPopNextTask Start");
+
const bool should_run_tasks = BeforeRunTask(task_source->shutdown_behavior());
// Run the next task in |task_source|.
@@ -425,6 +428,8 @@ RegisteredTaskSource TaskTracker::RunAndPopNextTask(
AfterRunTask(task_source->shutdown_behavior());
const bool task_source_must_be_queued = task_source.DidProcessTask();
// |task_source| should be reenqueued iff requested by DidProcessTask().
+ recordreplay::Assert("TaskTracker::RunAndPopNextTask Done %d",
+ task_source_must_be_queued);
if (task_source_must_be_queued)
return task_source;
return nullptr;
diff --git a/base/task/thread_pool/thread_group.cc b/base/task/thread_pool/thread_group.cc
index 4832a40b2f65..5712dcb2aaa1 100644
--- a/base/task/thread_pool/thread_group.cc
+++ b/base/task/thread_pool/thread_group.cc
@@ -75,8 +75,13 @@ ThreadGroup::ThreadGroup(TrackedRef<TaskTracker> task_tracker,
ThreadGroup* predecessor_thread_group)
: task_tracker_(std::move(task_tracker)),
delegate_(std::move(delegate)),
- lock_(predecessor_thread_group ? &predecessor_thread_group->lock_
- : nullptr) {
+ lock_("ThreadGroup.lock_"
+ // We ought to have a CheckedLock ctor that takes both an ordered
+ // name and a predecessor, but since we don't currently support
+ // debug builds when recording/replaying, it doesn't seem worth the
+ // hassle.
+ /*predecessor_thread_group ? &predecessor_thread_group->lock_
+ : nullptr*/) {
DCHECK(task_tracker_);
}
diff --git a/base/task/thread_pool/thread_group_impl.cc b/base/task/thread_pool/thread_group_impl.cc
index 13d74f7397c0..80664eb7c12f 100644
--- a/base/task/thread_pool/thread_group_impl.cc
+++ b/base/task/thread_pool/thread_group_impl.cc
@@ -23,6 +23,7 @@
#include "base/numerics/clamped_math.h"
#include "base/optional.h"
#include "base/ranges/algorithm.h"
+#include "base/record_replay.h"
#include "base/sequence_token.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
@@ -140,6 +141,8 @@ class ThreadGroupImpl::ScopedCommandsExecutor
void AddWorker(scoped_refptr<WorkerThread> worker) {
if (!worker)
return;
+ recordreplay::Assert("WorkerContainer::AddWorker %lu",
+ recordreplay::PointerId(worker.get()));
if (!first_worker_)
first_worker_ = std::move(worker);
else
@@ -149,9 +152,14 @@ class ThreadGroupImpl::ScopedCommandsExecutor
template <typename Action>
void ForEachWorker(Action action) {
if (first_worker_) {
+ recordreplay::Assert("WorkerContainer::ForEachWorker #1 %lu",
+ recordreplay::PointerId(first_worker_.get()));
action(first_worker_.get());
- for (scoped_refptr<WorkerThread> worker : additional_workers_)
+ for (scoped_refptr<WorkerThread> worker : additional_workers_) {
+ recordreplay::Assert("WorkerContainer::ForEachWorker #2 %lu",
+ recordreplay::PointerId(worker.get()));
action(worker.get());
+ }
} else {
DCHECK(additional_workers_.empty());
}
@@ -577,6 +585,8 @@ void ThreadGroupImpl::WorkerThreadDelegateImpl::OnMainEntry(
RegisteredTaskSource ThreadGroupImpl::WorkerThreadDelegateImpl::GetWork(
WorkerThread* worker) {
+ recordreplay::Assert("WorkerThreadDelegateImpl::GetWork Start");
+
DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
DCHECK(!worker_only().is_running_task);
@@ -585,8 +595,10 @@ RegisteredTaskSource ThreadGroupImpl::WorkerThreadDelegateImpl::GetWork(
DCHECK(ContainsWorker(outer_->workers_, worker));
- if (!CanGetWorkLockRequired(&executor, worker))
+ if (!CanGetWorkLockRequired(&executor, worker)) {
+ recordreplay::Assert("WorkerThreadDelegateImpl::GetWork #1");
return nullptr;
+ }
// Use this opportunity, before assigning work to this worker, to create/wake
// additional workers if needed (doing this here allows us to reduce
@@ -615,6 +627,7 @@ RegisteredTaskSource ThreadGroupImpl::WorkerThreadDelegateImpl::GetWork(
}
if (!task_source) {
OnWorkerBecomesIdleLockRequired(worker);
+ recordreplay::Assert("WorkerThreadDelegateImpl::GetWork #2");
return nullptr;
}
@@ -630,6 +643,7 @@ RegisteredTaskSource ThreadGroupImpl::WorkerThreadDelegateImpl::GetWork(
outer_->EnsureEnoughWorkersLockRequired(&executor);
}
+ recordreplay::Assert("WorkerThreadDelegateImpl::GetWork Done");
return task_source;
}
diff --git a/base/task/thread_pool/worker_thread.cc b/base/task/thread_pool/worker_thread.cc
index 105c89f023ba..e70138b40c30 100644
--- a/base/task/thread_pool/worker_thread.cc
+++ b/base/task/thread_pool/worker_thread.cc
@@ -11,6 +11,7 @@
#include "base/check_op.h"
#include "base/compiler_specific.h"
#include "base/debug/alias.h"
+#include "base/record_replay.h"
#include "base/task/thread_pool/environment_config.h"
#include "base/task/thread_pool/task_tracker.h"
#include "base/task/thread_pool/worker_thread_observer.h"
@@ -46,6 +47,7 @@ WorkerThread::WorkerThread(ThreadPriority priority_hint,
task_tracker_(std::move(task_tracker)),
priority_hint_(priority_hint),
current_thread_priority_(GetDesiredThreadPriority()) {
+ recordreplay::RegisterPointer(this);
DCHECK(delegate_);
DCHECK(task_tracker_);
DCHECK(CanUseBackgroundPriorityForWorkerThread() ||
@@ -117,6 +119,7 @@ bool WorkerThread::ThreadAliveForTesting() const {
}
WorkerThread::~WorkerThread() {
+ recordreplay::UnregisterPointer(this);
CheckedAutoLock auto_lock(thread_lock_);
// If |thread_handle_| wasn't joined, detach it.
@@ -292,6 +295,8 @@ NOINLINE void WorkerThread::RunBackgroundDedicatedCOMWorker() {
#endif // defined(OS_WIN)
void WorkerThread::RunWorker() {
+ recordreplay::Assert("WorkerThread::RunWorker Start");
+
DCHECK_EQ(self_, this);
TRACE_EVENT_INSTANT0("base", "WorkerThread born", TRACE_EVENT_SCOPE_THREAD);
TRACE_EVENT_BEGIN0("base", "WorkerThread active");
@@ -299,8 +304,12 @@ void WorkerThread::RunWorker() {
if (worker_thread_observer_)
worker_thread_observer_->OnWorkerThreadMainEntry();
+ recordreplay::Assert("WorkerThread::RunWorker #1");
+
delegate_->OnMainEntry(this);
+ recordreplay::Assert("WorkerThread::RunWorker #2");
+
// Background threads can take an arbitrary amount of time to complete, do not
// watch them for hangs. Ignore priority boosting for now.
const bool watch_for_hangs =
@@ -316,9 +325,11 @@ void WorkerThread::RunWorker() {
// A WorkerThread starts out waiting for work.
{
+ recordreplay::Assert("WorkerThread::RunWorker #3");
TRACE_EVENT_END0("base", "WorkerThread active");
delegate_->WaitForWork(&wake_up_event_);
TRACE_EVENT_BEGIN0("base", "WorkerThread active");
+ recordreplay::Assert("WorkerThread::RunWorker #4");
}
while (!ShouldExit()) {
@@ -341,7 +352,9 @@ void WorkerThread::RunWorker() {
TRACE_EVENT_END0("base", "WorkerThread active");
hang_watch_scope.reset();
+ recordreplay::Assert("WorkerThread::RunWorker #5");
delegate_->WaitForWork(&wake_up_event_);
+ recordreplay::Assert("WorkerThread::RunWorker #6");
TRACE_EVENT_BEGIN0("base", "WorkerThread active");
continue;
}
diff --git a/base/task_runner.cc b/base/task_runner.cc
index 348c79f35b2a..f29b5e7c3a37 100644
--- a/base/task_runner.cc
+++ b/base/task_runner.cc
@@ -9,8 +9,37 @@
#include "base/bind.h"
#include "base/check.h"
#include "base/compiler_specific.h"
+#include "base/record_replay.h"
#include "base/threading/post_task_and_reply_impl.h"
+#ifndef NACL_TC_REV
+
+#include <dlfcn.h>
+
+// There are linker problems if we try to use recordreplay::Assert here.
+static void (*gRecordReplayAssertFn)(const char*, va_list);
+
+static void RecordReplayAssert(const char* aFormat, ...) {
+ if (!gRecordReplayAssertFn) {
+ void* fnptr = dlsym(RTLD_DEFAULT, "RecordReplayAssert");
+ if (!fnptr) {
+ return;
+ }
+ gRecordReplayAssertFn = reinterpret_cast<void(*)(const char*, va_list)>(fnptr);
+ }
+
+ va_list ap;
+ va_start(ap, aFormat);
+ gRecordReplayAssertFn(aFormat, ap);
+ va_end(ap);
+}
+
+#else
+
+static void RecordReplayAssert(const char* aFormat, ...) {}
+
+#endif
+
namespace base {
namespace {
@@ -42,7 +71,10 @@ bool PostTaskAndReplyTaskRunner::PostTask(const Location& from_here,
} // namespace
bool TaskRunner::PostTask(const Location& from_here, OnceClosure task) {
- return PostDelayedTask(from_here, std::move(task), base::TimeDelta());
+ RecordReplayAssert("TaskRunner::PostTask Start");
+ bool rv = PostDelayedTask(from_here, std::move(task), base::TimeDelta());
+ RecordReplayAssert("TaskRunner::PostTask Done %d", rv);
+ return rv;
}
bool TaskRunner::PostTaskAndReply(const Location& from_here,
diff --git a/base/time/time_mac.cc b/base/time/time_mac.cc
index e3babbc0ba3e..f8a03ce008e5 100644
--- a/base/time/time_mac.cc
+++ b/base/time/time_mac.cc
@@ -107,6 +107,11 @@ int64_t ComputeThreadTicks() {
return 0;
}
+ // Calling thread_info is currently unsupported when recording/replaying.
+ if (getenv("RECORD_REPLAY_DRIVER")) {
+ return 0;
+ }
+
mach_msg_type_number_t thread_info_count = THREAD_BASIC_INFO_COUNT;
thread_basic_info_data_t thread_info_data;
diff --git a/cc/mojo_embedder/async_layer_tree_frame_sink.cc b/cc/mojo_embedder/async_layer_tree_frame_sink.cc
index 452bb7371a98..b7d6c19bd31a 100644
--- a/cc/mojo_embedder/async_layer_tree_frame_sink.cc
+++ b/cc/mojo_embedder/async_layer_tree_frame_sink.cc
@@ -10,6 +10,7 @@
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
+#include "base/record_replay.h"
#include "base/trace_event/trace_event.h"
#include "cc/base/histograms.h"
#include "cc/trees/layer_tree_frame_sink_client.h"
@@ -17,6 +18,7 @@
#include "components/viz/common/frame_sinks/begin_frame_args.h"
#include "components/viz/common/hit_test/hit_test_region_list.h"
#include "components/viz/common/quads/compositor_frame.h"
+#include "components/viz/service/display/record_replay_render.h"
namespace cc {
namespace mojo_embedder {
@@ -196,6 +198,10 @@ void AsyncLayerTreeFrameSink::SubmitCompositorFrame(
TRACE_EVENT_FLAG_FLOW_OUT, "step",
"SubmitHitTestData");
+ if (recordreplay::IsRecordingOrReplaying()) {
+ RecordReplaySubmitCompositorFrame(local_surface_id_, frame);
+ }
+
compositor_frame_sink_ptr_->SubmitCompositorFrame(
local_surface_id_, std::move(frame), std::move(hit_test_region_list), 0);
}
diff --git a/cc/raster/bitmap_raster_buffer_provider.cc b/cc/raster/bitmap_raster_buffer_provider.cc
index 397fe0867be1..218cae693526 100644
--- a/cc/raster/bitmap_raster_buffer_provider.cc
+++ b/cc/raster/bitmap_raster_buffer_provider.cc
@@ -11,6 +11,7 @@
#include <utility>
#include "base/memory/shared_memory_mapping.h"
+#include "base/record_replay.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
@@ -19,6 +20,7 @@
#include "cc/trees/layer_tree_frame_sink.h"
#include "components/viz/common/resources/bitmap_allocation.h"
#include "components/viz/common/resources/platform_color.h"
+#include "components/viz/service/display/record_replay_render.h"
namespace cc {
namespace {
@@ -120,6 +122,12 @@ BitmapRasterBufferProvider::AcquireBufferForRaster(
frame_sink_->DidAllocateSharedBitmap(std::move(shm.region),
backing->shared_bitmap_id);
+ if (recordreplay::IsRecordingOrReplaying()) {
+ viz::RecordReplayNotifyRasterBuffer(backing->shared_bitmap_id,
+ backing->mapping.memory(),
+ backing->mapping.size());
+ }
+
resource.set_software_backing(std::move(backing));
}
BitmapSoftwareBacking* backing =
diff --git a/cc/raster/raster_query_queue.cc b/cc/raster/raster_query_queue.cc
index b77d3c69b97b..5af693cbd1a7 100644
--- a/cc/raster/raster_query_queue.cc
+++ b/cc/raster/raster_query_queue.cc
@@ -21,7 +21,8 @@ RasterQueryQueue::RasterQueryQueue(
viz::RasterContextProvider* const worker_context_provider,
bool oop_rasterization_enabled)
: worker_context_provider_(worker_context_provider),
- oop_rasterization_enabled_(oop_rasterization_enabled) {}
+ oop_rasterization_enabled_(oop_rasterization_enabled),
+ pending_raster_queries_lock_("RasterQueryQueue.pending_raster_queries_lock_") {}
RasterQueryQueue::~RasterQueryQueue() = default;
diff --git a/cc/scheduler/scheduler.cc b/cc/scheduler/scheduler.cc
index 8f51ef9b4671..6cd951f9f827 100644
--- a/cc/scheduler/scheduler.cc
+++ b/cc/scheduler/scheduler.cc
@@ -12,6 +12,7 @@
#include "base/bind.h"
#include "base/check_op.h"
#include "base/location.h"
+#include "base/record_replay.h"
#include "base/single_thread_task_runner.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/traced_value.h"
@@ -736,6 +737,8 @@ void Scheduler::ScheduleBeginImplFrameDeadline() {
}
void Scheduler::OnBeginImplFrameDeadline() {
+ recordreplay::Assert("Scheduler::OnBeginImplFrameDeadline Start");
+
TRACE_EVENT0("cc,benchmark", "Scheduler::OnBeginImplFrameDeadline");
begin_impl_frame_deadline_task_.Cancel();
// We split the deadline actions up into two phases so the state machine
@@ -757,6 +760,8 @@ void Scheduler::OnBeginImplFrameDeadline() {
FinishImplFrameSynchronous();
else
FinishImplFrame();
+
+ recordreplay::Assert("Scheduler::OnBeginImplFrameDeadline Done");
}
void Scheduler::FinishImplFrameSynchronous() {
diff --git a/cc/tiles/tile_manager.cc b/cc/tiles/tile_manager.cc
index 474ac906a21d..2ef6d735aaa9 100644
--- a/cc/tiles/tile_manager.cc
+++ b/cc/tiles/tile_manager.cc
@@ -17,6 +17,7 @@
#include "base/metrics/histogram.h"
#include "base/numerics/safe_conversions.h"
#include "base/optional.h"
+#include "base/record_replay.h"
#include "base/threading/thread_checker.h"
#include "base/trace_event/traced_value.h"
#include "cc/base/devtools_instrumentation.h"
@@ -349,9 +350,11 @@ class DidFinishRunningAllTilesTask : public TileTask {
void RunOnWorkerThread() override {
TRACE_EVENT0("cc", "TaskSetFinishedTaskImpl::RunOnWorkerThread");
bool has_pending_queries = false;
+ recordreplay::Assert("DidFinishRunningAllTilesTask::RunOnWorkerThread Start %d", !!pending_raster_queries_);
if (pending_raster_queries_) {
has_pending_queries =
pending_raster_queries_->CheckRasterFinishedQueries();
+ recordreplay::Assert("DidFinishRunningAllTilesTask::RunOnWorkerThread #1 %d", has_pending_queries);
}
task_runner_->PostTask(FROM_HERE, base::BindOnce(std::move(completion_cb_),
has_pending_queries));
@@ -520,6 +523,8 @@ void TileManager::DidFinishRunningAllTileTasks(bool has_pending_queries) {
DCHECK(resource_pool_);
DCHECK(tile_task_manager_);
+ recordreplay::Assert("TileManager::DidFinishRunningAllTileTasks %d", has_pending_queries);
+
has_scheduled_tile_tasks_ = false;
has_pending_queries_ = has_pending_queries;
@@ -1462,14 +1467,20 @@ bool TileManager::IsReadyToDraw() const {
void TileManager::ScheduleCheckRasterFinishedQueries() {
DCHECK(has_pending_queries_);
- if (!check_pending_tile_queries_callback_.IsCancelled())
+ recordreplay::Assert("TileManager::ScheduleCheckRasterFinishedQueries Start");
+
+ if (!check_pending_tile_queries_callback_.IsCancelled()) {
+ recordreplay::Assert("TileManager::ScheduleCheckRasterFinishedQueries #1");
return;
+ }
check_pending_tile_queries_callback_.Reset(base::BindOnce(
&TileManager::CheckRasterFinishedQueries, base::Unretained(this)));
task_runner_->PostDelayedTask(FROM_HERE,
check_pending_tile_queries_callback_.callback(),
base::TimeDelta::FromMilliseconds(100));
+
+ recordreplay::Assert("TileManager::ScheduleCheckRasterFinishedQueries Done");
}
void TileManager::CheckRasterFinishedQueries() {
@@ -1487,6 +1498,9 @@ void TileManager::CheckRasterFinishedQueries() {
has_pending_queries_ =
pending_raster_queries_->CheckRasterFinishedQueries();
}
+
+ recordreplay::Assert("TileManager::CheckRasterFinishedQueries #1 %d", has_pending_queries_);
+
if (has_pending_queries_)
ScheduleCheckRasterFinishedQueries();
}
@@ -1501,6 +1515,8 @@ void TileManager::FlushAndIssueSignals() {
}
void TileManager::IssueSignals() {
+ recordreplay::Assert("TileManager::IssueSignals Start");
+
// Ready to activate.
if (signals_.activate_tile_tasks_completed &&
signals_.activate_gpu_work_completed &&
@@ -1524,13 +1540,19 @@ void TileManager::IssueSignals() {
}
}
+ recordreplay::Assert("TileManager::IssueSignals #1 %d %d",
+ signals_.all_tile_tasks_completed,
+ signals_.did_notify_all_tile_tasks_completed);
+
// All tile tasks completed.
if (signals_.all_tile_tasks_completed &&
!signals_.did_notify_all_tile_tasks_completed) {
+ recordreplay::Assert("TileManager::IssueSignals #2 %d", has_scheduled_tile_tasks_);
if (!has_scheduled_tile_tasks_) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
"TileManager::IssueSignals - all tile tasks completed");
+ recordreplay::Assert("TileManager::IssueSignals #3 %d", has_pending_queries_);
if (has_pending_queries_)
ScheduleCheckRasterFinishedQueries();
@@ -1551,6 +1573,8 @@ void TileManager::IssueSignals() {
checker_image_tracker_.SetMaxDecodePriorityAllowed(
CheckerImageTracker::DecodeType::kRaster);
}
+
+ recordreplay::Assert("TileManager::IssueSignals Done");
}
void TileManager::CheckIfMoreTilesNeedToBePrepared() {
diff --git a/cc/trees/proxy_main.cc b/cc/trees/proxy_main.cc
index 0fa28a69c060..710f525ff21d 100644
--- a/cc/trees/proxy_main.cc
+++ b/cc/trees/proxy_main.cc
@@ -11,6 +11,7 @@
#include <vector>
#include "base/bind.h"
+#include "base/record_replay.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/traced_value.h"
#include "cc/base/completion_event.h"
@@ -26,6 +27,7 @@
#include "cc/trees/render_frame_metadata_observer.h"
#include "cc/trees/scoped_abort_remaining_swap_promises.h"
#include "cc/trees/swap_promise.h"
+#include "components/viz/service/display/record_replay_render.h"
#include "services/metrics/public/cpp/ukm_recorder.h"
namespace cc {
@@ -377,6 +379,10 @@ void ProxyMain::BeginMainFrame(
// blink::LocalFrameView::RunPostLifecycleSteps.
layer_tree_host_->DidBeginMainFrame();
+ if (recordreplay::IsRecordingOrReplaying()) {
+ viz::RecordReplayOnCommitPaint();
+ }
+
layer_tree_host_->RecordEndOfFrameMetrics(
begin_main_frame_start_time,
begin_main_frame_state->active_sequence_trackers);
diff --git a/chrome/app/chrome_exe_main_mac.cc b/chrome/app/chrome_exe_main_mac.cc
index 180dcdebb40b..c1301704898d 100644
--- a/chrome/app/chrome_exe_main_mac.cc
+++ b/chrome/app/chrome_exe_main_mac.cc
@@ -50,7 +50,71 @@ typedef int (*ChromeMainPtr)(int, char**);
} // namespace
+static void (*gRecordReplayAttach)(const char* dispatchAddress, const char* buildId);
+static void (*gRecordReplayRecordCommandLineArguments)(int*, char***);
+
+template <typename Src, typename Dst>
+static inline void CastPointer(const Src src, Dst* dst) {
+ static_assert(sizeof(Src) == sizeof(uintptr_t), "bad size");
+ static_assert(sizeof(Dst) == sizeof(uintptr_t), "bad size");
+ memcpy((void*)dst, (const void*)&src, sizeof(uintptr_t));
+}
+
+template <typename T>
+static void RecordReplayLoadSymbol(void* handle, const char* name, T& function) {
+ void* sym = dlsym(handle, name);
+ if (!sym) {
+ fprintf(stderr, "Could not find %s in Record Replay driver.\n", name);
+ return;
+ }
+
+ CastPointer(sym, &function);
+}
+
+static const char* gBuildId = "macOS-chromium-experimental";
+
+static void RecordReplayAttach(int* pargc, char*** pargv) {
+ // Only renderer processes are recorded/replayed.
+ bool renderer = false;
+ for (int i = 0; i < *pargc; i++) {
+ if (!strcmp((*pargv)[i], "--type=renderer")) {
+ renderer = true;
+ }
+ }
+ if (!renderer) {
+ return;
+ }
+
+ const char* driver = getenv("RECORD_REPLAY_DRIVER");
+ if (!driver) {
+ return;
+ }
+
+ const char* dispatchAddress = getenv("RECORD_REPLAY_DISPATCH");
+ if (!dispatchAddress) {
+ fprintf(stderr, "RECORD_REPLAY_DISPATCH not set.\n");
+ return;
+ }
+
+ void* handle = dlopen(driver, RTLD_LAZY);
+ if (!handle) {
+ fprintf(stderr, "Loading Record Replay driver failed.\n");
+ return;
+ }
+
+ RecordReplayLoadSymbol(handle, "RecordReplayAttach", gRecordReplayAttach);
+ RecordReplayLoadSymbol(handle, "RecordReplayRecordCommandLineArguments",
+ gRecordReplayRecordCommandLineArguments);
+
+ if (gRecordReplayAttach) {
+ gRecordReplayAttach(dispatchAddress, gBuildId);
+ gRecordReplayRecordCommandLineArguments(pargc, pargv);
+ }
+}
+
__attribute__((visibility("default"))) int main(int argc, char* argv[]) {
+ RecordReplayAttach(&argc, &argv);
+
uint32_t exec_path_size = 0;
int rv = _NSGetExecutablePath(NULL, &exec_path_size);
if (rv != -1) {
diff --git a/chrome/app/chrome_main.cc b/chrome/app/chrome_main.cc
index d353778d170f..8accad1c4ce5 100644
--- a/chrome/app/chrome_main.cc
+++ b/chrome/app/chrome_main.cc
@@ -4,6 +4,9 @@
#include <stdint.h>
+#define _GNU_SOURCE
+#include <dlfcn.h>
+
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/command_line.h"
@@ -56,6 +59,28 @@ int ChromeMain(int argc, const char** argv);
}
#endif
+extern "C" void V8SetRecordingOrReplaying(void* handle);
+
+static void MaybeSetRecordingOrReplaying(int argc, const char** argv) {
+ // Only renderer processes are recorded/replayed.
+ bool renderer = false;
+ for (int i = 0; i < argc; i++) {
+ if (!strcmp(argv[i], "--type=renderer")) {
+ renderer = true;
+ }
+ }
+ if (!renderer) {
+ return;
+ }
+
+ const char* driver = getenv("RECORD_REPLAY_DRIVER");
+ if (driver) {
+ void* handle = dlopen(driver, RTLD_LAZY);
+ CHECK(handle);
+ V8SetRecordingOrReplaying(handle);
+ }
+}
+
#if defined(OS_WIN)
DLLEXPORT int __cdecl ChromeMain(
HINSTANCE instance,
@@ -67,6 +92,8 @@ int ChromeMain(int argc, const char** argv) {
int64_t exe_entry_point_ticks = 0;
#endif
+ MaybeSetRecordingOrReplaying(argc, argv);
+
#if defined(OS_WIN)
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
// Call this early on in order to configure heap workarounds. This must be
diff --git a/components/crash/core/common/objc_zombie.mm b/components/crash/core/common/objc_zombie.mm
index 9f4104949d6e..aacb202917c5 100644
--- a/components/crash/core/common/objc_zombie.mm
+++ b/components/crash/core/common/objc_zombie.mm
@@ -262,6 +262,10 @@ BOOL ZombieInit() {
if (initialized)
return YES;
+ // Don't alter internal classes when recording/replaying.
+ if (getenv("RECORD_REPLAY_DRIVER"))
+ return NO;
+
Class rootClass = [NSObject class];
g_originalDeallocIMP = reinterpret_cast<RealIMP>(
class_getMethodImplementation(rootClass, @selector(dealloc)));
diff --git a/components/variations/child_process_field_trial_syncer.cc b/components/variations/child_process_field_trial_syncer.cc
index 6825bdd2d14c..8da37fbefdfc 100644
--- a/components/variations/child_process_field_trial_syncer.cc
+++ b/components/variations/child_process_field_trial_syncer.cc
@@ -9,6 +9,7 @@
#include "base/base_switches.h"
#include "base/command_line.h"
+#include "base/record_replay.h"
#include "components/variations/variations_crash_keys.h"
namespace variations {
@@ -21,6 +22,8 @@ ChildProcessFieldTrialSyncer::~ChildProcessFieldTrialSyncer() {}
void ChildProcessFieldTrialSyncer::InitFieldTrialObserving(
const base::CommandLine& command_line) {
+ recordreplay::Assert("ChildProcessFieldTrialSyncer::InitFieldTrialObserving Start");
+
// Set up initial set of crash dump data for field trials in this process.
variations::InitCrashKeys();
@@ -41,9 +44,13 @@ void ChildProcessFieldTrialSyncer::InitFieldTrialObserving(
base::FieldTrial::ActiveGroups current_active_trials;
base::FieldTrialList::GetActiveFieldTrialGroups(&current_active_trials);
for (const auto& trial : current_active_trials) {
- if (!base::Contains(initially_active_trials_set, trial.trial_name))
+ if (!base::Contains(initially_active_trials_set, trial.trial_name)) {
+ recordreplay::Assert("ChildProcessFieldTrialSyncer::InitFieldTrialObserving #1");
observer_->OnFieldTrialGroupFinalized(trial.trial_name, trial.group_name);
+ }
}
+
+ recordreplay::Assert("ChildProcessFieldTrialSyncer::InitFieldTrialObserving Done");
}
void ChildProcessFieldTrialSyncer::OnSetFieldTrialGroup(
diff --git a/components/viz/common/quads/compositor_render_pass.cc b/components/viz/common/quads/compositor_render_pass.cc
index 0ddff20504e2..ee79c531ca0a 100644
--- a/components/viz/common/quads/compositor_render_pass.cc
+++ b/components/viz/common/quads/compositor_render_pass.cc
@@ -15,6 +15,7 @@
#include "base/trace_event/traced_value.h"
#include "base/values.h"
#include "cc/base/math_util.h"
+#include "components/viz/common/quads/aggregated_render_pass.h"
#include "components/viz/common/quads/compositor_render_pass_draw_quad.h"
#include "components/viz/common/quads/debug_border_draw_quad.h"
#include "components/viz/common/quads/draw_quad.h"
@@ -254,4 +255,44 @@ std::unique_ptr<CompositorRenderPass> CompositorRenderPass::DeepCopy() const {
return copy_pass;
}
+std::unique_ptr<AggregatedRenderPass> CompositorRenderPass::DeepCopyAggregated() const {
+ std::unique_ptr<AggregatedRenderPass> copy_pass =
+ std::make_unique<AggregatedRenderPass>(shared_quad_state_list.size(),
+ quad_list.size());
+ copy_pass->SetAll(AggregatedRenderPassId::FromUnsafeValue(id.GetUnsafeValue()),
+ output_rect, damage_rect, transform_to_root_target,
+ filters, backdrop_filters, backdrop_filter_bounds,
+ gfx::ContentColorUsage::kSRGB,
+ has_transparent_background, cache_render_pass,
+ has_damage_from_contributing_content, generate_mipmap);
+
+ if (shared_quad_state_list.empty()) {
+ DCHECK(quad_list.empty());
+ return copy_pass;
+ }
+
+ SharedQuadStateList::ConstIterator sqs_iter = shared_quad_state_list.begin();
+ SharedQuadState* copy_shared_quad_state =
+ copy_pass->CreateAndAppendSharedQuadState();
+ *copy_shared_quad_state = **sqs_iter;
+ for (auto* quad : quad_list) {
+ while (quad->shared_quad_state != *sqs_iter) {
+ ++sqs_iter;
+ DCHECK(sqs_iter != shared_quad_state_list.end());
+ copy_shared_quad_state = copy_pass->CreateAndAppendSharedQuadState();
+ *copy_shared_quad_state = **sqs_iter;
+ }
+ DCHECK(quad->shared_quad_state == *sqs_iter);
+
+ if (quad->material == DrawQuad::Material::kCompositorRenderPass) {
+ const auto* pass_quad = CompositorRenderPassDrawQuad::MaterialCast(quad);
+ auto nid = AggregatedRenderPassId::FromUnsafeValue(pass_quad->render_pass_id.GetUnsafeValue());
+ copy_pass->CopyFromAndAppendRenderPassDrawQuad(pass_quad, nid);
+ } else {
+ copy_pass->CopyFromAndAppendDrawQuad(quad);
+ }
+ }
+ return copy_pass;
+}
+
} // namespace viz
diff --git a/components/viz/common/quads/compositor_render_pass.h b/components/viz/common/quads/compositor_render_pass.h
index c0a6f7b373a3..0772acc7e084 100644
--- a/components/viz/common/quads/compositor_render_pass.h
+++ b/components/viz/common/quads/compositor_render_pass.h
@@ -96,6 +96,10 @@ class VIZ_COMMON_EXPORT CompositorRenderPass : public RenderPassInternal {
// A deep copy of the render pass that includes quads.
std::unique_ptr<CompositorRenderPass> DeepCopy() const;
+ // Used when recording/replaying, where render passes are not aggregated
+ // before being drawn.
+ std::unique_ptr<AggregatedRenderPass> DeepCopyAggregated() const;
+
protected:
// This is essentially "using RenderPassInternal::RenderPassInternal", but
// since that generates inline (complex) ctors, the chromium-style plug-in
diff --git a/components/viz/service/BUILD.gn b/components/viz/service/BUILD.gn
index 504c1f1efd5d..a2fa9f62b3e6 100644
--- a/components/viz/service/BUILD.gn
+++ b/components/viz/service/BUILD.gn
@@ -76,6 +76,8 @@ viz_component("service") {
"display/overlay_processor_stub.h",
"display/program_binding.cc",
"display/program_binding.h",
+ "display/record_replay_render.cc",
+ "display/record_replay_render.h",
"display/render_pass_id_remapper.cc",
"display/render_pass_id_remapper.h",
"display/renderer_utils.cc",
diff --git a/components/viz/service/display/display_resource_provider.cc b/components/viz/service/display/display_resource_provider.cc
index 454e5c3f63e8..a3a2e0f0bd9b 100644
--- a/components/viz/service/display/display_resource_provider.cc
+++ b/components/viz/service/display/display_resource_provider.cc
@@ -9,6 +9,7 @@
#include "base/atomic_sequence_num.h"
#include "base/numerics/safe_math.h"
+#include "base/record_replay.h"
#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -16,6 +17,7 @@
#include "base/trace_event/trace_event.h"
#include "components/viz/common/gpu/context_provider.h"
#include "components/viz/common/resources/resource_sizes.h"
+#include "components/viz/service/display/record_replay_render.h"
#include "components/viz/service/display/shared_bitmap_manager.h"
#include "components/viz/service/display/skia_output_surface.h"
#include "gpu/GLES2/gl2extchromium.h"
@@ -870,6 +872,16 @@ DisplayResourceProvider::ScopedReadLockSkImage::ScopedReadLockSkImage(
SkAlphaType alpha_type,
GrSurfaceOrigin origin)
: resource_provider_(resource_provider), resource_id_(resource_id) {
+ // When recording/replaying we don't have a resource provider and need to get
+ // the bitmap directly from the record/replay renderer.
+ if (recordreplay::IsRecordingOrReplaying()) {
+ SkBitmap sk_bitmap;
+ RecordReplayPopulateSkBitmapWithResource(&sk_bitmap, resource_id);
+ sk_bitmap.setImmutable();
+ sk_image_ = SkImage::MakeFromBitmap(sk_bitmap);
+ return;
+ }
+
const ChildResource* resource =
resource_provider->LockForRead(resource_id, false /* overlay_only */);
DCHECK(resource);
@@ -920,6 +932,9 @@ DisplayResourceProvider::ScopedReadLockSkImage::ScopedReadLockSkImage(
}
DisplayResourceProvider::ScopedReadLockSkImage::~ScopedReadLockSkImage() {
+ if (recordreplay::IsRecordingOrReplaying()) {
+ return;
+ }
resource_provider_->UnlockForRead(resource_id_, false /* overlay_only */);
}
diff --git a/components/viz/service/display/record_replay_render.cc b/components/viz/service/display/record_replay_render.cc
new file mode 100644
index 000000000000..0b1816f6f7f0
--- /dev/null
+++ b/components/viz/service/display/record_replay_render.cc
@@ -0,0 +1,167 @@
+// Copyright 2020 Record Replay Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/viz/service/display/record_replay_render.h"
+
+#include "base/base64.h"
+#include "base/record_replay.h"
+#include "base/strings/stringprintf.h"
+#include "components/viz/common/display/renderer_settings.h"
+#include "components/viz/service/display/software_output_device.h"
+#include "components/viz/service/display/software_renderer.h"
+#include "components/viz/service/display_embedder/software_output_surface.h"
+#include "ui/gfx/codec/jpeg_codec.h"
+
+namespace viz {
+
+struct SharedBitmapInfo {
+ SharedBitmapId id_;
+ uint8_t* memory_;
+ size_t size_;
+};
+typedef std::vector<SharedBitmapInfo> SharedBitmapInfoVector;
+static SharedBitmapInfoVector* gSharedBitmaps;
+
+void RecordReplayNotifyRasterBuffer(const SharedBitmapId& shared_bitmap_id,
+ void* memory, size_t size) {
+ if (!gSharedBitmaps) {
+ gSharedBitmaps = new SharedBitmapInfoVector();
+ }
+ gSharedBitmaps->push_back({ shared_bitmap_id, (uint8_t*)memory, size });
+}
+
+static SoftwareOutputSurface* gOutputSurface;
+static SoftwareRenderer* gRenderer;
+
+static void InitializeRenderer() {
+ std::unique_ptr<SoftwareOutputDevice> output_device = std::make_unique<SoftwareOutputDevice>();
+ gOutputSurface = new SoftwareOutputSurface(std::move(output_device));
+
+ gRenderer = new SoftwareRenderer(new RendererSettings(),
+ new DebugRendererSettings(),
+ gOutputSurface,
+ nullptr,
+ nullptr);
+}
+
+static std::string SurfaceIdString(const viz::LocalSurfaceId& local_surface_id) {
+ // LocalSurfaceId has ToString(), but without verbose logging the token is truncated.
+ return base::StringPrintf("%u:%u:%s",
+ local_surface_id.parent_sequence_number(),
+ local_surface_id.child_sequence_number(),
+ local_surface_id.embed_token().ToString().c_str());
+}
+
+// For now we only support drawing a single surface, which is the first one
+// the process tries to draw.
+static viz::LocalSurfaceId* gSurfaceId;
+
+// Current compositor frame.
+static const viz::CompositorFrame* gCurrentFrame;
+
+void RecordReplaySubmitCompositorFrame(const viz::LocalSurfaceId& local_surface_id,
+ const viz::CompositorFrame& frame) {
+ CHECK(recordreplay::IsRecordingOrReplaying());
+
+ if (!gSurfaceId) {
+ gSurfaceId = new viz::LocalSurfaceId(local_surface_id);
+ InitializeRenderer();
+ }
+
+ if (*gSurfaceId != local_surface_id) {
+ recordreplay::Print("Ignoring composite to unknown surface %s, expected %s",
+ SurfaceIdString(local_surface_id).c_str(),
+ SurfaceIdString(*gSurfaceId).c_str());
+ return;
+ }
+
+ AggregatedRenderPassList render_passes;
+ for (const auto& pass : frame.render_pass_list) {
+ render_passes.push_back(pass->DeepCopyAggregated());
+ }
+
+ gCurrentFrame = &frame;
+
+ gRenderer->DrawFrame(&render_passes,
+ 1,
+ frame.size_in_pixels(),
+ gfx::DisplayColorSpaces(),
+ SurfaceDamageRectList());
+
+ gCurrentFrame = nullptr;
+}
+
+void RecordReplayPopulateSkBitmapWithResource(SkBitmap* sk_bitmap, ResourceId resource_id) {
+ CHECK(gCurrentFrame);
+
+ const TransferableResource* transferable = nullptr;
+ for (const TransferableResource& resource : gCurrentFrame->resource_list) {
+ if (resource.id == resource_id) {
+ transferable = &resource;
+ break;
+ }
+ }
+ CHECK(transferable);
+
+ void* pixels = nullptr;
+ for (const SharedBitmapInfo& info : *gSharedBitmaps) {
+ if (info.id_ == transferable->mailbox_holder.mailbox) {
+ pixels = info.memory_;
+ }
+ }
+ CHECK(pixels);
+
+ SkImageInfo info =
+ SkImageInfo::MakeN32Premul(transferable->size.width(),
+ transferable->size.height());
+ bool pixels_installed = sk_bitmap->installPixels(info, pixels, info.minRowBytes());
+ CHECK(pixels_installed);
+}
+
+extern "C" size_t V8RecordReplayPaintStart();
+extern "C" void V8RecordReplayPaintFinished(size_t bookmark);
+extern "C" void V8RecordReplaySetPaintCallback(char* (*callback)(const char*, int));
+
+const SkPixmap* gCurrentPixmap;
+
+static char* PaintCallback(const char* mime_type, int jpeg_quality) {
+ CHECK(gCurrentPixmap);
+
+ if (strcmp(mime_type, "image/jpeg")) {
+ // NYI
+ return nullptr;
+ }
+
+ std::vector<unsigned char> data;
+ if (!gfx::JPEGCodec::Encode(*gCurrentPixmap, jpeg_quality,
+ SkJpegEncoder::Downsample::k444, &data)) {
+ recordreplay::Print("Error: JPEG encoding failed");
+ return nullptr;
+ }
+
+ std::string encoded = base::Base64Encode(data);
+ return strdup(encoded.c_str());
+}
+
+// Bookmark for the last point where a paint was committed on the main thread.
+static size_t gLastPaintBookmark;
+
+void RecordReplayOnCommitPaint() {
+ gLastPaintBookmark = V8RecordReplayPaintStart();
+}
+
+void RecordReplayPaintFinished(const SkPixmap& pixmap) {
+ static bool hasPaints = false;
+ if (!hasPaints) {
+ hasPaints = true;
+ V8RecordReplaySetPaintCallback(PaintCallback);
+ }
+
+ CHECK(gLastPaintBookmark);
+ gCurrentPixmap = &pixmap;
+ V8RecordReplayPaintFinished(gLastPaintBookmark);
+ gCurrentPixmap = nullptr;
+}
+
+} // namespace viz
diff --git a/components/viz/service/display/record_replay_render.h b/components/viz/service/display/record_replay_render.h
new file mode 100644
index 000000000000..998a6d6968f1
--- /dev/null
+++ b/components/viz/service/display/record_replay_render.h
@@ -0,0 +1,45 @@
+// Copyright 2020 Record Replay Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_VIZ_SERVICE_DISPLAY_RECORD_REPLAY_RENDER_H_
+#define COMPONENTS_VIZ_SERVICE_DISPLAY_RECORD_REPLAY_RENDER_H_
+
+#include "components/viz/common/resources/shared_bitmap.h"
+#include "components/viz/common/resources/resource_id.h"
+#include "components/viz/common/surfaces/local_surface_id.h"
+#include "components/viz/common/quads/compositor_frame.h"
+#include "third_party/skia/include/core/SkBitmap.h"
+#include "third_party/skia/include/core/SkPixmap.h"
+
+namespace viz {
+
+// When recording, renderer processes generate compositor frames in the usual
+// way and send them on to the GPU process for drawing to the screen. When
+// replaying (and optionally when recording, for debugging) the process
+// additionally sends these frames to an in-process renderer that updates an
+// in-process buffer with the data the process is currently drawing. This data
+// can then be encoded to base64 images, reported to the record/replay
+// driver, and sent to clients inspecting the recording.
+
+// Called on the main thread when changes have been committed to the layer tree
+// and a paint has been triggered.
+void RecordReplayOnCommitPaint();
+
+// Called when a shared memory buffer for rasterization has been created.
+void RecordReplayNotifyRasterBuffer(const SharedBitmapId& shared_bitmap_id,
+ void* memory, size_t size);
+
+// Called when a CompositorFrame is being submitted to the GPU process.
+void RecordReplaySubmitCompositorFrame(const viz::LocalSurfaceId& local_surface_id,
+ const viz::CompositorFrame& frame);
+
+// Called to populate a bitmap with the pixel data for the given resource in the current frame.
+void RecordReplayPopulateSkBitmapWithResource(SkBitmap* sk_bitmap, ResourceId resource_id);
+
+// Called when painting to the software output device has finished.
+void RecordReplayPaintFinished(const SkPixmap& pixmap);
+
+} // namespace viz
+
+#endif // COMPONENTS_VIZ_SERVICE_DISPLAY_RECORD_REPLAY_RENDER_H_
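Taken together with record_replay_render.cc above, these hooks form one pipeline: the main thread opens a paint bookmark at commit time, the next submitted CompositorFrame is drawn by the in-process SoftwareRenderer using the shared-bitmap buffers announced via RecordReplayNotifyRasterBuffer, and the software output device hands back the rendered pixels, which resolves the bookmark and feeds the JPEG/base64 paint callback. The sketch below only illustrates that ordering; in the patch the three calls are made from ProxyMain::BeginMainFrame, AsyncLayerTreeFrameSink::SubmitCompositorFrame and SoftwareOutputDevice::EndPaint on their respective threads, not from a single function:

#include "components/viz/service/display/record_replay_render.h"

// Sketch: the order in which the hooks declared above fire for one frame.
void IllustrateReplayPaintOrder(const viz::LocalSurfaceId& surface_id,
                                const viz::CompositorFrame& frame,
                                const SkPixmap& rendered_pixels) {
  // 1. Commit on the main thread takes the paint bookmark.
  viz::RecordReplayOnCommitPaint();
  // 2. Frame submission renders into the in-process buffer.
  viz::RecordReplaySubmitCompositorFrame(surface_id, frame);
  // 3. EndPaint reports the pixels; the bookmark is resolved and the frame
  //    can later be encoded as a base64 JPEG by the paint callback.
  viz::RecordReplayPaintFinished(rendered_pixels);
}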
diff --git a/components/viz/service/display/software_output_device.cc b/components/viz/service/display/software_output_device.cc
index 0ac41068d5f9..8c598b664396 100644
--- a/components/viz/service/display/software_output_device.cc
+++ b/components/viz/service/display/software_output_device.cc
@@ -8,7 +8,9 @@
#include "base/bind.h"
#include "base/check.h"
+#include "base/record_replay.h"
#include "base/threading/sequenced_task_runner_handle.h"
+#include "components/viz/service/display/record_replay_render.h"
#include "skia/ext/legacy_display_globals.h"
#include "third_party/skia/include/core/SkCanvas.h"
#include "ui/gfx/vsync_provider.h"
@@ -48,7 +50,14 @@ SkCanvas* SoftwareOutputDevice::BeginPaint(const gfx::Rect& damage_rect) {
return surface_ ? surface_->getCanvas() : nullptr;
}
-void SoftwareOutputDevice::EndPaint() {}
+void SoftwareOutputDevice::EndPaint() {
+ if (recordreplay::IsRecordingOrReplaying()) {
+ SkPixmap pixmap;
+ if (surface_ && surface_->peekPixels(&pixmap)) {
+ RecordReplayPaintFinished(pixmap);
+ }
+ }
+}
gfx::VSyncProvider* SoftwareOutputDevice::GetVSyncProvider() {
return vsync_provider_.get();
diff --git a/content/browser/child_process_launcher_helper_mac.cc b/content/browser/child_process_launcher_helper_mac.cc
index 7dd338de3f73..a078ee6dc278 100644
--- a/content/browser/child_process_launcher_helper_mac.cc
+++ b/content/browser/child_process_launcher_helper_mac.cc
@@ -164,7 +164,7 @@ void ChildProcessLauncherHelper::ForceNormalProcessTerminationSync(
DCHECK(CurrentlyOnProcessLauncherTaskRunner());
// Client has gone away, so just kill the process. Using exit code 0 means
// that UMA won't treat this as a crash.
- process.process.Terminate(RESULT_CODE_NORMAL_EXIT, false);
+ //process.process.Terminate(RESULT_CODE_NORMAL_EXIT, false);
base::EnsureProcessTerminated(std::move(process.process));
}
diff --git a/content/child/child_thread_impl.cc b/content/child/child_thread_impl.cc
index d22aacaf33d7..3520c5704e38 100644
--- a/content/child/child_thread_impl.cc
+++ b/content/child/child_thread_impl.cc
@@ -31,6 +31,7 @@
#include "base/power_monitor/power_monitor.h"
#include "base/process/process.h"
#include "base/process/process_handle.h"
+#include "base/record_replay.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/string_number_conversions.h"
@@ -537,6 +538,8 @@ void ChildThreadImpl::OnFieldTrialGroupFinalized(
}
void ChildThreadImpl::Init(const Options& options) {
+ recordreplay::Assert("ChildThreadImpl::Init Start");
+
TRACE_EVENT0("startup", "ChildThreadImpl::Init");
g_lazy_child_thread_impl_tls.Pointer()->Set(this);
on_channel_error_called_ = false;
@@ -676,11 +679,14 @@ void ChildThreadImpl::Init(const Options& options) {
// In single-process mode, there is no need to synchronize trials to the
// browser process (because it's the same process).
if (!IsInBrowserProcess()) {
+ recordreplay::Assert("ChildThreadImpl::Init #1");
field_trial_syncer_.reset(
new variations::ChildProcessFieldTrialSyncer(this));
field_trial_syncer_->InitFieldTrialObserving(
*base::CommandLine::ForCurrentProcess());
}
+
+ recordreplay::Assert("ChildThreadImpl::Init Done");
}
ChildThreadImpl::~ChildThreadImpl() {
diff --git a/content/renderer/categorized_worker_pool.cc b/content/renderer/categorized_worker_pool.cc
index c8ac03c10b49..f052bf856bbf 100644
--- a/content/renderer/categorized_worker_pool.cc
+++ b/content/renderer/categorized_worker_pool.cc
@@ -169,7 +169,8 @@ class CategorizedWorkerPool::CategorizedWorkerPoolSequencedTaskRunner
};
CategorizedWorkerPool::CategorizedWorkerPool()
- : namespace_token_(GenerateNamespaceToken()),
+ : lock_("CategorizedWorkerPool.lock_"),
+ namespace_token_(GenerateNamespaceToken()),
has_task_for_normal_priority_thread_cv_(&lock_),
has_task_for_background_priority_thread_cv_(&lock_),
has_namespaces_with_finished_running_tasks_cv_(&lock_),
diff --git a/content/renderer/loader/navigation_body_loader.cc b/content/renderer/loader/navigation_body_loader.cc
index 8422c3126719..500fde104be5 100644
--- a/content/renderer/loader/navigation_body_loader.cc
+++ b/content/renderer/loader/navigation_body_loader.cc
@@ -6,6 +6,7 @@
#include "base/bind.h"
#include "base/macros.h"
+#include "base/record_replay.h"
#include "content/renderer/loader/web_url_loader_impl.h"
#include "content/renderer/render_frame_impl.h"
#include "services/network/public/cpp/url_loader_completion_status.h"
@@ -279,6 +280,8 @@ void NavigationBodyLoader::ReadFromDataPipe() {
uint32_t available = 0;
MojoResult result =
handle_->BeginReadData(&buffer, &available, MOJO_READ_DATA_FLAG_NONE);
+ recordreplay::Assert("NavigationBodyLoader::ReadFromDataPipe %u %u",
+ available, available ? ((const char*)buffer)[0] : 0);
if (result == MOJO_RESULT_SHOULD_WAIT) {
handle_watcher_.ArmOrNotify();
return;
diff --git a/content/renderer/render_frame_impl.cc b/content/renderer/render_frame_impl.cc
index cc800a8dd37c..3804002798eb 100644
--- a/content/renderer/render_frame_impl.cc
+++ b/content/renderer/render_frame_impl.cc
@@ -33,6 +33,7 @@
#include "base/metrics/histogram_macros.h"
#include "base/optional.h"
#include "base/process/process.h"
+#include "base/record_replay.h"
#include "base/run_loop.h"
#include "base/stl_util.h"
#include "base/strings/strcat.h"
@@ -4676,6 +4677,7 @@ void RenderFrameImpl::DidObserveLazyLoadBehavior(
void RenderFrameImpl::DidCreateScriptContext(v8::Local<v8::Context> context,
int world_id) {
+ recordreplay::Assert("RenderFrameImpl::DidCreateScriptContext Start");
if (((enabled_bindings_ & BINDINGS_POLICY_MOJO_WEB_UI) ||
enable_mojo_js_bindings_) &&
IsMainFrame() && world_id == ISOLATED_WORLD_ID_GLOBAL) {
@@ -4684,8 +4686,12 @@ void RenderFrameImpl::DidCreateScriptContext(v8::Local<v8::Context> context,
blink::WebContextFeatures::EnableMojoJS(context, true);
}
- for (auto& observer : observers_)
+ for (auto& observer : observers_) {
+ recordreplay::Assert("RenderFrameImpl::DidCreateScriptContext #1");
observer.DidCreateScriptContext(context, world_id);
+ }
+
+ recordreplay::Assert("RenderFrameImpl::DidCreateScriptContext Done");
}
void RenderFrameImpl::WillReleaseScriptContext(v8::Local<v8::Context> context,
diff --git a/content/renderer/render_thread_impl.cc b/content/renderer/render_thread_impl.cc
index 055e7bfc4bc5..dcc4489db7bb 100644
--- a/content/renderer/render_thread_impl.cc
+++ b/content/renderer/render_thread_impl.cc
@@ -27,6 +27,7 @@
#include "base/metrics/histogram_macros_local.h"
#include "base/path_service.h"
#include "base/process/process_metrics.h"
+#include "base/record_replay.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/string16.h"
@@ -912,6 +913,7 @@ void RenderThreadImpl::SetResourceDispatcherDelegate(
}
void RenderThreadImpl::InitializeCompositorThread() {
+ recordreplay::Assert("RenderThreadImpl::InitializeCompositorThread START");
blink_platform_impl_->CreateAndSetCompositorThread();
compositor_task_runner_ = blink_platform_impl_->CompositorThreadTaskRunner();
compositor_task_runner_->PostTask(
@@ -920,6 +922,7 @@ void RenderThreadImpl::InitializeCompositorThread() {
false));
GetContentClient()->renderer()->PostCompositorThreadCreated(
compositor_task_runner_.get());
+ recordreplay::Assert("RenderThreadImpl::InitializeCompositorThread DONE");
}
scoped_refptr<base::SingleThreadTaskRunner>
diff --git a/content/renderer/theme_helper_mac.mm b/content/renderer/theme_helper_mac.mm
index c15f3a631292..cedd1b771199 100644
--- a/content/renderer/theme_helper_mac.mm
+++ b/content/renderer/theme_helper_mac.mm
@@ -6,6 +6,7 @@
#include <Cocoa/Cocoa.h>
+#include "base/record_replay.h"
#include "base/strings/sys_string_conversions.h"
extern "C" {
@@ -17,6 +18,9 @@ namespace content {
void SystemColorsDidChange(int aqua_color_variant,
const std::string& highlight_text_color,
const std::string& highlight_color) {
+ recordreplay::Assert("SystemColorsDidChange %s %s",
+ highlight_text_color.c_str(), highlight_color.c_str());
+
NSUserDefaults* defaults = [NSUserDefaults standardUserDefaults];
// Register the defaults in the NSArgumentDomain, which is considered
diff --git a/extensions/renderer/v8_schema_registry.cc b/extensions/renderer/v8_schema_registry.cc
index edd0d2bd9488..10c57459fedb 100644
--- a/extensions/renderer/v8_schema_registry.cc
+++ b/extensions/renderer/v8_schema_registry.cc
@@ -17,6 +17,7 @@
#include "extensions/renderer/script_context.h"
#include "extensions/renderer/static_v8_external_one_byte_string_resource.h"
#include "extensions/renderer/v8_helpers.h"
+#include "v8/include/v8-inspector.h"
using content::V8ValueConverter;
diff --git a/gpu/ipc/client/gpu_channel_host.cc b/gpu/ipc/client/gpu_channel_host.cc
index d4c969c636db..2507bbf2986b 100644
--- a/gpu/ipc/client/gpu_channel_host.cc
+++ b/gpu/ipc/client/gpu_channel_host.cc
@@ -11,6 +11,7 @@
#include "base/bind.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram_macros.h"
+#include "base/record_replay.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread_restrictions.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -48,7 +49,8 @@ GpuChannelHost::GpuChannelHost(int channel_id,
image_decode_accelerator_proxy_(
this,
static_cast<int32_t>(
- GpuChannelReservedRoutes::kImageDecodeAccelerator)) {
+ GpuChannelReservedRoutes::kImageDecodeAccelerator)),
+ context_lock_("GpuChannelHost.context_lock_") {
next_image_id_.GetNext();
for (int32_t i = 0;
i <= static_cast<int32_t>(GpuChannelReservedRoutes::kMaxValue); ++i)
@@ -145,8 +147,11 @@ uint32_t GpuChannelHost::EnqueueDeferredMessage(
}
void GpuChannelHost::EnsureFlush(uint32_t deferred_message_id) {
+ recordreplay::Assert("GpuChannelHost::EnsureFlush Start");
AutoLock lock(context_lock_);
+ recordreplay::Assert("GpuChannelHost::EnsureFlush #1");
InternalFlush(deferred_message_id);
+ recordreplay::Assert("GpuChannelHost::EnsureFlush Done");
}
void GpuChannelHost::VerifyFlush(uint32_t deferred_message_id) {
@@ -183,6 +188,9 @@ void GpuChannelHost::EnqueuePendingOrderingBarrier() {
void GpuChannelHost::InternalFlush(uint32_t deferred_message_id) {
context_lock_.AssertAcquired();
+ recordreplay::Assert("GpuChannelHost::InternalFlush Start %d %u %u",
+ deferred_messages_.empty(), deferred_message_id, flushed_deferred_message_id_);
+
EnqueuePendingOrderingBarrier();
if (!deferred_messages_.empty() &&
deferred_message_id > flushed_deferred_message_id_) {
diff --git a/ipc/ipc_channel_proxy.cc b/ipc/ipc_channel_proxy.cc
index f204948f7d76..0e4c177fadc7 100644
--- a/ipc/ipc_channel_proxy.cc
+++ b/ipc/ipc_channel_proxy.cc
@@ -14,6 +14,7 @@
#include "base/location.h"
#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
+#include "base/record_replay.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
#include "build/build_config.h"
@@ -298,10 +299,12 @@ void ChannelProxy::Context::OnRemoveFilter(MessageFilter* filter) {
// Called on the listener's thread
void ChannelProxy::Context::AddFilter(MessageFilter* filter) {
+ recordreplay::Assert("ChannelProxy::Context::AddFilter Start");
base::AutoLock auto_lock(pending_filters_lock_);
pending_filters_.push_back(base::WrapRefCounted(filter));
ipc_task_runner_->PostTask(FROM_HERE,
base::BindOnce(&Context::OnAddFilter, this));
+ recordreplay::Assert("ChannelProxy::Context::AddFilter Done");
}
// Called on the listener's thread
@@ -396,8 +399,10 @@ void ChannelProxy::Context::OnDispatchBadMessage(const Message& message) {
void ChannelProxy::Context::OnDispatchAssociatedInterfaceRequest(
const std::string& interface_name,
mojo::ScopedInterfaceEndpointHandle handle) {
+ recordreplay::Assert("ChannelProxy::Context::OnDispatchAssociatedInterfaceRequest Start");
if (listener_)
listener_->OnAssociatedInterfaceRequest(interface_name, std::move(handle));
+ recordreplay::Assert("ChannelProxy::Context::OnDispatchAssociatedInterfaceRequest Done");
}
void ChannelProxy::Context::ClearChannel() {
diff --git a/ipc/ipc_mojo_bootstrap.cc b/ipc/ipc_mojo_bootstrap.cc
index 1fc3b955bf82..b5f258d6c99b 100644
--- a/ipc/ipc_mojo_bootstrap.cc
+++ b/ipc/ipc_mojo_bootstrap.cc
@@ -20,6 +20,7 @@
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/no_destructor.h"
+#include "base/record_replay.h"
#include "base/sequenced_task_runner.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/stringprintf.h"
@@ -130,7 +131,8 @@ class ChannelAssociatedGroupController
dispatcher_(this),
control_message_handler_(this),
control_message_proxy_thunk_(this),
- control_message_proxy_(&control_message_proxy_thunk_) {
+ control_message_proxy_(&control_message_proxy_thunk_),
+ lock_("ChannelAssociatedGroupController.lock_") {
thread_checker_.DetachFromThread();
control_message_handler_.SetDescription(
"IPC::mojom::Bootstrap [primary] PipeControlMessageHandler");
@@ -772,6 +774,8 @@ class ChannelAssociatedGroupController
}
void NotifyEndpointOfError(Endpoint* endpoint, bool force_async) {
+ recordreplay::Assert("NotifyEndpointOfError Start");
+
lock_.AssertAcquired();
DCHECK(endpoint->task_runner() && endpoint->client());
if (endpoint->task_runner()->RunsTasksInCurrentSequence() && !force_async) {
@@ -788,6 +792,8 @@ class ChannelAssociatedGroupController
NotifyEndpointOfErrorOnEndpointThread,
this, endpoint->id(), base::Unretained(endpoint)));
}
+
+ recordreplay::Assert("NotifyEndpointOfError Done");
}
void NotifyEndpointOfErrorOnEndpointThread(mojo::InterfaceId id,
@@ -915,6 +921,8 @@ class ChannelAssociatedGroupController
}
void AcceptOnProxyThread(mojo::Message message) {
+ recordreplay::Assert("AcceptOnProxyThread Start");
+
DCHECK(proxy_task_runner_->BelongsToCurrentThread());
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("mojom"),
"ChannelAssociatedGroupController::AcceptOnProxyThread");
@@ -924,12 +932,16 @@ class ChannelAssociatedGroupController
base::AutoLock locker(lock_);
Endpoint* endpoint = FindEndpoint(id);
- if (!endpoint)
+ if (!endpoint) {
+ recordreplay::Assert("AcceptOnProxyThread #1");
return;
+ }
mojo::InterfaceEndpointClient* client = endpoint->client();
- if (!client)
+ if (!client) {
+ recordreplay::Assert("AcceptOnProxyThread #2");
return;
+ }
// Using client->interface_name() is safe here because this is a static
// string defined for each mojo interface.
@@ -947,6 +959,8 @@ class ChannelAssociatedGroupController
if (!result)
RaiseError();
+
+ recordreplay::Assert("AcceptOnProxyThread Done");
}
void AcceptSyncMessage(mojo::InterfaceId interface_id, uint32_t message_id) {
diff --git a/media/gpu/mac/vt_video_decode_accelerator_mac.cc b/media/gpu/mac/vt_video_decode_accelerator_mac.cc
index 2474f4f48e11..50902e28346c 100644
--- a/media/gpu/mac/vt_video_decode_accelerator_mac.cc
+++ b/media/gpu/mac/vt_video_decode_accelerator_mac.cc
@@ -196,6 +196,9 @@ base::ScopedCFTypeRef<CMFormatDescriptionRef> CreateVideoFormatVP9(
media::VideoCodecProfile profile,
base::Optional<gfx::HDRMetadata> hdr_metadata,
const gfx::Size& coded_size) {
+ // FIXME: Disabled because this fails to compile.
+ return base::ScopedCFTypeRef<CMFormatDescriptionRef>();
+ /*
base::ScopedCFTypeRef<CFMutableDictionaryRef> format_config(
CreateFormatExtensions(kCMVideoCodecType_VP9, profile, color_space,
hdr_metadata));
@@ -212,6 +215,7 @@ base::ScopedCFTypeRef<CMFormatDescriptionRef> CreateVideoFormatVP9(
OSSTATUS_DLOG_IF(WARNING, status != noErr, status)
<< "CMVideoFormatDescriptionCreate()";
return format;
+ */
}
// Create a VTDecompressionSession using the provided |format|. If
@@ -318,7 +322,8 @@ bool InitializeVideoToolboxInternal() {
session.reset();
if (__builtin_available(macOS 11.0, *)) {
- VTRegisterSupplementalVideoDecoderIfAvailable(kCMVideoCodecType_VP9);
+ // FIXME disabled due to build break.
+ //VTRegisterSupplementalVideoDecoderIfAvailable(kCMVideoCodecType_VP9);
// Create a VP9 decoding session.
if (!CreateVideoToolboxSession(
@@ -1721,11 +1726,15 @@ VTVideoDecodeAccelerator::GetSupportedProfiles() {
if (!base::FeatureList::IsEnabled(kVideoToolboxVp9Decoding))
continue;
if (__builtin_available(macOS 10.13, *)) {
+ // FIXME disabled due to build break.
+ continue;
+ /*
if ((supported_profile == VP9PROFILE_PROFILE0 ||
supported_profile == VP9PROFILE_PROFILE2) &&
!VTIsHardwareDecodeSupported(kCMVideoCodecType_VP9)) {
continue;
}
+ */
// Success! We have VP9 hardware decoding support.
} else {
diff --git a/mojo/core/channel_mac.cc b/mojo/core/channel_mac.cc
index d212aa8ad69d..7deba2c15d9d 100644
--- a/mojo/core/channel_mac.cc
+++ b/mojo/core/channel_mac.cc
@@ -23,6 +23,7 @@
#include "base/mac/scoped_mach_port.h"
#include "base/mac/scoped_mach_vm.h"
#include "base/message_loop/message_pump_for_io.h"
+#include "base/record_replay.h"
#include "base/strings/stringprintf.h"
#include "base/task/current_thread.h"
@@ -224,6 +225,11 @@ class ChannelMac : public Channel,
// soon as the Channel establishes both the send and receive ports.
bool RequestSendDeadNameNotification() {
base::mac::ScopedMachSendRight previous;
+
+ recordreplay::Assert("RequestSendDeadNameNotification %d %d %d %d",
+ send_port_.get(), MACH_NOTIFY_DEAD_NAME,
+ receive_port_.get(), MACH_MSG_TYPE_MAKE_SEND_ONCE);
+
kern_return_t kr = mach_port_request_notification(
mach_task_self(), send_port_.get(), MACH_NOTIFY_DEAD_NAME, 0,
receive_port_.get(), MACH_MSG_TYPE_MAKE_SEND_ONCE,
@@ -433,6 +439,7 @@ class ChannelMac : public Channel,
}
bool MachMessageSendLocked(mach_msg_header_t* header) {
+ recordreplay::Assert("MachMessageSendLocked %u", header->msgh_size);
kern_return_t kr = mach_msg(header, MACH_SEND_MSG | MACH_SEND_TIMEOUT,
header->msgh_size, 0, MACH_PORT_NULL,
/*timeout=*/0, MACH_PORT_NULL);
@@ -624,12 +631,23 @@ class ChannelMac : public Channel,
return;
}
+ // When replaying, the raw address captured while recording will be
+ // replayed here, and it can't be dereferenced in this process. Allocate a
+ // new block of memory and copy its contents in from the recording.
+ void* address = descriptor->address;
+ if (recordreplay::IsReplaying()) {
+ address = nullptr;
+ kr = vm_allocate(mach_task_self(), (vm_address_t*)&address, descriptor->size, true);
+ CHECK(kr == KERN_SUCCESS);
+ }
+ recordreplay::RecordReplayBytes("ChannelMac::OnMachMessageReceived", address, descriptor->size);
+
payload = base::span<const char>(
- reinterpret_cast<const char*>(descriptor->address), descriptor->size);
+ reinterpret_cast<const char*>(address), descriptor->size);
// The kernel page-aligns the OOL memory when performing the mach_msg on
// the send side, but it preserves the original size in the descriptor.
ool_memory.reset_unaligned(
- reinterpret_cast<vm_address_t>(descriptor->address),
+ reinterpret_cast<vm_address_t>(address),
descriptor->size);
} else {
auto* data_size_ptr = buffer.Object<uint64_t>();
diff --git a/mojo/core/core.cc b/mojo/core/core.cc
index ea85da95ba74..0755669d09fb 100644
--- a/mojo/core/core.cc
+++ b/mojo/core/core.cc
@@ -17,6 +17,7 @@
#include "base/memory/ptr_util.h"
#include "base/memory/writable_shared_memory_region.h"
#include "base/rand_util.h"
+#include "base/record_replay.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_piece.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -236,12 +237,15 @@ void Core::RequestShutdown(base::OnceClosure callback) {
}
MojoHandle Core::ExtractMessagePipeFromInvitation(const std::string& name) {
+ recordreplay::Assert("Core::ExtractMessagePipeFromInvitation Overload Start");
RequestContext request_context;
ports::PortRef port0, port1;
GetNodeController()->node()->CreatePortPair(&port0, &port1);
MojoHandle handle = AddDispatcher(new MessagePipeDispatcher(
GetNodeController(), port0, kUnknownPipeIdForDebug, 1));
+ recordreplay::Assert("Core::ExtractMessagePipeFromInvitation Overload #1");
GetNodeController()->MergePortIntoInviter(name, port1);
+ recordreplay::Assert("Core::ExtractMessagePipeFromInvitation Overload Done");
return handle;
}
@@ -327,14 +331,21 @@ MojoResult Core::ArmTrap(MojoHandle trap_handle,
const MojoArmTrapOptions* options,
uint32_t* num_blocking_events,
MojoTrapEvent* blocking_events) {
- if (options && options->struct_size < sizeof(*options))
+ recordreplay::Assert("Core::ArmTrap Start");
+ if (options && options->struct_size < sizeof(*options)) {
+ recordreplay::Assert("Core::ArmTrap #1");
return MOJO_RESULT_INVALID_ARGUMENT;
+ }
RequestContext request_context;
scoped_refptr<Dispatcher> watcher = GetDispatcher(trap_handle);
- if (!watcher || watcher->GetType() != Dispatcher::Type::WATCHER)
+ if (!watcher || watcher->GetType() != Dispatcher::Type::WATCHER) {
+ recordreplay::Assert("Core::ArmTrap #2");
return MOJO_RESULT_INVALID_ARGUMENT;
- return watcher->Arm(num_blocking_events, blocking_events);
+ }
+ MojoResult rv = watcher->Arm(num_blocking_events, blocking_events);
+ recordreplay::Assert("Core::ArmTrap Done %d", rv);
+ return rv;
}
MojoResult Core::CreateMessage(const MojoCreateMessageOptions* options,
@@ -351,11 +362,16 @@ MojoResult Core::CreateMessage(const MojoCreateMessageOptions* options,
}
MojoResult Core::DestroyMessage(MojoMessageHandle message_handle) {
- if (!message_handle)
+ recordreplay::Assert("Core::DestroyMessage Start");
+ if (!message_handle) {
+ recordreplay::Assert("Core::DestroyMessage #1");
return MOJO_RESULT_INVALID_ARGUMENT;
+ }
RequestContext request_context;
delete reinterpret_cast<ports::UserMessageEvent*>(message_handle);
+
+ recordreplay::Assert("Core::DestroyMessage Done");
return MOJO_RESULT_OK;
}
@@ -491,6 +507,10 @@ MojoResult Core::CreateMessagePipe(const MojoCreateMessagePipeOptions* options,
ports::PortRef port0, port1;
GetNodeController()->node()->CreatePortPair(&port0, &port1);
+ recordreplay::Assert("Core::CreateMessagePipe %lu %lu %lu %lu",
+ port0.name().v1, port0.name().v2,
+ port1.name().v1, port1.name().v2);
+
DCHECK(message_pipe_handle0);
DCHECK(message_pipe_handle1);
@@ -584,6 +604,9 @@ MojoResult Core::FuseMessagePipes(MojoHandle handle0,
MessagePipeDispatcher* mpd1 =
static_cast<MessagePipeDispatcher*>(dispatcher1.get());
+ recordreplay::Assert("Core::FuseMessagePipes %lu %lu",
+ recordreplay::PointerId(mpd0), recordreplay::PointerId(mpd1));
+
if (!mpd0->Fuse(mpd1))
return MOJO_RESULT_FAILED_PRECONDITION;
@@ -1209,6 +1232,8 @@ MojoResult Core::ExtractMessagePipeFromInvitation(
uint32_t name_num_bytes,
const MojoExtractMessagePipeFromInvitationOptions* options,
MojoHandle* message_pipe_handle) {
+ recordreplay::Assert("Core::ExtractMessagePipeFromInvitation Start");
+
if (options && options->struct_size < sizeof(*options))
return MOJO_RESULT_INVALID_ARGUMENT;
if (!message_pipe_handle)
diff --git a/mojo/core/data_pipe_consumer_dispatcher.cc b/mojo/core/data_pipe_consumer_dispatcher.cc
index 4cf360935749..383c2873bfe0 100644
--- a/mojo/core/data_pipe_consumer_dispatcher.cc
+++ b/mojo/core/data_pipe_consumer_dispatcher.cc
@@ -14,6 +14,7 @@
#include "base/bind.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
+#include "base/record_replay.h"
#include "mojo/core/core.h"
#include "mojo/core/data_pipe_control_message.h"
#include "mojo/core/node_controller.h"
@@ -167,10 +168,16 @@ MojoResult DataPipeConsumerDispatcher::ReadData(
uint32_t tail_bytes_to_copy =
std::min(options_.capacity_num_bytes - read_offset_, bytes_to_read);
uint32_t head_bytes_to_copy = bytes_to_read - tail_bytes_to_copy;
- if (tail_bytes_to_copy > 0)
+ if (tail_bytes_to_copy > 0) {
+ recordreplay::RecordReplayBytes("DataPipeConsumerDispatcher::ReadData",
+ (uint8_t*)data + read_offset_, tail_bytes_to_copy);
memcpy(destination, data + read_offset_, tail_bytes_to_copy);
- if (head_bytes_to_copy > 0)
+ }
+ if (head_bytes_to_copy > 0) {
+ recordreplay::RecordReplayBytes("DataPipeConsumerDispatcher::ReadData",
+ (uint8_t*)data, head_bytes_to_copy);
memcpy(destination + tail_bytes_to_copy, data, head_bytes_to_copy);
+ }
}
*num_bytes = bytes_to_read;
@@ -223,6 +230,9 @@ MojoResult DataPipeConsumerDispatcher::BeginReadData(
*buffer_num_bytes = bytes_to_read;
two_phase_max_bytes_read_ = bytes_to_read;
+ recordreplay::RecordReplayBytes("DataPipeConsumerDispatcher::BeginReadData",
+ (void*)*buffer, *buffer_num_bytes);
+
if (had_new_data)
watchers_.NotifyState(GetHandleSignalsStateNoLock());
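
The two RecordReplayBytes calls above, together with the comment in channel_mac.cc earlier in this diff ("copy its contents in from the recording"), suggest the intended semantics: the buffer holds data that originated outside the recorded process, so while recording its bytes are saved, and while replaying the buffer is filled from the recording rather than from the live data pipe. A conceptual sketch under that assumption, with a deliberately naive in-memory store (the real driver keys data by its position in the recording, not by label):

    #include <cstddef>
    #include <cstring>
    #include <map>
    #include <string>
    #include <vector>

    namespace recordreplay_sketch {

    bool g_replaying = false;  // assume the driver reports the current mode

    // Hypothetical store: one queue of captured buffers per call-site label.
    std::map<std::string, std::vector<std::vector<char>>> g_buffers;
    std::map<std::string, size_t> g_next;

    void RecordReplayBytes(const char* why, void* buf, size_t size) {
      if (!g_replaying) {
        const char* p = static_cast<const char*>(buf);
        g_buffers[why].emplace_back(p, p + size);   // recording: capture bytes
      } else {
        const auto& saved = g_buffers[why][g_next[why]++];
        std::memcpy(buf, saved.data(), size);       // replaying: restore bytes
      }
    }

    }  // namespace recordreplay_sketch
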
diff --git a/mojo/core/handle_table.cc b/mojo/core/handle_table.cc
index 9426281d73f0..84c0cf4ac092 100644
--- a/mojo/core/handle_table.cc
+++ b/mojo/core/handle_table.cc
@@ -40,7 +40,7 @@ const char* GetNameForDispatcherType(Dispatcher::Type type) {
} // namespace
-HandleTable::HandleTable() = default;
+HandleTable::HandleTable() : lock_("HandleTable.lock_") {}
HandleTable::~HandleTable() = default;
diff --git a/mojo/core/message_pipe_dispatcher.cc b/mojo/core/message_pipe_dispatcher.cc
index 980d01ceccce..8f4c9031b1dc 100644
--- a/mojo/core/message_pipe_dispatcher.cc
+++ b/mojo/core/message_pipe_dispatcher.cc
@@ -10,6 +10,7 @@
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
+#include "base/record_replay.h"
#include "base/trace_event/trace_event.h"
#include "mojo/core/core.h"
#include "mojo/core/node_controller.h"
@@ -92,15 +93,25 @@ MessagePipeDispatcher::MessagePipeDispatcher(NodeController* node_controller,
port_(port),
pipe_id_(pipe_id),
endpoint_(endpoint),
+ signal_lock_("MessagePipeDispatcher.signal_lock_"),
watchers_(this) {
DVLOG(2) << "Creating new MessagePipeDispatcher for port " << port.name()
<< " [pipe_id=" << pipe_id << "; endpoint=" << endpoint << "]";
node_controller_->SetPortObserver(
port_, base::MakeRefCounted<PortObserverThunk>(this));
+
+ recordreplay::RegisterPointer(this);
+ recordreplay::Assert("MessagePipeDispatcher %lu %lu %lu",
+ recordreplay::PointerId(this),
+ port_.name().v1, port_.name().v2);
}
bool MessagePipeDispatcher::Fuse(MessagePipeDispatcher* other) {
+ recordreplay::Assert("MessagePipeDispatcher::Fuse %lu %lu %lu %lu",
+ port_.name().v1, port_.name().v2,
+ other->port_.name().v1, other->port_.name().v2);
+
node_controller_->SetPortObserver(port_, nullptr);
node_controller_->SetPortObserver(other->port_, nullptr);
@@ -375,7 +386,9 @@ scoped_refptr<Dispatcher> MessagePipeDispatcher::Deserialize(
state->pipe_id, state->endpoint);
}
-MessagePipeDispatcher::~MessagePipeDispatcher() = default;
+MessagePipeDispatcher::~MessagePipeDispatcher() {
+ recordreplay::UnregisterPointer(this);
+}
MojoResult MessagePipeDispatcher::CloseNoLock() {
signal_lock_.AssertAcquired();
diff --git a/mojo/core/node_channel.cc b/mojo/core/node_channel.cc
index c48fb573fea9..81815a37dcda 100644
--- a/mojo/core/node_channel.cc
+++ b/mojo/core/node_channel.cc
@@ -12,6 +12,7 @@
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
+#include "base/record_replay.h"
#include "mojo/core/broker_host.h"
#include "mojo/core/channel.h"
#include "mojo/core/configuration.h"
@@ -395,6 +396,7 @@ void NodeChannel::AcceptBrokerClient(const ports::NodeName& broker_name,
void NodeChannel::RequestPortMerge(const ports::PortName& connector_port_name,
const std::string& token) {
+ recordreplay::Assert("NodeChannel::RequestPortMerge Start");
RequestPortMergeData* data;
Channel::MessagePtr message =
CreateMessage(MessageType::REQUEST_PORT_MERGE,
@@ -402,6 +404,7 @@ void NodeChannel::RequestPortMerge(const ports::PortName& connector_port_name,
data->connector_port_name = connector_port_name;
memcpy(data + 1, token.data(), token.size());
WriteChannelMessage(std::move(message));
+ recordreplay::Assert("NodeChannel::RequestPortMerge Done");
}
void NodeChannel::RequestIntroduction(const ports::NodeName& name) {
@@ -426,7 +429,9 @@ void NodeChannel::Introduce(const ports::NodeName& name,
}
void NodeChannel::SendChannelMessage(Channel::MessagePtr message) {
+ recordreplay::Assert("NodeChannel::SendChannelMessage Start");
WriteChannelMessage(std::move(message));
+ recordreplay::Assert("NodeChannel::SendChannelMessage Done");
}
void NodeChannel::Broadcast(Channel::MessagePtr message) {
@@ -538,14 +543,18 @@ void NodeChannel::OnChannelMessage(const void* payload,
std::vector<PlatformHandle> handles) {
DCHECK(owning_task_runner()->RunsTasksInCurrentSequence());
+ recordreplay::Assert("NodeChannel::OnChannelMessage Start");
+
RequestContext request_context(RequestContext::Source::SYSTEM);
if (payload_size <= sizeof(Header)) {
+ recordreplay::Assert("NodeChannel::OnChannelMessage #1");
delegate_->OnChannelError(remote_node_name_, this);
return;
}
const Header* header = static_cast<const Header*>(payload);
+ recordreplay::Assert("NodeChannel::OnChannelMessage #2 %d", header->type);
switch (header->type) {
case MessageType::ACCEPT_INVITEE: {
AcceptInviteeData data;
@@ -658,8 +667,10 @@ void NodeChannel::OnChannelMessage(const void* payload,
}
case MessageType::INTRODUCE: {
+ recordreplay::Assert("NodeChannel::OnChannelMessage #3");
IntroductionData data;
if (GetMessagePayload(payload, payload_size, &data)) {
+ recordreplay::Assert("NodeChannel::OnChannelMessage #4");
if (handles.size() > 1) {
DLOG(ERROR) << "Dropping invalid introduction message.";
break;
@@ -670,8 +681,10 @@ void NodeChannel::OnChannelMessage(const void* payload,
delegate_->OnIntroduce(remote_node_name_, data.name,
std::move(channel_handle));
+ recordreplay::Assert("NodeChannel::OnChannelMessage #5");
return;
}
+ recordreplay::Assert("NodeChannel::OnChannelMessage #6");
break;
}
diff --git a/mojo/core/node_controller.cc b/mojo/core/node_controller.cc
index 965ee84fbdbf..dcf2c2c1fb0b 100644
--- a/mojo/core/node_controller.cc
+++ b/mojo/core/node_controller.cc
@@ -15,6 +15,7 @@
#include "base/macros.h"
#include "base/process/process_handle.h"
#include "base/rand_util.h"
+#include "base/record_replay.h"
#include "base/task/current_thread.h"
#include "base/time/time.h"
#include "base/timer/elapsed_timer.h"
@@ -149,7 +150,13 @@ NodeController::~NodeController() = default;
NodeController::NodeController(Core* core)
: core_(core),
name_(GetRandomNodeName()),
- node_(new ports::Node(name_, this)) {
+ node_(new ports::Node(name_, this)),
+ peers_lock_("NodeController.peers_lock_"),
+ reserved_ports_lock_("NodeController.reserved_ports_lock_"),
+ pending_port_merges_lock_("NodeController.pending_port_merges_lock_"),
+ inviter_lock_("NodeController.inviter_lock_"),
+ broker_lock_("NodeController.broker_lock_"),
+ shutdown_lock_("NodeController.shutdown_lock_") {
DVLOG(1) << "Initializing node " << name_;
}
@@ -268,6 +275,8 @@ int NodeController::SendUserMessage(
void NodeController::MergePortIntoInviter(const std::string& name,
const ports::PortRef& port) {
+ recordreplay::Assert("NodeController::MergePortIntoInviter Start");
+
scoped_refptr<NodeChannel> inviter;
bool reject_merge = false;
{
@@ -277,6 +286,8 @@ void NodeController::MergePortIntoInviter(const std::string& name,
// |pending_port_merges_|.
base::AutoLock lock(pending_port_merges_lock_);
inviter = GetInviterChannel();
+ recordreplay::Assert("NodeController::MergePortIntoInviter #1 %d %d",
+ reject_pending_merges_, !!inviter);
if (reject_pending_merges_) {
reject_merge = true;
} else if (!inviter) {
@@ -288,10 +299,13 @@ void NodeController::MergePortIntoInviter(const std::string& name,
node_->ClosePort(port);
DVLOG(2) << "Rejecting port merge for name " << name
<< " due to closed inviter channel.";
+ recordreplay::Assert("NodeController::MergePortIntoInviter #2");
return;
}
+ recordreplay::Assert("NodeController::MergePortIntoInviter #3");
inviter->RequestPortMerge(port.name(), name);
+ recordreplay::Assert("NodeController::MergePortIntoInviter Done");
}
int NodeController::MergeLocalPorts(const ports::PortRef& port0,
@@ -745,6 +759,12 @@ void NodeController::DropAllPeers() {
void NodeController::ForwardEvent(const ports::NodeName& node,
ports::ScopedEvent event) {
DCHECK(event);
+
+ recordreplay::Assert("NodeController::ForwardEvent %d %lu %lu %lu %lu",
+ node == name_,
+ name_.v1, name_.v2,
+ node.v1, node.v2);
+
if (node == name_)
node_->AcceptEvent(std::move(event));
else
@@ -1018,6 +1038,8 @@ void NodeController::OnEventMessage(const ports::NodeName& from_node,
Channel::MessagePtr channel_message) {
DCHECK(io_task_runner_->RunsTasksInCurrentSequence());
+ recordreplay::Assert("NodeController::OnEventMessage Start");
+
auto event = DeserializeEventMessage(from_node, std::move(channel_message));
if (!event) {
// We silently ignore unparseable events, as they may come from a process
@@ -1029,6 +1051,8 @@ void NodeController::OnEventMessage(const ports::NodeName& from_node,
node_->AcceptEvent(std::move(event));
AttemptShutdownIfRequested();
+
+ recordreplay::Assert("NodeController::OnEventMessage Done");
}
void NodeController::OnRequestPortMerge(
diff --git a/mojo/core/ports/node.cc b/mojo/core/ports/node.cc
index 7907b38d4e69..163338228032 100644
--- a/mojo/core/ports/node.cc
+++ b/mojo/core/ports/node.cc
@@ -17,6 +17,7 @@
#include "base/memory/ref_counted.h"
#include "base/notreached.h"
#include "base/optional.h"
+#include "base/record_replay.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_local.h"
#include "build/build_config.h"
@@ -47,7 +48,7 @@ constexpr size_t kRandomNameCacheSize = 256;
// to collisions between independently generated names in different processes.
class RandomNameGenerator {
public:
- RandomNameGenerator() = default;
+ RandomNameGenerator() : lock_("RandomNameGenerator.lock_") {}
~RandomNameGenerator() = default;
PortName GenerateRandomPortName() {
@@ -98,12 +99,13 @@ bool CanAcceptMoreMessages(const Port* port) {
void GenerateRandomPortName(PortName* name) {
*name = g_name_generator.Get().GenerateRandomPortName();
+ recordreplay::Assert("GenerateRandomPortName %lu %lu", name->v1, name->v2);
}
} // namespace
Node::Node(const NodeName& name, NodeDelegate* delegate)
- : name_(name), delegate_(this, delegate) {}
+ : name_(name), delegate_(this, delegate), ports_lock_("Node.ports_lock_") {}
Node::~Node() {
if (!ports_.empty())
@@ -206,6 +208,8 @@ int Node::InitializePort(const PortRef& port_ref,
int Node::CreatePortPair(PortRef* port0_ref, PortRef* port1_ref) {
int rv;
+ recordreplay::Assert("Node::CreatePortPair %lu %lu", name_.v1, name_.v2);
+
rv = CreateUninitializedPort(port0_ref);
if (rv != OK)
return rv;
@@ -382,6 +386,7 @@ int Node::GetMessage(const PortRef& port_ref,
int Node::SendUserMessage(const PortRef& port_ref,
std::unique_ptr<UserMessageEvent> message) {
+ recordreplay::Assert("Node::SendUserMessage Start");
int rv = SendUserMessageInternal(port_ref, &message);
if (rv != OK) {
// If send failed, close all carried ports. Note that we're careful not to
@@ -396,6 +401,7 @@ int Node::SendUserMessage(const PortRef& port_ref,
ClosePort(port);
}
}
+ recordreplay::Assert("Node::SendUserMessage Done %d", rv);
return rv;
}
@@ -429,6 +435,7 @@ int Node::SetAcknowledgeRequestInterval(
}
int Node::AcceptEvent(ScopedEvent event) {
+ recordreplay::Assert("Node::AcceptEvent %d", event->type());
switch (event->type()) {
case Event::Type::kUserMessage:
return OnUserMessage(Event::Cast<UserMessageEvent>(&event));
@@ -598,9 +605,14 @@ int Node::OnUserMessage(std::unique_ptr<UserMessageEvent> message) {
}
int Node::OnPortAccepted(std::unique_ptr<PortAcceptedEvent> event) {
+ recordreplay::Assert("Node::OnPortAccepted Start %lu %lu",
+ event->port_name().v1, event->port_name().v2);
+
PortRef port_ref;
- if (GetPort(event->port_name(), &port_ref) != OK)
+ if (GetPort(event->port_name(), &port_ref) != OK) {
+ recordreplay::Assert("Node::OnPortAccepted UnknownPort");
return ERROR_PORT_UNKNOWN;
+ }
#if DCHECK_IS_ON()
{
@@ -615,6 +627,8 @@ int Node::OnPortAccepted(std::unique_ptr<PortAcceptedEvent> event) {
}
int Node::OnObserveProxy(std::unique_ptr<ObserveProxyEvent> event) {
+ recordreplay::Assert("Node::OnObserveProxy Start");
+
if (event->port_name() == kInvalidPortName) {
// An ObserveProxy with an invalid target port name is a broadcast used to
// inform ports when their peer (which was itself a proxy) has become
@@ -626,6 +640,7 @@ int Node::OnObserveProxy(std::unique_ptr<ObserveProxyEvent> event) {
DCHECK_EQ(event->proxy_target_node_name(), kInvalidNodeName);
DCHECK_EQ(event->proxy_target_port_name(), kInvalidPortName);
DestroyAllPortsWithPeer(event->proxy_node_name(), event->proxy_port_name());
+ recordreplay::Assert("Node::OnObserveProxy #1");
return OK;
}
@@ -636,6 +651,7 @@ int Node::OnObserveProxy(std::unique_ptr<ObserveProxyEvent> event) {
if (GetPort(event->port_name(), &port_ref) != OK) {
DVLOG(1) << "ObserveProxy: " << event->port_name() << "@" << name_
<< " not found";
+ recordreplay::Assert("Node::OnObserveProxy #2");
return OK;
}
@@ -697,8 +713,13 @@ int Node::OnObserveProxy(std::unique_ptr<ObserveProxyEvent> event) {
}
}
- if (event_to_forward)
+ recordreplay::Assert("Node::OnObserveProxy ForwardEvent #3 %d", !!event_to_forward);
+
+ if (event_to_forward) {
+ recordreplay::Assert("Node::OnObserveProxy ForwardEvent #1");
delegate_->ForwardEvent(event_target_node, std::move(event_to_forward));
+ recordreplay::Assert("Node::OnObserveProxy ForwardEvent #2");
+ }
if (peer_changed) {
// Re-send ack and/or ack requests, as the previous peer proxy may not have
@@ -709,6 +730,7 @@ int Node::OnObserveProxy(std::unique_ptr<ObserveProxyEvent> event) {
delegate_->PortStatusChanged(port_ref);
}
+ recordreplay::Assert("Node::OnObserveProxy Done");
return OK;
}
@@ -1048,16 +1070,34 @@ int Node::SendUserMessageInternal(const PortRef& port_ref,
int Node::MergePortsInternal(const PortRef& port0_ref,
const PortRef& port1_ref,
bool allow_close_on_bad_state) {
+ recordreplay::Assert("Node::MergePortsInternal %lu %lu %lu %lu",
+ port0_ref.name().v1, port0_ref.name().v2,
+ port1_ref.name().v1, port1_ref.name().v2);
+
const PortRef* port_refs[2] = {&port0_ref, &port1_ref};
{
// Needed to swap peer map entries below.
PortLocker::AssertNoPortsLockedOnCurrentThread();
base::ReleasableAutoLock ports_locker(&ports_lock_);
+ recordreplay::Assert("Node::MergePortsInternal #0.0 %lu %lu",
+ port_refs[0]->name().v1, port_refs[0]->name().v2);
+
base::Optional<PortLocker> locker(base::in_place, port_refs, 2);
+
+ recordreplay::Assert("Node::MergePortsInternal #0.01 %lu %lu",
+ port_refs[0]->name().v1, port_refs[0]->name().v2);
+
auto* port0 = locker->GetPort(port0_ref);
+
+ recordreplay::Assert("Node::MergePortsInternal #0.02 %lu %lu",
+ port_refs[0]->name().v1, port_refs[0]->name().v2);
+
auto* port1 = locker->GetPort(port1_ref);
+ recordreplay::Assert("Node::MergePortsInternal #0.03 %lu %lu",
+ port_refs[0]->name().v1, port_refs[0]->name().v2);
+
// There are several conditions which must be met before we'll consider
// merging two ports:
//
@@ -1073,6 +1113,7 @@ int Node::MergePortsInternal(const PortRef& port0_ref,
port1->peer_port_name == port0_ref.name()) ||
port0->next_sequence_num_to_send != kInitialSequenceNum ||
port1->next_sequence_num_to_send != kInitialSequenceNum) {
+ recordreplay::Assert("Node::MergePortsInternal #1");
// On failure, we only close a port if it was at least properly in the
// |kReceiving| state. This avoids getting the system in an inconsistent
// state by e.g. closing a proxy abruptly.
@@ -1091,6 +1132,9 @@ int Node::MergePortsInternal(const PortRef& port0_ref,
return ERROR_PORT_STATE_UNEXPECTED;
}
+ recordreplay::Assert("Node::MergePortsInternal #0.1 %lu %lu",
+ port_refs[0]->name().v1, port_refs[0]->name().v2);
+
// Swap the ports' peer information and switch them both to proxying mode.
SwapPortPeers(port0_ref.name(), port0, port1_ref.name(), port1);
port0->state = Port::kProxying;
@@ -1099,13 +1143,19 @@ int Node::MergePortsInternal(const PortRef& port0_ref,
port0->remove_proxy_on_last_message = true;
if (port1->peer_closed)
port1->remove_proxy_on_last_message = true;
+
+ recordreplay::Assert("Node::MergePortsInternal #0.2 %lu %lu",
+ port_refs[0]->name().v1, port_refs[0]->name().v2);
}
// Flush any queued messages from the new proxies and, if successful, complete
// the merge by initiating proxy removals.
if (ForwardUserMessagesFromProxy(port0_ref) == OK &&
ForwardUserMessagesFromProxy(port1_ref) == OK) {
+ recordreplay::Assert("Node::MergePortsInternal #2");
for (size_t i = 0; i < 2; ++i) {
+ recordreplay::Assert("Node::MergePortsInternal #2.1 %lu %lu %lu", i,
+ port_refs[i]->name().v1, port_refs[i]->name().v2);
bool try_remove_proxy_immediately = false;
ScopedEvent closure_event;
NodeName closure_event_target_node;
@@ -1122,6 +1172,7 @@ int Node::MergePortsInternal(const PortRef& port0_ref,
port->peer_port_name, port->last_sequence_num_to_receive);
}
}
+ recordreplay::Assert("Node::MergePortsInternal #3 %d", try_remove_proxy_immediately);
if (try_remove_proxy_immediately)
TryRemoveProxy(*port_refs[i]);
else
@@ -1133,6 +1184,7 @@ int Node::MergePortsInternal(const PortRef& port0_ref,
}
}
+ recordreplay::Assert("Node::MergePortsInternal Done");
return OK;
}
@@ -1153,6 +1205,7 @@ int Node::MergePortsInternal(const PortRef& port0_ref,
port1->state = Port::kReceiving;
}
+ recordreplay::Assert("Node::MergePortsInternal #4");
ClosePort(port0_ref);
ClosePort(port1_ref);
return ERROR_PORT_STATE_UNEXPECTED;
@@ -1368,17 +1421,23 @@ int Node::PrepareToForwardUserMessage(const PortRef& forwarding_port_ref,
}
int Node::BeginProxying(const PortRef& port_ref) {
+ recordreplay::Assert("Node::BeginProxying Start");
+
{
SinglePortLocker locker(&port_ref);
auto* port = locker.port();
- if (port->state != Port::kBuffering)
+ if (port->state != Port::kBuffering) {
+ recordreplay::Assert("Node::BeginProxying OOPS #1");
return OOPS(ERROR_PORT_STATE_UNEXPECTED);
+ }
port->state = Port::kProxying;
}
int rv = ForwardUserMessagesFromProxy(port_ref);
- if (rv != OK)
+ if (rv != OK) {
+ recordreplay::Assert("Node::BeginProxying #1");
return rv;
+ }
// Forward any pending acknowledge request.
MaybeForwardAckRequest(port_ref);
@@ -1389,8 +1448,10 @@ int Node::BeginProxying(const PortRef& port_ref) {
{
SinglePortLocker locker(&port_ref);
auto* port = locker.port();
- if (port->state != Port::kProxying)
+ if (port->state != Port::kProxying) {
+ recordreplay::Assert("Node::BeginProxying OOPS #2");
return OOPS(ERROR_PORT_STATE_UNEXPECTED);
+ }
try_remove_proxy_immediately = port->remove_proxy_on_last_message;
if (try_remove_proxy_immediately) {
@@ -1408,10 +1469,13 @@ int Node::BeginProxying(const PortRef& port_ref) {
InitiateProxyRemoval(port_ref);
}
+ recordreplay::Assert("Node::BeginProxying Done");
return OK;
}
int Node::ForwardUserMessagesFromProxy(const PortRef& port_ref) {
+ recordreplay::Assert("Node::ForwardUserMessagesFromProxy Start");
+
for (;;) {
// NOTE: We forward messages in sequential order here so that we maintain
// the message queue's notion of next sequence number. That's useful for the
@@ -1421,6 +1485,7 @@ int Node::ForwardUserMessagesFromProxy(const PortRef& port_ref) {
{
SinglePortLocker locker(&port_ref);
locker.port()->message_queue.GetNextMessage(&message, nullptr);
+ recordreplay::Assert("Node::ForwardUserMessagesFromProxy #1 %d", !!message);
if (!message)
break;
}
@@ -1429,15 +1494,21 @@ int Node::ForwardUserMessagesFromProxy(const PortRef& port_ref) {
int rv = PrepareToForwardUserMessage(port_ref, Port::kProxying,
true /* ignore_closed_peer */,
message.get(), &target_node);
- if (rv != OK)
+ if (rv != OK) {
+ recordreplay::Assert("Node::ForwardUserMessagesFromProxy #2");
return rv;
+ }
delegate_->ForwardEvent(target_node, std::move(message));
}
+ recordreplay::Assert("Node::ForwardUserMessagesFromProxy Done");
return OK;
}
void Node::InitiateProxyRemoval(const PortRef& port_ref) {
+ recordreplay::Assert("Node::InitiateProxyRemoval Start %lu %lu",
+ port_ref.name().v1, port_ref.name().v2);
+
NodeName peer_node_name;
PortName peer_port_name;
{
@@ -1455,6 +1526,8 @@ void Node::InitiateProxyRemoval(const PortRef& port_ref) {
std::make_unique<ObserveProxyEvent>(
peer_port_name, name_, port_ref.name(),
peer_node_name, peer_port_name));
+
+ recordreplay::Assert("Node::InitiateProxyRemoval Done");
}
void Node::TryRemoveProxy(const PortRef& port_ref) {
diff --git a/mojo/core/ports/port.cc b/mojo/core/ports/port.cc
index c46dc9ed25be..ef9dfd685523 100644
--- a/mojo/core/ports/port.cc
+++ b/mojo/core/ports/port.cc
@@ -4,6 +4,8 @@
#include "mojo/core/ports/port.h"
+#include "base/record_replay.h"
+
namespace mojo {
namespace core {
namespace ports {
@@ -19,9 +21,15 @@ Port::Port(uint64_t next_sequence_num_to_send,
message_queue(next_sequence_num_to_receive),
remove_proxy_on_last_message(false),
peer_closed(false),
- peer_lost_unexpectedly(false) {}
+ peer_lost_unexpectedly(false),
+ lock_("Port.lock_") {
+ // Registering new ports is needed for deterministic sorting; see port_locker.cc.
+ recordreplay::RegisterPointer(this);
+}
-Port::~Port() = default;
+Port::~Port() {
+ recordreplay::UnregisterPointer(this);
+}
} // namespace ports
} // namespace core
diff --git a/mojo/core/ports/port_locker.cc b/mojo/core/ports/port_locker.cc
index 880492332ddd..1c6213ec38ab 100644
--- a/mojo/core/ports/port_locker.cc
+++ b/mojo/core/ports/port_locker.cc
@@ -6,6 +6,7 @@
#include <algorithm>
+#include "base/record_replay.h"
#include "mojo/core/ports/port.h"
#if DCHECK_IS_ON()
@@ -30,6 +31,22 @@ void UpdateTLS(PortLocker* old_locker, PortLocker* new_locker) {
} // namespace
+static uintptr_t GetPortId(Port* port) {
+ // When recording/replaying, the sorted order of ports needs to be
+ // consistent, so we sort by the ID associated with the port via
+ // RegisterPointer instead of by its address.
+#ifdef OS_MAC
+ if (recordreplay::IsRecordingOrReplaying()) {
+ uintptr_t id = recordreplay::PointerId(port);
+ CHECK(id);
+ return id;
+ } else {
+ return (uintptr_t)port;
+ }
+#else
+ return (uintptr_t)port;
+#endif
+}
+
PortLocker::PortLocker(const PortRef** port_refs, size_t num_ports)
: port_refs_(port_refs), num_ports_(num_ports) {
#if DCHECK_IS_ON()
@@ -39,7 +56,9 @@ PortLocker::PortLocker(const PortRef** port_refs, size_t num_ports)
// Sort the ports by address to lock them in a globally consistent order.
std::sort(
port_refs_, port_refs_ + num_ports_,
- [](const PortRef* a, const PortRef* b) { return a->port() < b->port(); });
+ [](const PortRef* a, const PortRef* b) {
+ return GetPortId(a->port()) < GetPortId(b->port());
+ });
for (size_t i = 0; i < num_ports_; ++i) {
// TODO(crbug.com/725605): Remove this CHECK.
CHECK(port_refs_[i]->port());
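
The sort above relies on RegisterPointer/PointerId from base/record_replay.h (not part of this section) handing out stable IDs: raw addresses differ between the recording and the replaying process, so sorting ports by address could lock the same two ports in different orders and diverge. A sketch of the kind of registry this implies, offered as an assumption about its shape rather than the actual implementation:

    #include <cstdint>
    #include <mutex>
    #include <unordered_map>

    namespace recordreplay_sketch {

    // Sketch only: IDs follow construction order, which is itself kept
    // deterministic when recording/replaying, so they are stable across runs.
    std::mutex g_lock;
    std::unordered_map<const void*, uintptr_t> g_ids;
    uintptr_t g_next_id = 1;

    void RegisterPointer(const void* ptr) {
      std::lock_guard<std::mutex> guard(g_lock);
      g_ids[ptr] = g_next_id++;
    }

    void UnregisterPointer(const void* ptr) {
      std::lock_guard<std::mutex> guard(g_lock);
      g_ids.erase(ptr);
    }

    uintptr_t PointerId(const void* ptr) {
      std::lock_guard<std::mutex> guard(g_lock);
      auto it = g_ids.find(ptr);
      return it == g_ids.end() ? 0 : it->second;  // unregistered => 0, which CHECK(id) above rejects
    }

    }  // namespace recordreplay_sketch
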
diff --git a/mojo/core/request_context.cc b/mojo/core/request_context.cc
index 4ede95c41468..c0a42b2d4f8e 100644
--- a/mojo/core/request_context.cc
+++ b/mojo/core/request_context.cc
@@ -6,6 +6,7 @@
#include "base/check.h"
#include "base/lazy_instance.h"
+#include "base/record_replay.h"
#include "base/threading/thread_local.h"
namespace mojo {
@@ -29,7 +30,11 @@ RequestContext::RequestContext(Source source)
}
RequestContext::~RequestContext() {
+ recordreplay::Assert("RequestContext::~RequestContext Start");
+
if (IsCurrent()) {
+ recordreplay::Assert("RequestContext::~RequestContext #1");
+
// NOTE: Callbacks invoked by this destructor are allowed to initiate new
// EDK requests on this thread, so we need to reset the thread-local context
// pointer before calling them. We persist the original notification source
@@ -63,19 +68,25 @@ RequestContext::~RequestContext() {
// treating all nested trap events as if they originated from a local API
// call even if this is a system RequestContext.
RequestContext inner_context(Source::LOCAL_API_CALL);
+ recordreplay::Assert("RequestContext::~RequestContext #2");
watch->InvokeCallback(MOJO_RESULT_CANCELLED, closed_state, flags);
+ recordreplay::Assert("RequestContext::~RequestContext #3");
}
for (const WatchNotifyFinalizer& watch :
watch_notify_finalizers_.container()) {
RequestContext inner_context(source_);
+ recordreplay::Assert("RequestContext::~RequestContext #4");
watch.watch->InvokeCallback(watch.result, watch.state, flags);
+ recordreplay::Assert("RequestContext::~RequestContext #5");
}
} else {
// It should be impossible for nested contexts to have finalizers.
DCHECK(watch_notify_finalizers_.container().empty());
DCHECK(watch_cancel_finalizers_.container().empty());
}
+
+ recordreplay::Assert("RequestContext::~RequestContext Done");
}
// static
@@ -87,6 +98,7 @@ RequestContext* RequestContext::current() {
void RequestContext::AddWatchNotifyFinalizer(scoped_refptr<Watch> watch,
MojoResult result,
const HandleSignalsState& state) {
+ recordreplay::Assert("RequestContext::AddWatchNotifyFinalizer");
DCHECK(IsCurrent());
watch_notify_finalizers_->push_back(
WatchNotifyFinalizer(std::move(watch), result, state));
diff --git a/mojo/core/watch.cc b/mojo/core/watch.cc
index 0d0429da0975..12cb5679ebf8 100644
--- a/mojo/core/watch.cc
+++ b/mojo/core/watch.cc
@@ -4,6 +4,7 @@
#include "mojo/core/watch.h"
+#include "base/record_replay.h"
#include "mojo/core/request_context.h"
#include "mojo/core/watcher_dispatcher.h"
@@ -19,12 +20,17 @@ Watch::Watch(const scoped_refptr<WatcherDispatcher>& watcher,
dispatcher_(dispatcher),
context_(context),
signals_(signals),
- condition_(condition) {}
+ condition_(condition),
+ notification_lock_("Watch.notification_lock_") {
+ recordreplay::RegisterPointer(this);
+}
bool Watch::NotifyState(const HandleSignalsState& state,
bool allowed_to_call_callback) {
AssertWatcherLockAcquired();
+ recordreplay::Assert("Watch::NotifyState %lu %d", recordreplay::PointerId(this), allowed_to_call_callback);
+
// NOTE: This method must NEVER call into |dispatcher_| directly, because it
// may be called while |dispatcher_| holds a lock.
MojoResult rv = MOJO_RESULT_SHOULD_WAIT;
@@ -36,6 +42,8 @@ bool Watch::NotifyState(const HandleSignalsState& state,
condition_ == MOJO_TRIGGER_CONDITION_SIGNALS_UNSATISFIED);
if (notify_success) {
rv = MOJO_RESULT_OK;
+ recordreplay::Assert("Watch::NotifyState #0 %d %d",
+ allowed_to_call_callback, last_known_result_);
if (allowed_to_call_callback && rv != last_known_result_) {
request_context->AddWatchNotifyFinalizer(this, MOJO_RESULT_OK, state);
}
@@ -43,6 +51,7 @@ bool Watch::NotifyState(const HandleSignalsState& state,
!state.can_satisfy_any(signals_)) {
rv = MOJO_RESULT_FAILED_PRECONDITION;
if (allowed_to_call_callback && rv != last_known_result_) {
+ recordreplay::Assert("Watch::NotifyState #1 %lu", recordreplay::PointerId(this));
request_context->AddWatchNotifyFinalizer(
this, MOJO_RESULT_FAILED_PRECONDITION, state);
}
@@ -55,6 +64,7 @@ bool Watch::NotifyState(const HandleSignalsState& state,
}
void Watch::Cancel() {
+ recordreplay::Assert("Watch::Cancel %lu", recordreplay::PointerId(this));
RequestContext::current()->AddWatchCancelFinalizer(this);
}
@@ -78,7 +88,9 @@ void Watch::InvokeCallback(MojoResult result,
watcher_->InvokeWatchCallback(context_, result, state, flags);
}
-Watch::~Watch() = default;
+Watch::~Watch() {
+ recordreplay::UnregisterPointer(this);
+}
#if DCHECK_IS_ON()
void Watch::AssertWatcherLockAcquired() const {
diff --git a/mojo/core/watcher_dispatcher.cc b/mojo/core/watcher_dispatcher.cc
index 00d2a431036e..be4968869fce 100644
--- a/mojo/core/watcher_dispatcher.cc
+++ b/mojo/core/watcher_dispatcher.cc
@@ -11,16 +11,22 @@
#include "base/debug/alias.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
+#include "base/record_replay.h"
#include "mojo/core/watch.h"
namespace mojo {
namespace core {
WatcherDispatcher::WatcherDispatcher(MojoTrapEventHandler handler)
- : handler_(handler) {}
+ : handler_(handler),
+ lock_("WatcherDispatcher.lock_") {
+ // Registering dispatchers is needed for deterministic sort order in WatcherSets.
+ recordreplay::RegisterPointer(this);
+}
void WatcherDispatcher::NotifyHandleState(Dispatcher* dispatcher,
const HandleSignalsState& state) {
+ recordreplay::Assert("WatcherDispatcher::NotifyHandleState %lu", recordreplay::PointerId(this));
base::AutoLock lock(lock_);
auto it = watched_handles_.find(dispatcher);
if (it == watched_handles_.end())
@@ -66,6 +72,8 @@ void WatcherDispatcher::InvokeWatchCallback(uintptr_t context,
MojoResult result,
const HandleSignalsState& state,
MojoTrapEventFlags flags) {
+ recordreplay::Assert("WatcherDispatcher::InvokeWatchCallback Start");
+
MojoTrapEvent event;
event.struct_size = sizeof(event);
event.trigger_context = context;
@@ -87,11 +95,15 @@ void WatcherDispatcher::InvokeWatchCallback(uintptr_t context,
// This guarantee is sufficient to make safe, synchronized, per-context
// state management possible in user code.
base::AutoLock lock(lock_);
- if (closed_ && result != MOJO_RESULT_CANCELLED)
+ if (closed_ && result != MOJO_RESULT_CANCELLED) {
+ recordreplay::Assert("WatcherDispatcher::InvokeWatchCallback #1");
return;
+ }
}
handler_(&event);
+
+ recordreplay::Assert("WatcherDispatcher::InvokeWatchCallback Done");
}
Dispatcher::Type WatcherDispatcher::GetType() const {
@@ -210,18 +222,26 @@ MojoResult WatcherDispatcher::CancelWatch(uintptr_t context) {
MojoResult WatcherDispatcher::Arm(uint32_t* num_blocking_events,
MojoTrapEvent* blocking_events) {
+ recordreplay::Assert("WatcherDispatcher::Arm Start");
base::AutoLock lock(lock_);
- if (num_blocking_events && !blocking_events)
+ if (num_blocking_events && !blocking_events) {
+ recordreplay::Assert("WatcherDispatcher::Arm #1");
return MOJO_RESULT_INVALID_ARGUMENT;
- if (closed_)
+ }
+ if (closed_) {
+ recordreplay::Assert("WatcherDispatcher::Arm #2");
return MOJO_RESULT_INVALID_ARGUMENT;
+ }
- if (watched_handles_.empty())
+ if (watched_handles_.empty()) {
+ recordreplay::Assert("WatcherDispatcher::Arm #3");
return MOJO_RESULT_NOT_FOUND;
+ }
if (ready_watches_.empty()) {
// Fast path: No watches are ready to notify, so we're done.
armed_ = true;
+ recordreplay::Assert("WatcherDispatcher::Arm #4");
return MOJO_RESULT_OK;
}
@@ -258,10 +278,13 @@ MojoResult WatcherDispatcher::Arm(uint32_t* num_blocking_events,
}
}
+ recordreplay::Assert("WatcherDispatcher::Arm Done");
return MOJO_RESULT_FAILED_PRECONDITION;
}
-WatcherDispatcher::~WatcherDispatcher() = default;
+WatcherDispatcher::~WatcherDispatcher() {
+ recordreplay::UnregisterPointer(this);
+}
} // namespace core
} // namespace mojo
diff --git a/mojo/core/watcher_set.h b/mojo/core/watcher_set.h
index 6abd43e0999d..c72fb1622fa7 100644
--- a/mojo/core/watcher_set.h
+++ b/mojo/core/watcher_set.h
@@ -9,6 +9,7 @@
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/optional.h"
+#include "base/record_replay.h"
#include "mojo/core/handle_signals_state.h"
#include "mojo/core/watcher_dispatcher.h"
@@ -58,7 +59,7 @@ class WatcherSet {
};
Dispatcher* const owner_;
- base::flat_map<WatcherDispatcher*, Entry> watchers_;
+ base::flat_map<WatcherDispatcher*, Entry, recordreplay::CompareByPointerId> watchers_;
base::Optional<HandleSignalsState> last_known_state_;
DISALLOW_COPY_AND_ASSIGN(WatcherSet);
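
base::flat_map keeps its entries sorted by key, and the keys here are raw WatcherDispatcher pointers, so upstream iteration order depends on allocation addresses. Swapping in recordreplay::CompareByPointerId (declared in base/record_replay.h, not shown in this section, and backed by the RegisterPointer call added to WatcherDispatcher's constructor above) makes iteration order reproducible on replay. A sketch of what such a comparator presumably looks like, reusing the PointerId registry sketch from the port_locker.cc note:

    #include <cstdint>

    namespace recordreplay_sketch {

    uintptr_t PointerId(const void* ptr);  // registry sketch shown earlier

    // Sketch only: order keys by registration ID instead of by address so
    // that flat_map iteration visits watchers in the same order on replay.
    struct CompareByPointerId {
      bool operator()(const void* a, const void* b) const {
        return PointerId(a) < PointerId(b);
      }
    };

    }  // namespace recordreplay_sketch

    // e.g. base::flat_map<WatcherDispatcher*, Entry,
    //                     recordreplay_sketch::CompareByPointerId> watchers_;
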
diff --git a/mojo/public/cpp/bindings/lib/connector.cc b/mojo/public/cpp/bindings/lib/connector.cc
index 2766f8d65c35..9974edc77d39 100644
--- a/mojo/public/cpp/bindings/lib/connector.cc
+++ b/mojo/public/cpp/bindings/lib/connector.cc
@@ -16,6 +16,7 @@
#include "base/metrics/histogram_macros.h"
#include "base/no_destructor.h"
#include "base/rand_util.h"
+#include "base/record_replay.h"
#include "base/run_loop.h"
#include "base/synchronization/lock.h"
#include "base/task/current_thread.h"
@@ -219,6 +220,8 @@ ScopedMessagePipeHandle Connector::PassMessagePipe() {
}
void Connector::RaiseError() {
+ recordreplay::Assert("Connector::RaiseError");
+
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
HandleError(true, true);
@@ -233,6 +236,8 @@ void Connector::SetConnectionGroup(ConnectionGroup::Ref ref) {
}
bool Connector::WaitForIncomingMessage() {
+ recordreplay::Assert("Connector::WaitForIncomingMessage Start");
+
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (error_)
@@ -387,6 +392,8 @@ void Connector::OnSyncHandleWatcherHandleReady(MojoResult result) {
}
void Connector::OnHandleReadyInternal(MojoResult result) {
+ recordreplay::Assert("Connector::OnHandleReadyInternal Start");
+
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (result == MOJO_RESULT_FAILED_PRECONDITION) {
@@ -473,6 +480,8 @@ MojoResult Connector::ReadMessage(Message* message) {
}
bool Connector::DispatchMessage(Message message) {
+ recordreplay::Assert("Connector::DispatchMessage Start");
+
DCHECK(!paused_);
base::WeakPtr<Connector> weak_self = weak_self_;
@@ -551,19 +560,28 @@ void Connector::ScheduleDispatchOfPendingMessagesOrWaitForMore(
}
void Connector::ReadAllAvailableMessages() {
- if (paused_ || error_)
+ recordreplay::Assert("Connector::ReadAllAvailableMessages Start");
+
+ if (paused_ || error_) {
+ recordreplay::Assert("Connector::ReadAllAvailableMessages #1");
return;
+ }
base::WeakPtr<Connector> weak_self = weak_self_;
do {
+ recordreplay::Assert("Connector::ReadAllAvailableMessages #2");
+
Message message;
MojoResult rv = ReadMessage(&message);
+ recordreplay::Assert("Connector::ReadAllAvailableMessages #3 %d", rv);
+
switch (rv) {
case MOJO_RESULT_OK:
DCHECK(!message.IsNull());
if (!DispatchMessage(std::move(message)) || !weak_self || paused_) {
+ recordreplay::Assert("Connector::ReadAllAvailableMessages #4");
return;
}
break;
@@ -587,12 +605,18 @@ void Connector::ReadAllAvailableMessages() {
false /* force_async_handler */);
return;
}
+
+ recordreplay::Assert("Connector::ReadAllAvailableMessages #5");
} while (weak_self && should_dispatch_messages_immediately());
+ recordreplay::Assert("Connector::ReadAllAvailableMessages #6 %d", !!weak_self);
+
if (weak_self) {
const auto pending_message_count = QueryPendingMessageCount();
ScheduleDispatchOfPendingMessagesOrWaitForMore(pending_message_count);
}
+
+ recordreplay::Assert("Connector::ReadAllAvailableMessages Done");
}
void Connector::CancelWait() {
@@ -602,8 +626,12 @@ void Connector::CancelWait() {
}
void Connector::HandleError(bool force_pipe_reset, bool force_async_handler) {
- if (error_ || !message_pipe_.is_valid())
+ recordreplay::Assert("Connector::HandleError Start");
+
+ if (error_ || !message_pipe_.is_valid()) {
+ recordreplay::Assert("Connector::HandleError #1");
return;
+ }
if (paused_) {
// Enforce calling the error handler asynchronously if the user has paused
@@ -616,15 +644,20 @@ void Connector::HandleError(bool force_pipe_reset, bool force_async_handler) {
force_pipe_reset = true;
if (force_pipe_reset) {
+ recordreplay::Assert("Connector::HandleError #2");
CancelWait();
+ recordreplay::Assert("Connector::HandleError #3");
internal::MayAutoLock locker(&lock_);
message_pipe_.reset();
MessagePipe dummy_pipe;
message_pipe_ = std::move(dummy_pipe.handle0);
} else {
+ recordreplay::Assert("Connector::HandleError #4");
CancelWait();
}
+ recordreplay::Assert("Connector::HandleError #5");
+
if (force_async_handler) {
if (!paused_)
WaitToReadMore();
@@ -633,6 +666,8 @@ void Connector::HandleError(bool force_pipe_reset, bool force_async_handler) {
if (connection_error_handler_)
std::move(connection_error_handler_).Run();
}
+
+ recordreplay::Assert("Connector::HandleError Done");
}
void Connector::EnsureSyncWatcherExists() {
diff --git a/mojo/public/cpp/bindings/lib/interface_endpoint_client.cc b/mojo/public/cpp/bindings/lib/interface_endpoint_client.cc
index ad1d8723c598..269b1230f902 100644
--- a/mojo/public/cpp/bindings/lib/interface_endpoint_client.cc
+++ b/mojo/public/cpp/bindings/lib/interface_endpoint_client.cc
@@ -11,6 +11,7 @@
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
+#include "base/record_replay.h"
#include "base/sequenced_task_runner.h"
#include "base/stl_util.h"
#include "mojo/public/cpp/bindings/associated_group.h"
@@ -502,6 +503,8 @@ void InterfaceEndpointClient::OnAssociationEvent(
bool InterfaceEndpointClient::HandleValidatedMessage(Message* message) {
DCHECK_EQ(handle_.id(), message->interface_id());
+ recordreplay::Assert("InterfaceEndpointClient::HandleValidatedMessage Start");
+
if (encountered_error_) {
// This message is received after error has been encountered. For associated
// interfaces, this means the remote side sends a
@@ -509,6 +512,7 @@ bool InterfaceEndpointClient::HandleValidatedMessage(Message* message) {
// for the same interface. Close the pipe because this shouldn't happen.
DVLOG(1) << "A message is received for an interface after it has been "
<< "disconnected. Closing the pipe.";
+ recordreplay::Assert("InterfaceEndpointClient::HandleValidatedMessage #1");
return false;
}
@@ -520,8 +524,10 @@ bool InterfaceEndpointClient::HandleValidatedMessage(Message* message) {
auto responder = std::make_unique<ResponderThunk>(
weak_ptr_factory_.GetWeakPtr(), task_runner_);
if (mojo::internal::ControlMessageHandler::IsControlMessage(message)) {
- return control_message_handler_.AcceptWithResponder(message,
- std::move(responder));
+ bool rv = control_message_handler_.AcceptWithResponder(message,
+ std::move(responder));
+ recordreplay::Assert("InterfaceEndpointClient::HandleValidatedMessage #2 %d", rv);
+ return rv;
} else {
if (idle_tracking_connection_group_)
responder->set_connection_group(idle_tracking_connection_group_);
@@ -534,22 +540,32 @@ bool InterfaceEndpointClient::HandleValidatedMessage(Message* message) {
if (message->has_flag(Message::kFlagIsSync) &&
!force_outgoing_messages_async_) {
auto it = sync_responses_.find(request_id);
- if (it == sync_responses_.end())
+ if (it == sync_responses_.end()) {
+ recordreplay::Assert("InterfaceEndpointClient::HandleValidatedMessage #3");
return false;
+ }
it->second->response = std::move(*message);
*it->second->response_received = true;
+ recordreplay::Assert("InterfaceEndpointClient::HandleValidatedMessage #4");
return true;
}
auto it = async_responders_.find(request_id);
- if (it == async_responders_.end())
+ if (it == async_responders_.end()) {
+ recordreplay::Assert("InterfaceEndpointClient::HandleValidatedMessage #5");
return false;
+ }
std::unique_ptr<MessageReceiver> responder = std::move(it->second);
async_responders_.erase(it);
- return responder->Accept(message);
+ bool rv = responder->Accept(message);
+ recordreplay::Assert("InterfaceEndpointClient::HandleValidatedMessage #6 %d", rv);
+ return rv;
} else {
- if (mojo::internal::ControlMessageHandler::IsControlMessage(message))
- return control_message_handler_.Accept(message);
+ if (mojo::internal::ControlMessageHandler::IsControlMessage(message)) {
+ bool rv = control_message_handler_.Accept(message);
+ recordreplay::Assert("InterfaceEndpointClient::HandleValidatedMessage #7 %d", rv);
+ return rv;
+ }
accepted_interface_message = incoming_receiver_->Accept(message);
}
@@ -561,6 +577,8 @@ bool InterfaceEndpointClient::HandleValidatedMessage(Message* message) {
MaybeStartIdleTimer();
}
+ recordreplay::Assert("InterfaceEndpointClient::HandleValidatedMessage Done %d",
+ accepted_interface_message);
return accepted_interface_message;
}
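The hunks above all apply the same instrumentation pattern: each entry point, early return, and result value gets a recordreplay::Assert() call with a distinct label, so the first control-flow divergence between a recording and a replay surfaces as a mismatched label. A minimal sketch of the pattern, assuming only the recordreplay::Assert() API from base/record_replay.h added by this patch; the function, its labels, and the stand-in work are hypothetical:

#include "base/record_replay.h"

// Sketch only: Frobnicate() is hypothetical; its labels mirror the
// "Start / #N / Done" convention used in the hunks above.
bool Frobnicate(int id) {
  recordreplay::Assert("Frobnicate Start %d", id);
  if (id < 0) {
    recordreplay::Assert("Frobnicate #1");  // label each early return
    return false;
  }
  bool rv = (id % 2) == 0;  // stand-in for the real work
  recordreplay::Assert("Frobnicate Done %d", rv);  // include the result in the label
  return rv;
}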
diff --git a/mojo/public/cpp/bindings/lib/multiplex_router.cc b/mojo/public/cpp/bindings/lib/multiplex_router.cc
index 5f2e8141fe15..b9681af6f53b 100644
--- a/mojo/public/cpp/bindings/lib/multiplex_router.cc
+++ b/mojo/public/cpp/bindings/lib/multiplex_router.cc
@@ -12,6 +12,7 @@
#include "base/location.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
+#include "base/record_replay.h"
#include "base/sequenced_task_runner.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
@@ -331,7 +332,7 @@ MultiplexRouter::MultiplexRouter(
DCHECK(task_runner_->RunsTasksInCurrentSequence());
if (config == MULTI_INTERFACE)
- lock_.emplace();
+ lock_.emplace("MultiplexRouter.lock_");
if (config == SINGLE_INTERFACE_WITH_SYNC_METHODS ||
config == MULTI_INTERFACE) {
@@ -881,6 +882,8 @@ bool MultiplexRouter::ProcessIncomingMessage(
MessageWrapper* message_wrapper,
ClientCallBehavior client_call_behavior,
base::SequencedTaskRunner* current_task_runner) {
+ recordreplay::Assert("MultiplexRouter::ProcessIncomingMessage Start");
+
DCHECK(!current_task_runner ||
current_task_runner->RunsTasksInCurrentSequence());
DCHECK(!paused_);
@@ -891,6 +894,7 @@ bool MultiplexRouter::ProcessIncomingMessage(
if (message->IsNull()) {
// This is a sync message and has been processed during sync handle
// watching.
+ recordreplay::Assert("MultiplexRouter::ProcessIncomingMessage #1");
return true;
}
@@ -908,6 +912,7 @@ bool MultiplexRouter::ProcessIncomingMessage(
if (!result)
RaiseErrorInNonTestingMode();
+ recordreplay::Assert("MultiplexRouter::ProcessIncomingMessage #2");
return true;
}
@@ -915,12 +920,15 @@ bool MultiplexRouter::ProcessIncomingMessage(
DCHECK(IsValidInterfaceId(id));
InterfaceEndpoint* endpoint = FindEndpoint(id);
- if (!endpoint || endpoint->closed())
+ if (!endpoint || endpoint->closed()) {
+ recordreplay::Assert("MultiplexRouter::ProcessIncomingMessage #3");
return true;
+ }
if (!endpoint->client()) {
// We need to wait until a client is attached in order to dispatch further
// messages.
+ recordreplay::Assert("MultiplexRouter::ProcessIncomingMessage #4");
return false;
}
@@ -935,6 +943,7 @@ bool MultiplexRouter::ProcessIncomingMessage(
if (!can_direct_call) {
MaybePostToProcessTasks(endpoint->task_runner());
+ recordreplay::Assert("MultiplexRouter::ProcessIncomingMessage #5");
return false;
}
@@ -957,6 +966,7 @@ bool MultiplexRouter::ProcessIncomingMessage(
if (!result)
RaiseErrorInNonTestingMode();
+ recordreplay::Assert("MultiplexRouter::ProcessIncomingMessage Done");
return true;
}
diff --git a/mojo/public/cpp/bindings/thread_safe_forwarder_base.cc b/mojo/public/cpp/bindings/thread_safe_forwarder_base.cc
index 698a6fbb105a..a14676d30db9 100644
--- a/mojo/public/cpp/bindings/thread_safe_forwarder_base.cc
+++ b/mojo/public/cpp/bindings/thread_safe_forwarder_base.cc
@@ -7,6 +7,7 @@
#include <utility>
#include "base/check.h"
+#include "base/record_replay.h"
#include "base/stl_util.h"
#include "mojo/public/cpp/bindings/sync_call_restrictions.h"
#include "mojo/public/cpp/bindings/sync_event_watcher.h"
@@ -40,6 +41,7 @@ bool ThreadSafeForwarderBase::PrefersSerializedMessages() {
}
bool ThreadSafeForwarderBase::Accept(Message* message) {
+ recordreplay::Assert("ThreadSafeForwarderBase::Accept Start");
if (!message->associated_endpoint_handles()->empty()) {
// If this DCHECK fails, it is likely because:
// - This is a non-associated interface pointer setup using
@@ -55,6 +57,7 @@ bool ThreadSafeForwarderBase::Accept(Message* message) {
}
task_runner_->PostTask(FROM_HERE,
base::BindOnce(forward_, std::move(*message)));
+ recordreplay::Assert("ThreadSafeForwarderBase::Accept Done");
return true;
}
diff --git a/mojo/public/cpp/system/handle_signal_tracker.cc b/mojo/public/cpp/system/handle_signal_tracker.cc
index ab5ba8315797..06856bb245d7 100644
--- a/mojo/public/cpp/system/handle_signal_tracker.cc
+++ b/mojo/public/cpp/system/handle_signal_tracker.cc
@@ -5,6 +5,7 @@
#include "mojo/public/cpp/system/handle_signal_tracker.h"
#include "base/bind.h"
+#include "base/record_replay.h"
#include "base/synchronization/lock.h"
#include "mojo/public/cpp/system/handle_signals_state.h"
@@ -66,10 +67,14 @@ void HandleSignalTracker::Arm() {
void HandleSignalTracker::OnNotify(MojoResult result,
const HandleSignalsState& state) {
+ recordreplay::Assert("HandleSignalTracker::OnNotify Start");
+
last_known_state_ = state;
Arm();
if (notification_callback_)
notification_callback_.Run(state);
+
+ recordreplay::Assert("HandleSignalTracker::OnNotify Done");
}
} // namespace mojo
diff --git a/mojo/public/cpp/system/simple_watcher.cc b/mojo/public/cpp/system/simple_watcher.cc
index 700fda474bf4..2e04e9ba44e2 100644
--- a/mojo/public/cpp/system/simple_watcher.cc
+++ b/mojo/public/cpp/system/simple_watcher.cc
@@ -7,6 +7,7 @@
#include "base/bind.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
+#include "base/record_replay.h"
#include "base/sequenced_task_runner.h"
#include "base/synchronization/lock.h"
#include "base/task/common/task_annotator.h"
@@ -193,6 +194,8 @@ void SimpleWatcher::Cancel() {
MojoResult SimpleWatcher::Arm(MojoResult* ready_result,
HandleSignalsState* ready_state) {
+ recordreplay::Assert("SimpleWatcher::Arm Start");
+
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
uint32_t num_blocking_events = 1;
MojoTrapEvent blocking_event = {sizeof(blocking_event)};
@@ -211,15 +214,19 @@ MojoResult SimpleWatcher::Arm(MojoResult* ready_result,
}
}
+ recordreplay::Assert("SimpleWatcher::Arm Done %d", rv);
return rv;
}
void SimpleWatcher::ArmOrNotify() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ recordreplay::Assert("SimpleWatcher::ArmOrNotify Start");
// Already cancelled, nothing to do.
- if (!IsWatching())
+ if (!IsWatching()) {
+ recordreplay::Assert("SimpleWatcher::ArmOrNotify #1");
return;
+ }
MojoResult ready_result;
HandleSignalsState ready_state;
@@ -228,8 +235,10 @@ void SimpleWatcher::ArmOrNotify() {
// NOTE: If the watched handle has been closed, the above call will result in
// MOJO_RESULT_NOT_FOUND. A MOJO_RESULT_CANCELLED notification will already
// have been posted to this object as a result, so there's nothing else to do.
- if (rv == MOJO_RESULT_OK || rv == MOJO_RESULT_NOT_FOUND)
+ if (rv == MOJO_RESULT_OK || rv == MOJO_RESULT_NOT_FOUND) {
+ recordreplay::Assert("SimpleWatcher::ArmOrNotify #2");
return;
+ }
DCHECK_EQ(MOJO_RESULT_FAILED_PRECONDITION, rv);
{
@@ -240,6 +249,8 @@ void SimpleWatcher::ArmOrNotify() {
weak_factory_.GetWeakPtr(), watch_id_,
ready_result, ready_state));
}
+
+ recordreplay::Assert("SimpleWatcher::ArmOrNotify Done");
}
void SimpleWatcher::OnHandleReady(int watch_id,
@@ -247,10 +258,14 @@ void SimpleWatcher::OnHandleReady(int watch_id,
const HandleSignalsState& state) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ recordreplay::Assert("SimpleWatcher::OnHandleReady Start");
+
// This notification may be for a previously watched context, in which case
// we just ignore it.
- if (watch_id != watch_id_)
+ if (watch_id != watch_id_) {
+ recordreplay::Assert("SimpleWatcher::OnHandleReady #1");
return;
+ }
ReadyCallbackWithState callback = callback_;
if (result == MOJO_RESULT_CANCELLED) {
@@ -276,16 +291,22 @@ void SimpleWatcher::OnHandleReady(int watch_id,
base::WeakPtr<SimpleWatcher> weak_self = weak_factory_.GetWeakPtr();
callback.Run(result, state);
- if (!weak_self)
+ if (!weak_self) {
+ recordreplay::Assert("SimpleWatcher::OnHandleReady #2");
return;
+ }
// Prevent |MOJO_RESULT_FAILED_PRECONDITION| task spam by only notifying
// at most once in AUTOMATIC arming mode.
- if (result == MOJO_RESULT_FAILED_PRECONDITION)
+ if (result == MOJO_RESULT_FAILED_PRECONDITION) {
+ recordreplay::Assert("SimpleWatcher::OnHandleReady #3");
return;
+ }
if (arming_policy_ == ArmingPolicy::AUTOMATIC && IsWatching())
ArmOrNotify();
}
+
+ recordreplay::Assert("SimpleWatcher::OnHandleReady Done");
}
} // namespace mojo
diff --git a/third_party/blink/common/associated_interfaces/associated_interface_registry.cc b/third_party/blink/common/associated_interfaces/associated_interface_registry.cc
index 652296dc5c8d..832d5f25e757 100644
--- a/third_party/blink/common/associated_interfaces/associated_interface_registry.cc
+++ b/third_party/blink/common/associated_interfaces/associated_interface_registry.cc
@@ -4,6 +4,8 @@
#include "third_party/blink/public/common/associated_interfaces/associated_interface_registry.h"
+#include "base/record_replay.h"
+
namespace blink {
AssociatedInterfaceRegistry::AssociatedInterfaceRegistry() = default;
@@ -23,10 +25,14 @@ void AssociatedInterfaceRegistry::RemoveInterface(const std::string& name) {
bool AssociatedInterfaceRegistry::TryBindInterface(
const std::string& name,
mojo::ScopedInterfaceEndpointHandle* handle) {
+ recordreplay::Assert("AssociatedInterfaceRegistry::TryBindInterface Start");
auto it = interfaces_.find(name);
- if (it == interfaces_.end())
+ if (it == interfaces_.end()) {
+ recordreplay::Assert("AssociatedInterfaceRegistry::TryBindInterface #1");
return false;
+ }
it->second.Run(std::move(*handle));
+ recordreplay::Assert("AssociatedInterfaceRegistry::TryBindInterface Done");
return true;
}
diff --git a/third_party/blink/renderer/bindings/bindings.gni b/third_party/blink/renderer/bindings/bindings.gni
index c46ab96f035f..59abca639161 100644
--- a/third_party/blink/renderer/bindings/bindings.gni
+++ b/third_party/blink/renderer/bindings/bindings.gni
@@ -60,6 +60,8 @@ bindings_core_v8_files =
"core/v8/native_value_traits_impl.h",
"core/v8/profiler_trace_builder.cc",
"core/v8/profiler_trace_builder.h",
+ "core/v8/record_replay_interface.cc",
+ "core/v8/record_replay_interface.h",
"core/v8/referrer_script_info.cc",
"core/v8/referrer_script_info.h",
"core/v8/rejected_promises.cc",
diff --git a/third_party/blink/renderer/bindings/core/v8/local_window_proxy.cc b/third_party/blink/renderer/bindings/core/v8/local_window_proxy.cc
index 16d2cc458b3a..0f3e89233984 100644
--- a/third_party/blink/renderer/bindings/core/v8/local_window_proxy.cc
+++ b/third_party/blink/renderer/bindings/core/v8/local_window_proxy.cc
@@ -32,8 +32,10 @@
#include "base/debug/dump_without_crashing.h"
#include "base/memory/scoped_refptr.h"
+#include "base/record_replay.h"
#include "third_party/blink/renderer/bindings/core/v8/isolated_world_csp.h"
#include "third_party/blink/renderer/bindings/core/v8/script_controller.h"
+#include "third_party/blink/renderer/bindings/core/v8/record_replay_interface.h"
#include "third_party/blink/renderer/bindings/core/v8/to_v8_for_core.h"
#include "third_party/blink/renderer/bindings/core/v8/v8_binding_for_core.h"
#include "third_party/blink/renderer/bindings/core/v8/v8_context_snapshot.h"
@@ -145,6 +147,8 @@ void LocalWindowProxy::DisposeContext(Lifecycle next_status,
lifecycle_ = next_status;
}
+static bool gHasContext;
+
void LocalWindowProxy::Initialize() {
TRACE_EVENT1("v8", "LocalWindowProxy::Initialize", "IsMainFrame",
GetFrame()->IsMainFrame());
@@ -194,6 +198,15 @@ void LocalWindowProxy::Initialize() {
SetSecurityToken(origin.get());
}
+ // After creating the first context, we are ready to set up the state used
+ // to process driver commands when recording/replaying, and to create
+ // checkpoints. Create the first checkpoint at which execution can pause.
+ if (recordreplay::IsRecordingOrReplaying() && !gHasContext) {
+ gHasContext = true;
+ SetupRecordReplayCommands(GetIsolate());
+ recordreplay::NewCheckpoint();
+ }
+
{
TRACE_EVENT1("v8", "ContextCreatedNotification", "IsMainFrame",
GetFrame()->IsMainFrame());
diff --git a/third_party/blink/renderer/bindings/core/v8/record_replay_interface.cc b/third_party/blink/renderer/bindings/core/v8/record_replay_interface.cc
new file mode 100644
index 000000000000..c53216be12c4
--- /dev/null
+++ b/third_party/blink/renderer/bindings/core/v8/record_replay_interface.cc
@@ -0,0 +1,898 @@
+// Copyright 2021 Record Replay Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/bindings/core/v8/record_replay_interface.h"
+
+#include "base/record_replay.h"
+#include "v8/include/v8-inspector.h"
+
+namespace v8 {
+
+extern void FunctionCallbackRecordReplaySetCommandCallback(const FunctionCallbackInfo<Value>& args);
+extern void FunctionCallbackRecordReplaySetClearPauseDataCallback(const FunctionCallbackInfo<Value>& callArgs);
+extern void FunctionCallbackRecordReplayIgnoreScript(const FunctionCallbackInfo<Value>& args);
+
+} // namespace v8
+
+namespace blink {
+
+const char* gRecordReplayScript = R""""(
+
+const {
+ log,
+ setCDPMessageCallback,
+ sendCDPMessage,
+ setCommandCallback,
+ setClearPauseDataCallback,
+ ignoreScript,
+ dump,
+} = __RECORD_REPLAY_ARGUMENTS__;
+
+try {
+
+window.dump = dump;
+
+///////////////////////////////////////////////////////////////////////////////
+// utils.js
+///////////////////////////////////////////////////////////////////////////////
+
+function assert(v) {
+ if (!v) {
+ log(`Assertion failed ${Error().stack}`);
+ throw new Error("Assertion failed");
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// message.js
+///////////////////////////////////////////////////////////////////////////////
+
+function initMessages() {
+ setCDPMessageCallback(messageCallback);
+}
+
+let gNextMessageId = 1;
+
+let gCurrentMessageId;
+let gCurrentMessageResult;
+
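+// Note: sendMessage() reads gCurrentMessageResult immediately after calling
+// sendCDPMessage(), so it relies on the inspector session dispatching the
+// response synchronously through messageCallback().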
+function sendMessage(method, params) {
+ const id = gNextMessageId++;
+ gCurrentMessageId = id;
+ sendCDPMessage(JSON.stringify({ method, params, id }));
+ gCurrentMessageId = undefined;
+ return gCurrentMessageResult;
+}
+
+const gEventListeners = new Map();
+
+function addEventListener(method, callback) {
+ gEventListeners.set(method, callback);
+}
+
+function messageCallback(message) {
+ try {
+ message = JSON.parse(message);
+
+ if (message.id) {
+ assert(message.id == gCurrentMessageId);
+ gCurrentMessageResult = message.result;
+ } else {
+ const listener = gEventListeners.get(message.method);
+ if (listener) {
+ listener(message.params);
+ }
+ }
+ } catch (e) {
+ log(`Message callback exception: ${e}`);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// main.js
+///////////////////////////////////////////////////////////////////////////////
+
+// Methods for interacting with the record/replay driver.
+
+initMessages();
+setCommandCallback(commandCallback);
+setClearPauseDataCallback(clearPauseDataCallback);
+addEventListener("Runtime.consoleAPICalled", onConsoleAPICall);
+sendMessage("Runtime.enable");
+
+const CommandCallbacks = {
+ "Target.countStackFrames": Target_countStackFrames,
+ "Target.getCurrentMessageContents": Target_getCurrentMessageContents,
+ "Target.getSourceMapURL": Target_getSourceMapURL,
+ "Target.getStepOffsets": Target_getStepOffsets,
+ "Target.topFrameLocation": Target_topFrameLocation,
+ "Pause.evaluateInFrame": Pause_evaluateInFrame,
+ "Pause.getAllFrames": Pause_getAllFrames,
+ "Pause.getObjectPreview": Pause_getObjectPreview,
+ "Pause.getObjectProperty": Pause_getObjectProperty,
+ "Pause.getScope": Pause_getScope,
+};
+
+function commandCallback(method, params) {
+ if (!CommandCallbacks[method]) {
+ log(`Missing command callback: ${method}`);
+ return {};
+ }
+
+ try {
+ return CommandCallbacks[method](params);
+ } catch (e) {
+ log(`Error: Command exception ${method} ${e}`);
+ return {};
+ }
+}
+
+function Target_countStackFrames() {
+ const count = getStackFrames().length;
+ return { count };
+}
+
+// Contents of the last console API call. Runtime.consoleAPICalled will be
+// emitted before the driver gets the current message contents.
+let gLastConsoleAPICall;
+
+function onConsoleAPICall(params) {
+ gLastConsoleAPICall = params;
+}
+
+function Target_getCurrentMessageContents() {
+ // Look for the "args" variable on an onConsoleMessage frame.
+ // The arguments are also stored on the last console API call, though
+ // if we use that we need to be careful because the pause state could have
+ // been cleared since the last Runtime.consoleAPICalled event.
+ const { callFrames } = sendMessage("Debugger.getCallFrames");
+ const consoleMessageFrame = callFrames.find(
+ frame => frame.functionName == "onConsoleMessage"
+ );
+ assert(consoleMessageFrame);
+ assert(consoleMessageFrame.this.type == "object");
+ assert(consoleMessageFrame.this.className == "Array");
+ const argumentsId = consoleMessageFrame.this.objectId;
+
+ // Get the properties of the message arguments array.
+ const argumentsProperties = sendMessage("Runtime.getProperties", {
+ objectId: argumentsId,
+ ownProperties: true,
+ generatePreview: false,
+ }).result;
+
+ // Get the protocol representation of the message arguments.
+ const argumentValues = [];
+ for (let i = 0;; i++) {
+ const property = argumentsProperties.find(prop => prop.name == i.toString());
+ if (!property) {
+ break;
+ }
+ argumentValues.push(remoteObjectToProtocolValue(property.value));
+ }
+
+ let level = "info";
+ switch (gLastConsoleAPICall.level) {
+ case "warning":
+ level = "warning";
+ break;
+ case "error":
+ level = "error";
+ break;
+ }
+
+ let url, sourceId, line, column;
+ if (gLastConsoleAPICall.stackTrace) {
+ const frame = gLastConsoleAPICall.stackTrace.callFrames[0];
+ if (frame) {
+ url = frame.url;
+ sourceId = frame.scriptId;
+ line = frame.lineNumber;
+ column = frame.columnNumber;
+ }
+ }
+
+ return {
+ source: "ConsoleAPI",
+ level,
+ text: "",
+ url,
+ sourceId,
+ line,
+ column,
+ argumentValues,
+ };
+}
+
+function Target_getSourceMapURL() {
+ // NYI
+ return {};
+}
+
+function Target_getStepOffsets() {
+ // CDP does not distinguish between steps and breakpoints.
+ return {};
+}
+
+function Target_topFrameLocation() {
+ const frames = getStackFrames();
+ if (!frames.length) {
+ return {};
+ }
+ return { location: createProtocolLocation(frames[0].location)[0] };
+}
+
+// Get the raw call frames on the stack, eliding ones in scripts we are ignoring.
+function getStackFrames() {
+ const { callFrames } = sendMessage("Debugger.getCallFrames");
+
+ const frames = [];
+ for (const frame of callFrames) {
+ if (!ignoreScript(frame.location.scriptId)) {
+ frames.push(frame);
+ }
+ }
+ return frames;
+}
+
+// Build a protocol Result object from a result/exceptionDetails CDP rval.
+function buildProtocolResult({ result, exceptionDetails }) {
+ const value = remoteObjectToProtocolValue(result);
+ const protocolResult = { data: {} };
+
+ if (exceptionDetails) {
+ protocolResult.exception = value;
+ } else {
+ protocolResult.returned = value;
+ }
+ return { result: protocolResult };
+}
+
+function Pause_evaluateInFrame({ frameId, expression }) {
+ const frames = getStackFrames();
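+  // Frame IDs handed out by Pause_getAllFrames are stringified frame indices,
+  // so converting back to a number recovers the index into getStackFrames().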
+ const index = +frameId;
+ assert(index < frames.length);
+ const frame = frames[index];
+
+ const rv = doEvaluation();
+ return buildProtocolResult(rv);
+
+ function doEvaluation() {
+ // In order to do the evaluation in the right frame, the same number of
+ // frames need to be on V8's stack when we do the evaluation as when we got
+ // the stack frames in the first place. The debugger agent extracts a frame
+ // index from the ID it is given and uses that to walk the stack to the
+ // frame where it will do the evaluation (see DebugStackTraceIterator).
+ return sendMessage(
+ "Debugger.evaluateOnCallFrame",
+ {
+ callFrameId: frame.callFrameId,
+ expression,
+ }
+ );
+ }
+}
+
+function Pause_getAllFrames() {
+ const frames = getStackFrames().map((frame, index) => {
+ // Use our own IDs for frames.
+    const id = index.toString();
+ return createProtocolFrame(id, frame);
+ });
+
+ return {
+ frames: frames.map(f => f.frameId),
+ data: { frames },
+ };
+}
+
+function Pause_getObjectPreview({ object, level = "full" }) {
+ const objectData = createProtocolObject(object, level);
+ return { data: { objects: [objectData] } };
+}
+
+function Pause_getObjectProperty({ object, name }) {
+ const obj = protocolIdToRemoteObject(object);
+ const rv = sendMessage(
+ "Runtime.callFunctionOn",
+ {
+ functionDeclaration: `function() { return this["${name}"] }`,
+ objectId: obj.objectId,
+ }
+ );
+ return buildProtocolResult(rv);
+}
+
+function Pause_getScope({ scope }) {
+ const scopeData = createProtocolScope(scope);
+ return { data: { scopes: [scopeData] } };
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// object.js
+///////////////////////////////////////////////////////////////////////////////
+
+// Manage association between remote objects and protocol object IDs.
+
+// Map protocol ObjectId => RemoteObject
+const gProtocolIdToObject = new Map();
+
+// Map RemoteObject.objectId => protocol ObjectId
+const gObjectIdToProtocolId = new Map();
+
+// Map protocol ScopeId => Debugger.Scope
+const gProtocolIdToScope = new Map();
+
+let gNextObjectId = 1;
+
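+// Protocol object and scope IDs are only valid within a single pause, so the
+// mappings are reset whenever the driver clears the pause data.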
+function clearPauseDataCallback() {
+ gProtocolIdToObject.clear();
+ gObjectIdToProtocolId.clear();
+ gProtocolIdToScope.clear();
+ gNextObjectId = 1;
+}
+
+function remoteObjectToProtocolId(remoteObject) {
+ assert(remoteObject.objectId);
+
+ const existing = gObjectIdToProtocolId.get(remoteObject.objectId);
+ if (existing) {
+ return existing;
+ }
+
+ const protocolObjectId = (gNextObjectId++).toString();
+ gObjectIdToProtocolId.set(remoteObject.objectId, protocolObjectId);
+ gProtocolIdToObject.set(protocolObjectId, remoteObject);
+
+ return protocolObjectId;
+}
+
+function protocolIdToRemoteObject(objectId) {
+ const remoteObject = gProtocolIdToObject.get(objectId);
+ assert(remoteObject);
+ return remoteObject;
+}
+
+function remoteObjectToProtocolValue(obj) {
+ switch (obj.type) {
+ case "undefined":
+ return {};
+ case "string":
+ case "number":
+ case "boolean":
+ if (obj.unserializableValue) {
+ assert(obj.type == "number");
+ return { unserializableNumber: obj.unserializableValue };
+ }
+ return { value: obj.value };
+ case "bigint": {
+ const str = obj.unserializableValue;
+ assert(str);
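+      // Strip the trailing "n" from CDP's bigint form, e.g. "123n" -> "123".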
+ return { bigint: str.substring(0, str.length - 1) };
+ }
+ case "object":
+ case "function": {
+ if (!obj.objectId) {
+ return { value: null };
+ }
+ const object = remoteObjectToProtocolId(obj);
+ return { object };
+ }
+ default:
+ return { unavailable: true };
+ }
+}
+
+function scopeToProtocolId(scope) {
+ // Use the scope object's ID as the ID for the scope itself.
+ const id = remoteObjectToProtocolId(scope.object);
+ gProtocolIdToScope.set(id, scope);
+ return id;
+}
+
+function protocolIdToScope(scopeId) {
+ const scope = gProtocolIdToScope.get(scopeId);
+ assert(scope);
+ return scope;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// preview.js
+///////////////////////////////////////////////////////////////////////////////
+
+// Logic for creating object previews for the record/replay protocol.
+
+function createProtocolObject(objectId, level) {
+ const obj = protocolIdToRemoteObject(objectId);
+ const className = obj.className || "Function";
+
+ let preview;
+ if (level != "none") {
+ preview = new ProtocolObjectPreview(obj, level).fill();
+ }
+
+ return { objectId, className, preview };
+}
+
+// Note: this is higher than in gecko-dev because typed arrays currently don't
+// render properly in the devtools unless the preview includes a minimum number
+// of properties. This would be nice to fix.
+const NumItemsBeforeOverflow = 12;
+
+function ProtocolObjectPreview(obj, level) {
+ this.obj = obj;
+ this.level = level;
+ this.overflow = false;
+ this.numItems = 0;
+ this.extra = {};
+}
+
+ProtocolObjectPreview.prototype = {
+ canAddItem(force) {
+ if (this.level == "noProperties") {
+ this.overflow = true;
+ return false;
+ }
+ if (!force && this.level == "canOverflow" && this.numItems >= NumItemsBeforeOverflow) {
+ this.overflow = true;
+ return false;
+ }
+ this.numItems++;
+ return true;
+ },
+
+ addProperty(property, force) {
+ if (!this.canAddItem(force)) {
+ return;
+ }
+ if (!this.properties) {
+ this.properties = [];
+ }
+ this.properties.push(property);
+ },
+
+ addContainerEntry(entry) {
+ if (!this.canAddItem()) {
+ return;
+ }
+ if (!this.containerEntries) {
+ this.containerEntries = [];
+ }
+ this.containerEntries.push(entry);
+ },
+
+ fill() {
+ const allProperties = sendMessage("Runtime.getProperties", {
+ objectId: this.obj.objectId,
+ ownProperties: true,
+ generatePreview: false,
+ });
+ const properties = allProperties.result;
+
+ // Add class-specific data.
+ const previewer = CustomPreviewers[this.obj.className];
+ const requiredProperties = [];
+ if (previewer) {
+ for (const entry of previewer) {
+ if (typeof entry == "string") {
+ requiredProperties.push(entry);
+ } else {
+ entry.call(this, allProperties);
+ }
+ }
+ }
+
+ let prototype;
+ for (const prop of properties) {
+ if (prop.name == "__proto__") {
+ prototype = prop;
+ } else {
+ const protocolProperty = createProtocolPropertyDescriptor(prop);
+ const force = requiredProperties.includes(prop.name);
+ this.addProperty(protocolProperty, force);
+ }
+ }
+
+ let prototypeId;
+ if (prototype && prototype.value && prototype.value.objectId) {
+ prototypeId = remoteObjectToProtocolId(prototype.value);
+ }
+ return {
+ prototypeId,
+ overflow: this.overflow ? true : undefined,
+ properties: this.properties,
+ containerEntries: this.containerEntries,
+ ...this.extra,
+ };
+ },
+};
+
+// Get a count from an object description like "Array(42)"
+function getDescriptionCount(description) {
+ const match = /\((\d+)\)/.exec(description || "");
+ if (match) {
+ return +match[1];
+ }
+}
+
+function previewTypedArray() {
+ // The typed array size isn't available from the object's own property
+ // information, except by parsing the object description.
+ const length = getDescriptionCount(this.obj.description);
+ if (length !== undefined) {
+ this.addProperty({ name: "length", value: length }, /* force */ true);
+ }
+}
+
+function previewSetMap(allProperties) {
+ if (!allProperties.internalProperties) {
+ return;
+ }
+
+ const internal = allProperties.internalProperties.find(prop => prop.name == "[[Entries]]");
+ if (!internal || !internal.value || !internal.value.objectId) {
+ return;
+ }
+
+ // Get the container size from the length of the entries.
+ const size = getDescriptionCount(internal.value.description);
+ if (size !== undefined) {
+ this.extra.containerEntryCount = size;
+ if (["Set", "Map"].includes(this.obj.className)) {
+ this.addProperty({ name: "size", value: size }, /* force */ true);
+ }
+ }
+
+ const entries = sendMessage("Runtime.getProperties", {
+ objectId: internal.value.objectId,
+ ownProperties: true,
+ generatePreview: false,
+ }).result;
+
+ for (const entry of entries) {
+ if (entry.value.subtype == "internal#entry") {
+ const entryProperties = sendMessage("Runtime.getProperties", {
+ objectId: entry.value.objectId,
+ ownProperties: true,
+ generatePreview: false,
+ }).result;
+ const key = entryProperties.find(eprop => eprop.name == "key");
+ const value = entryProperties.find(eprop => eprop.name == "value");
+ if (value) {
+ this.addContainerEntry({
+ key: key ? remoteObjectToProtocolValue(key.value) : undefined,
+ value: remoteObjectToProtocolValue(value.value),
+ });
+ }
+ }
+ if (this.overflow) {
+ break;
+ }
+ }
+}
+
+function previewRegExp() {
+ this.extra.regexpString = this.obj.description;
+}
+
+function previewDate() {
+ this.extra.dateTime = Date.parse(this.obj.description);
+}
+
+function previewError() {
+ this.addProperty({ name: "name", value: this.obj.className }, /* force */ true);
+}
+
+const ErrorProperties = [
+ "message",
+ "stack",
+ previewError,
+];
+
+function previewFunction(allProperties) {
+ const nameProperty = allProperties.result.find(prop => prop.name == "name");
+ if (nameProperty) {
+ this.extra.functionName = nameProperty.value.value;
+ }
+
+ const locationProperty = allProperties.internalProperties.find(
+ prop => prop.name == "[[FunctionLocation]]"
+ );
+ if (locationProperty) {
+ this.extra.functionLocation = createProtocolLocation(locationProperty.value.value);
+ }
+}
+
+const CustomPreviewers = {
+ Array: ["length"],
+ Int8Array: [previewTypedArray],
+ Uint8Array: [previewTypedArray],
+ Uint8ClampedArray: [previewTypedArray],
+ Int16Array: [previewTypedArray],
+ Uint16Array: [previewTypedArray],
+ Int32Array: [previewTypedArray],
+ Uint32Array: [previewTypedArray],
+ Float32Array: [previewTypedArray],
+ Float64Array: [previewTypedArray],
+ BigInt64Array: [previewTypedArray],
+ BigUint64Array: [previewTypedArray],
+ Map: [previewSetMap],
+ WeakMap: [previewSetMap],
+ Set: [previewSetMap],
+ WeakSet: [previewSetMap],
+ RegExp: [previewRegExp],
+ Date: [previewDate],
+ Error: ErrorProperties,
+ EvalError: ErrorProperties,
+ RangeError: ErrorProperties,
+ ReferenceError: ErrorProperties,
+ SyntaxError: ErrorProperties,
+ TypeError: ErrorProperties,
+ URIError: ErrorProperties,
+ Function: [previewFunction],
+};
+
+function createProtocolPropertyDescriptor(desc) {
+ const { name, value, writable, get, set, configurable, enumerable } = desc;
+
+ const rv = value ? remoteObjectToProtocolValue(value) : {};
+ rv.name = name;
+
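+  // Pack the property attributes into a bitmask: writable = 1,
+  // configurable = 2, enumerable = 4. The common fully-enabled case (7) is
+  // omitted from the protocol value.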
+ let flags = 0;
+ if (writable) {
+ flags |= 1;
+ }
+ if (configurable) {
+ flags |= 2;
+ }
+ if (enumerable) {
+ flags |= 4;
+ }
+ if (flags != 7) {
+ rv.flags = flags;
+ }
+
+ if (get && get.objectId) {
+ rv.get = remoteObjectToProtocolId(get);
+ }
+ if (set && set.objectId) {
+ rv.set = remoteObjectToProtocolId(set);
+ }
+
+ return rv;
+}
+
+function createProtocolLocation(location) {
+ if (!location) {
+ return undefined;
+ }
+ const { scriptId, lineNumber, columnNumber } = location;
+ return [{
+ sourceId: scriptId,
+ // CDP line numbers are 0-indexed, while RRP line numbers are 1-indexed.
+ line: lineNumber + 1,
+ column: columnNumber,
+ }];
+}
+
+function createProtocolFrame(frameId, frame) {
+ // CDP call frames don't provide detailed type information.
+ const type = frame.functionName ? "call" : "global";
+
+ return {
+ frameId,
+ type,
+ functionName: frame.functionName || undefined,
+ functionLocation: createProtocolLocation(frame.functionLocation),
+ location: createProtocolLocation(frame.location),
+ scopeChain: frame.scopeChain.map(scopeToProtocolId),
+ this: remoteObjectToProtocolValue(frame.this),
+ };
+}
+
+function createProtocolScope(scopeId) {
+ const scope = protocolIdToScope(scopeId);
+
+ let type;
+ switch (scope.type) {
+ case "global":
+ type = "global";
+ break;
+ case "with":
+ type = "with";
+ break;
+ default:
+ type = scope.name ? "function" : "block";
+ break;
+ }
+
+ let object, bindings;
+ if (type == "global" || type == "with") {
+ object = remoteObjectToProtocolId(scope.object);
+ } else {
+ bindings = [];
+
+ const properties = sendMessage("Runtime.getProperties", {
+ objectId: scope.object.objectId,
+ ownProperties: true,
+ generatePreview: false,
+ }).result;
+ for (const { name, value } of properties) {
+ const converted = remoteObjectToProtocolValue(value);
+ bindings.push({ ...converted, name });
+ }
+ }
+
+ return {
+ scopeId,
+ type,
+ object,
+ functionName: scope.name || undefined,
+ bindings,
+ };
+}
+
+} catch (e) {
+ log(`Error: Initialization exception ${e}`);
+}
+
+)"""";
+
+static void SetFunctionProperty(v8::Isolate* isolate, v8::Local<v8::Object> obj,
+ const char* name, v8::FunctionCallback callback) {
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::Local<v8::FunctionTemplate> function_template =
+ v8::FunctionTemplate::New(isolate, callback, v8::Local<v8::Value>(),
+ v8::Local<v8::Signature>(), 0,
+ v8::ConstructorBehavior::kThrow,
+ v8::SideEffectType::kHasSideEffect);
+ v8::Local<v8::Function> function =
+ function_template->GetFunction(context).ToLocalChecked();
+
+ v8::Local<v8::String> name_string =
+ v8::String::NewFromUtf8(isolate, name,
+ v8::NewStringType::kInternalized).ToLocalChecked();
+
+ obj->Set(context, name_string, function).Check();
+ function->SetName(name_string);
+}
+
+static void RecordReplayLog(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ CHECK(args.Length() == 1 && args[0]->IsString() &&
+ "must be called with a single string");
+ v8::String::Utf8Value text(args.GetIsolate(), args[0]);
+ recordreplay::Print("%s", *text);
+}
+
+// Function to invoke on CDP responses and events.
+static v8::Eternal<v8::Function>* gCDPMessageCallback;
+
+static void RecordReplaySetCDPMessageCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ CHECK(!gCDPMessageCallback);
+ v8::Isolate* isolate = args.GetIsolate();
+ CHECK(args[0]->IsFunction());
+ v8::Local<v8::Function> callback = args[0].As<v8::Function>();
+ gCDPMessageCallback = new v8::Eternal<v8::Function>(isolate, callback);
+}
+
+static void SendMessageToFrontend(const v8_inspector::StringView& message) {
+ CHECK(v8::IsMainThread());
+
+ CHECK(gCDPMessageCallback);
+ CHECK(!message.is8Bit());
+
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::Local<v8::Value> arg = v8::String::NewFromTwoByte(isolate, message.characters16(),
+ v8::NewStringType::kNormal,
+ message.length()).ToLocalChecked();
+ v8::Local<v8::Function> callback = gCDPMessageCallback->Get(isolate);
+ v8::MaybeLocal<v8::Value> rv = callback->Call(context, v8::Undefined(isolate), 1, &arg);
+ CHECK(!rv.IsEmpty());
+}
+
+struct InspectorClient : public v8_inspector::V8InspectorClient {
+ v8::Local<v8::Context> ensureDefaultContextInGroup(int context_group_id) final {
+ recordreplay::Print("InspectorClient::ensureDefaultContextInGroup");
+ return v8::Local<v8::Context>();
+ }
+};
+
+struct InspectorChannel final : public v8_inspector::V8Inspector::Channel {
+ void sendResponse(int callId,
+ std::unique_ptr<v8_inspector::StringBuffer> message) final {
+ SendMessageToFrontend(message->string());
+ }
+ void sendNotification(std::unique_ptr<v8_inspector::StringBuffer> message) final {
+ SendMessageToFrontend(message->string());
+ }
+ void flushProtocolNotifications() final {}
+};
+
+const int CONTEXT_GROUP_ID = 1;
+
+static v8_inspector::V8Inspector* gInspector;
+static v8_inspector::V8InspectorSession* gInspectorSession;
+
+static void RecordReplaySendCDPMessage(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ CHECK(args.Length() == 1 && args[0]->IsString() &&
+ "must be called with a single string");
+ v8::String::Utf8Value message(args.GetIsolate(), args[0]);
+
+ if (!gInspectorSession) {
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+
+ gInspector = v8_inspector::V8Inspector::create(args.GetIsolate(),
+ new InspectorClient()).release();
+
+ v8_inspector::V8ContextInfo context_info(context, CONTEXT_GROUP_ID, v8_inspector::StringView());
+ gInspector->contextCreated(context_info);
+
+ gInspectorSession = gInspector->connect(CONTEXT_GROUP_ID, new InspectorChannel(),
+ v8_inspector::StringView()).release();
+ }
+
+ std::string nmessage(*message);
+ v8_inspector::StringView messageView((const uint8_t*)nmessage.c_str(), nmessage.length());
+ gInspectorSession->dispatchProtocolMessage(messageView);
+}
+
+extern "C" void V8RecordReplayFinishRecording();
+
+// Mimic the gecko test runner behavior when using window.dump() to signal that the
+// recording is finished. This is pretty hacky.
+static void RecordReplayDump(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() == 1 && args[0]->IsString()) {
+ v8::String::Utf8Value message(args.GetIsolate(), args[0]);
+ if (!strcmp(*message, "RecReplaySendAsyncMessage Example__Finished")) {
+ V8RecordReplayFinishRecording();
+ }
+ }
+}
+
+void SetupRecordReplayCommands(v8::Isolate* isolate) {
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+
+ v8::Local<v8::String> args_name_string =
+ v8::String::NewFromUtf8(isolate, "__RECORD_REPLAY_ARGUMENTS__",
+ v8::NewStringType::kInternalized).ToLocalChecked();
+
+ v8::Local<v8::Object> args = v8::Object::New(isolate);
+ context->Global()->Set(context, args_name_string, args).Check();
+
+ SetFunctionProperty(isolate, args, "log",
+ RecordReplayLog);
+ SetFunctionProperty(isolate, args, "setCDPMessageCallback",
+ RecordReplaySetCDPMessageCallback);
+ SetFunctionProperty(isolate, args, "sendCDPMessage",
+ RecordReplaySendCDPMessage);
+ SetFunctionProperty(isolate, args, "setCommandCallback",
+ v8::FunctionCallbackRecordReplaySetCommandCallback);
+ SetFunctionProperty(isolate, args, "setClearPauseDataCallback",
+ v8::FunctionCallbackRecordReplaySetClearPauseDataCallback);
+ SetFunctionProperty(isolate, args, "ignoreScript",
+ v8::FunctionCallbackRecordReplayIgnoreScript);
+ SetFunctionProperty(isolate, args, "dump",
+ RecordReplayDump);
+
+ v8::Local<v8::String> source =
+ v8::String::NewFromUtf8(isolate, gRecordReplayScript,
+ v8::NewStringType::kInternalized).ToLocalChecked();
+
+ v8::Local<v8::String> filename =
+ v8::String::NewFromUtf8(isolate, "record-replay-internal",
+ v8::NewStringType::kInternalized).ToLocalChecked();
+
+ v8::ScriptOrigin origin(filename);
+ v8::Local<v8::Script> script = v8::Script::Compile(context, source, &origin).ToLocalChecked();
+ script->Run(context).ToLocalChecked();
+}
+
+void RecordReplayOnErrorEvent(ErrorEvent* error_event) {
+ recordreplay::Print("ON_ERROR_EVENT %d", error_event->record_replay_bookmark());
+}
+
+} // namespace blink
diff --git a/third_party/blink/renderer/bindings/core/v8/record_replay_interface.h b/third_party/blink/renderer/bindings/core/v8/record_replay_interface.h
new file mode 100644
index 000000000000..e6019b67fb71
--- /dev/null
+++ b/third_party/blink/renderer/bindings/core/v8/record_replay_interface.h
@@ -0,0 +1,21 @@
+// Copyright 2021 Record Replay Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_BINDINGS_CORE_V8_RECORD_REPLAY_INTERFACE_H_
+#define THIRD_PARTY_BLINK_RENDERER_BINDINGS_CORE_V8_RECORD_REPLAY_INTERFACE_H_
+
+#include "third_party/blink/renderer/core/events/error_event.h"
+#include "v8/include/v8.h"
+
+namespace blink {
+
+// Initialize command state after the first context is created.
+void SetupRecordReplayCommands(v8::Isolate* isolate);
+
+// Notify the driver that we're adding an error to the console.
+void RecordReplayOnErrorEvent(ErrorEvent* error_event);
+
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_BINDINGS_CORE_V8_RECORD_REPLAY_INTERFACE_H_
diff --git a/third_party/blink/renderer/bindings/core/v8/v8_initializer.cc b/third_party/blink/renderer/bindings/core/v8/v8_initializer.cc
index 4fbbdb2304a4..601883e14ead 100644
--- a/third_party/blink/renderer/bindings/core/v8/v8_initializer.cc
+++ b/third_party/blink/renderer/bindings/core/v8/v8_initializer.cc
@@ -145,6 +145,8 @@ const size_t kWasmWireBytesLimit = 1 << 12;
} // namespace
+extern "C" int V8GetMessageRecordReplayBookmark(v8::Local<v8::Message> message);
+
void V8Initializer::MessageHandlerInMainThread(v8::Local<v8::Message> message,
v8::Local<v8::Value> data) {
DCHECK(IsMainThread());
@@ -178,6 +180,11 @@ void V8Initializer::MessageHandlerInMainThread(v8::Local<v8::Message> message,
ToCoreStringWithNullCheck(message->Get()), std::move(location),
ScriptValue::From(script_state, data), &script_state->World());
+ int bookmark = V8GetMessageRecordReplayBookmark(message);
+ if (bookmark) {
+ event->set_record_replay_bookmark(bookmark);
+ }
+
String message_for_console = ExtractMessageForConsole(isolate, data);
if (!message_for_console.IsEmpty())
event->SetUnsanitizedMessage("Uncaught " + message_for_console);
diff --git a/third_party/blink/renderer/core/events/error_event.h b/third_party/blink/renderer/core/events/error_event.h
index 52a7cced577c..614b334213ed 100644
--- a/third_party/blink/renderer/core/events/error_event.h
+++ b/third_party/blink/renderer/core/events/error_event.h
@@ -89,6 +89,8 @@ class CORE_EXPORT ErrorEvent final : public Event {
const String& filename() const { return location_->Url(); }
unsigned lineno() const { return location_->LineNumber(); }
unsigned colno() const { return location_->ColumnNumber(); }
+ void set_record_replay_bookmark(int bookmark) { record_replay_bookmark_ = bookmark; }
+ int record_replay_bookmark() const { return record_replay_bookmark_; }
ScriptValue error(ScriptState*) const;
// Not exposed to JavaScript, prefers |unsanitized_message_|.
@@ -114,6 +116,7 @@ class CORE_EXPORT ErrorEvent final : public Event {
std::unique_ptr<SourceLocation> location_;
WorldSafeV8Reference<v8::Value> error_;
scoped_refptr<DOMWrapperWorld> world_;
+ int record_replay_bookmark_ = 0;
};
template <>
diff --git a/third_party/blink/renderer/core/execution_context/execution_context.cc b/third_party/blink/renderer/core/execution_context/execution_context.cc
index f191f6df84f5..9e9d37fb1a79 100644
--- a/third_party/blink/renderer/core/execution_context/execution_context.cc
+++ b/third_party/blink/renderer/core/execution_context/execution_context.cc
@@ -33,6 +33,7 @@
#include "third_party/blink/public/mojom/feature_policy/feature_policy_feature.mojom-blink.h"
#include "third_party/blink/public/mojom/feature_policy/policy_disposition.mojom-blink.h"
#include "third_party/blink/public/platform/task_type.h"
+#include "third_party/blink/renderer/bindings/core/v8/record_replay_interface.h"
#include "third_party/blink/renderer/bindings/core/v8/source_location.h"
#include "third_party/blink/renderer/bindings/core/v8/v8_binding_for_core.h"
#include "third_party/blink/renderer/core/dom/events/event_target.h"
@@ -226,6 +227,8 @@ void ExecutionContext::AddConsoleMessageImpl(mojom::ConsoleMessageSource source,
void ExecutionContext::DispatchErrorEvent(
ErrorEvent* error_event,
SanitizeScriptErrors sanitize_script_errors) {
+ RecordReplayOnErrorEvent(error_event);
+
if (in_dispatch_error_event_) {
pending_exceptions_.push_back(error_event);
return;
diff --git a/third_party/blink/renderer/core/html/parser/background_html_input_stream.cc b/third_party/blink/renderer/core/html/parser/background_html_input_stream.cc
index 4585de546ce9..fbdac4502ec1 100644
--- a/third_party/blink/renderer/core/html/parser/background_html_input_stream.cc
+++ b/third_party/blink/renderer/core/html/parser/background_html_input_stream.cc
@@ -25,6 +25,8 @@
#include "third_party/blink/renderer/core/html/parser/background_html_input_stream.h"
+#include "base/record_replay.h"
+
namespace blink {
BackgroundHTMLInputStream::BackgroundHTMLInputStream()
@@ -33,6 +35,8 @@ BackgroundHTMLInputStream::BackgroundHTMLInputStream()
total_checkpoint_token_count_(0) {}
void BackgroundHTMLInputStream::Append(const String& input) {
+ recordreplay::Assert("BackgroundHTMLInputStream::Append %u %u", input.length(), input[0]);
+
current_.Append(SegmentedString(input));
segments_.push_back(input);
}
@@ -115,6 +119,8 @@ void BackgroundHTMLInputStream::RewindTo(HTMLInputCheckpoint checkpoint_index,
first_valid_segment_index_ = 0;
UpdateTotalCheckpointTokenCount();
+
+ recordreplay::Assert("BackgroundHTMLInputStream::RewindTo %u", current_.length());
}
void BackgroundHTMLInputStream::UpdateTotalCheckpointTokenCount() {
diff --git a/third_party/blink/renderer/core/html/parser/background_html_parser.cc b/third_party/blink/renderer/core/html/parser/background_html_parser.cc
index 6bc906f027b4..2400e10d20d1 100644
--- a/third_party/blink/renderer/core/html/parser/background_html_parser.cc
+++ b/third_party/blink/renderer/core/html/parser/background_html_parser.cc
@@ -28,6 +28,7 @@
#include <memory>
#include <utility>
+#include "base/record_replay.h"
#include "base/single_thread_task_runner.h"
#include "third_party/blink/public/platform/platform.h"
#include "third_party/blink/renderer/core/html/parser/html_document_parser.h"
@@ -38,6 +39,11 @@
#include "third_party/blink/renderer/platform/wtf/functional.h"
#include "third_party/blink/renderer/platform/wtf/text/text_position.h"
+// V8 API for HTML parsing activity that will be reported to the record/replay driver.
+extern "C" void V8RecordReplayHTMLParseStart(void* token, const char* url);
+extern "C" void V8RecordReplayHTMLParseFinish(void* token);
+extern "C" void V8RecordReplayHTMLParseAddData(void* token, const char* data);
+
namespace blink {
// On a network with high latency and high bandwidth, using a device with a fast
@@ -81,6 +87,9 @@ void BackgroundHTMLParser::Init(
document_url, std::move(cached_document_parameters),
media_values_cached_data, TokenPreloadScanner::ScannerType::kMainDocument,
priority_hints_origin_trial_enabled));
+ if (recordreplay::IsRecordingOrReplaying()) {
+ V8RecordReplayHTMLParseStart(this, document_url.GetString().Utf8().c_str());
+ }
}
BackgroundHTMLParser::Configuration::Configuration() {}
@@ -99,7 +108,11 @@ BackgroundHTMLParser::BackgroundHTMLParser(
HTMLDocumentParser::TokenizedChunk::kNoPendingToken),
starting_script_(false) {}
-BackgroundHTMLParser::~BackgroundHTMLParser() = default;
+BackgroundHTMLParser::~BackgroundHTMLParser() {
+ if (recordreplay::IsRecordingOrReplaying()) {
+ V8RecordReplayHTMLParseFinish(this);
+ }
+}
void BackgroundHTMLParser::AppendRawBytesFromMainThread(
std::unique_ptr<Vector<char>> buffer) {
@@ -110,6 +123,9 @@ void BackgroundHTMLParser::AppendRawBytesFromMainThread(
void BackgroundHTMLParser::AppendDecodedBytes(const String& input) {
DCHECK(!input_.Current().IsClosed());
+ if (recordreplay::IsRecordingOrReplaying()) {
+ V8RecordReplayHTMLParseAddData(this, input.Utf8().c_str());
+ }
input_.Append(input);
PumpTokenizer();
}
@@ -126,16 +142,22 @@ void BackgroundHTMLParser::Flush() {
}
void BackgroundHTMLParser::UpdateDocument(const String& decoded_data) {
+ recordreplay::Assert("BackgroundHTMLParser::UpdateDocument Start %u %u",
+ decoded_data.length(), decoded_data[0]);
+
DocumentEncodingData encoding_data(*decoder_.get());
if (encoding_data != last_seen_encoding_data_) {
last_seen_encoding_data_ = encoding_data;
if (parser_)
parser_->DidReceiveEncodingDataFromBackgroundParser(encoding_data);
}
- if (decoded_data.IsEmpty())
+ if (decoded_data.IsEmpty()) {
+ recordreplay::Assert("BackgroundHTMLParser::UpdateDocument #1");
return;
+ }
AppendDecodedBytes(decoded_data);
+ recordreplay::Assert("BackgroundHTMLParser::UpdateDocument Done");
}
void BackgroundHTMLParser::ResumeFrom(std::unique_ptr<Checkpoint> checkpoint) {
@@ -190,11 +212,18 @@ void BackgroundHTMLParser::PumpTokenizer() {
HTMLTreeBuilderSimulator::SimulatedToken simulated_token =
HTMLTreeBuilderSimulator::kOtherToken;
+ recordreplay::Assert("BackgroundHTMLParser::PumpTokenizer Start %u",
+ input_.Current().length());
+
// No need to start speculating until the main thread has almost caught up.
- if (input_.TotalCheckpointTokenCount() > kOutstandingTokenLimit)
+ if (input_.TotalCheckpointTokenCount() > kOutstandingTokenLimit) {
+ recordreplay::Assert("BackgroundHTMLParser::PumpTokenizer #1");
return;
+ }
while (tokenizer_->NextToken(input_.Current(), *token_)) {
+ recordreplay::Assert("BackgroundHTMLParser::PumpTokenizer #2");
+
{
TextPosition position = TextPosition(input_.Current().CurrentLine(),
input_.Current().CurrentColumn());
@@ -211,6 +240,7 @@ void BackgroundHTMLParser::PumpTokenizer() {
// starting a script so the main parser can decide if it should yield
// before processing the chunk.
if (simulated_token == HTMLTreeBuilderSimulator::kValidScriptStart) {
+ recordreplay::Assert("BackgroundHTMLParser::PumpTokenizer #3");
EnqueueTokenizedChunk();
starting_script_ = true;
}
@@ -228,16 +258,21 @@ void BackgroundHTMLParser::PumpTokenizer() {
simulated_token == HTMLTreeBuilderSimulator::kLink ||
simulated_token == HTMLTreeBuilderSimulator::kCustomElementBegin ||
pending_tokens_.size() >= kPendingTokenLimit) {
+ recordreplay::Assert("BackgroundHTMLParser::PumpTokenizer #4");
EnqueueTokenizedChunk();
// If we're far ahead of the main thread, yield for a bit to avoid
// consuming too much memory.
- if (input_.TotalCheckpointTokenCount() > kOutstandingTokenLimit)
+ if (input_.TotalCheckpointTokenCount() > kOutstandingTokenLimit) {
+ recordreplay::Assert("BackgroundHTMLParser::PumpTokenizer #4.1");
break;
+ }
}
}
+ recordreplay::Assert("BackgroundHTMLParser::PumpTokenizer #5");
EnqueueTokenizedChunk();
+ recordreplay::Assert("BackgroundHTMLParser::PumpTokenizer Done");
}
void BackgroundHTMLParser::EnqueueTokenizedChunk() {
diff --git a/third_party/blink/renderer/core/html/parser/html_document_parser.cc b/third_party/blink/renderer/core/html/parser/html_document_parser.cc
index a68c66632294..2d8f13b9b170 100644
--- a/third_party/blink/renderer/core/html/parser/html_document_parser.cc
+++ b/third_party/blink/renderer/core/html/parser/html_document_parser.cc
@@ -30,6 +30,7 @@
#include "base/auto_reset.h"
#include "base/numerics/safe_conversions.h"
+#include "base/record_replay.h"
#include "third_party/blink/public/common/features.h"
#include "third_party/blink/public/common/loader/loading_behavior_flag.h"
#include "third_party/blink/public/mojom/appcache/appcache.mojom-blink.h"
@@ -629,11 +630,15 @@ void HTMLDocumentParser::EnqueueTokenizedChunk(
DCHECK(!RuntimeEnabledFeatures::ForceSynchronousHTMLParsingEnabled());
TRACE_EVENT0("blink", "HTMLDocumentParser::EnqueueTokenizedChunk");
+ recordreplay::Assert("HTMLDocumentParser::EnqueueTokenizedChunk Start");
+
DCHECK(chunk);
DCHECK(GetDocument());
- if (!IsParsing())
+ if (!IsParsing()) {
+ recordreplay::Assert("HTMLDocumentParser::EnqueueTokenizedChunk #1");
return;
+ }
// ApplicationCache needs to be initialized before issuing preloads. We
// suspend preload until HTMLHTMLElement is inserted and ApplicationCache is
@@ -673,6 +678,8 @@ void HTMLDocumentParser::EnqueueTokenizedChunk(
// Delay sending some requests if meta tag based CSP is present or
// if AppCache was used to fetch the HTML but was not yet initialized for
// this document.
+ recordreplay::Assert("HTMLDocumentParser::EnqueueTokenizedChunk #2 %d",
+ !!pending_csp_meta_token_);
if (pending_csp_meta_token_ ||
((!base::FeatureList::IsEnabled(
blink::features::kVerifyHTMLFetchedFromAppCacheBeforeDelay) ||
@@ -701,6 +708,8 @@ void HTMLDocumentParser::EnqueueTokenizedChunk(
if (!IsPaused() && !IsScheduledForUnpause())
parser_scheduler_->ScheduleForUnpause();
+
+ recordreplay::Assert("HTMLDocumentParser::EnqueueTokenizedChunk Done");
}
void HTMLDocumentParser::DidReceiveEncodingDataFromBackgroundParser(
@@ -1587,6 +1596,8 @@ void HTMLDocumentParser::AppendBytes(const char* data, size_t length) {
TRACE_EVENT2("blink", "HTMLDocumentParser::appendBytes", "size",
(unsigned)length, "parser", (void*)this);
+ recordreplay::Assert("HTMLDocumentParser::AppendBytes %lu %u", length, data[0]);
+
DCHECK(Thread::MainThread()->IsCurrentThread());
if (!length || IsStopped())
diff --git a/third_party/blink/renderer/core/html/parser/html_tokenizer.cc b/third_party/blink/renderer/core/html/parser/html_tokenizer.cc
index 241efc1df374..9cc0b7e98afa 100644
--- a/third_party/blink/renderer/core/html/parser/html_tokenizer.cc
+++ b/third_party/blink/renderer/core/html/parser/html_tokenizer.cc
@@ -27,6 +27,7 @@
#include "third_party/blink/renderer/core/html/parser/html_tokenizer.h"
+#include "base/record_replay.h"
#include "third_party/blink/renderer/core/html/parser/html_entity_parser.h"
#include "third_party/blink/renderer/core/html/parser/html_parser_idioms.h"
#include "third_party/blink/renderer/core/html/parser/html_tree_builder.h"
@@ -128,6 +129,9 @@ bool HTMLTokenizer::FlushEmitAndResumeIn(SegmentedString& source,
}
bool HTMLTokenizer::NextToken(SegmentedString& source, HTMLToken& token) {
+ recordreplay::Assert("HTMLTokenizer::NextToken Start %u %u",
+ source.length(), source.CurrentChar());
+
// If we have a token in progress, then we're supposed to be called back
// with the same token so we can finish it.
DCHECK(!token_ || token_ == &token ||
@@ -143,14 +147,19 @@ bool HTMLTokenizer::NextToken(SegmentedString& source, HTMLToken& token) {
temporary_buffer_.clear();
if (state_ == HTMLTokenizer::kDataState) {
// We're back in the data state, so we must be done with the tag.
+ recordreplay::Assert("HTMLTokenizer::NextToken #1");
return true;
}
}
- if (source.IsEmpty() || !input_stream_preprocessor_.Peek(source))
+ if (source.IsEmpty() || !input_stream_preprocessor_.Peek(source)) {
+ recordreplay::Assert("HTMLTokenizer::NextToken #2");
return HaveBufferedCharacterToken();
+ }
UChar cc = input_stream_preprocessor_.NextInputCharacter();
+ recordreplay::Assert("HTMLTokenizer::NextToken #3 %d", state_);
+
// Source: http://www.whatwg.org/specs/web-apps/current-work/#tokenisation0
switch (state_) {
HTML_BEGIN_STATE(kDataState) {
diff --git a/third_party/blink/renderer/core/layout/ng/inline/ng_inline_node.cc b/third_party/blink/renderer/core/layout/ng/inline/ng_inline_node.cc
index fdd620bca319..4cc5d4f5c6d9 100644
--- a/third_party/blink/renderer/core/layout/ng/inline/ng_inline_node.cc
+++ b/third_party/blink/renderer/core/layout/ng/inline/ng_inline_node.cc
@@ -7,6 +7,7 @@
#include <algorithm>
#include <memory>
+#include "base/record_replay.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "third_party/blink/renderer/core/frame/web_feature.h"
@@ -856,7 +857,9 @@ bool NGInlineNode::SetTextWithOffset(LayoutText* layout_text,
}
const NGInlineNodeData& NGInlineNode::EnsureData() const {
+ recordreplay::Assert("NGInlineNode::EnsureData Start");
PrepareLayoutIfNeeded();
+ recordreplay::Assert("NGInlineNode::EnsureData #1");
return Data();
}
diff --git a/third_party/blink/renderer/core/scroll/scroll_animator_mac.mm b/third_party/blink/renderer/core/scroll/scroll_animator_mac.mm
index 51d9ba907133..3a9f3bc3270c 100644
--- a/third_party/blink/renderer/core/scroll/scroll_animator_mac.mm
+++ b/third_party/blink/renderer/core/scroll/scroll_animator_mac.mm
@@ -30,6 +30,7 @@
#include <memory>
#include "base/mac/scoped_cftyperef.h"
#include "base/memory/scoped_policy.h"
+#include "base/record_replay.h"
#include "third_party/blink/public/platform/platform.h"
#include "third_party/blink/renderer/core/scroll/ns_scroller_imp_details.h"
#include "third_party/blink/renderer/core/scroll/scrollable_area.h"
@@ -671,6 +672,11 @@ class BlinkScrollbarPartAnimationTimer {
- (void)invalidate {
_scrollbar = 0;
+
+  // Objective-C messages implemented by Blink are not currently replayed.
+  // Pass through events while handling them to avoid interacting with the recording.
+ recordreplay::AutoPassThroughEvents pt;
+
BEGIN_BLOCK_OBJC_EXCEPTIONS;
[_knobAlphaAnimation invalidate];
[_trackAlphaAnimation invalidate];
diff --git a/third_party/blink/renderer/platform/scheduler/common/throttling/throttled_time_domain.cc b/third_party/blink/renderer/platform/scheduler/common/throttling/throttled_time_domain.cc
index 828a6d36710d..2334fc6bafd2 100644
--- a/third_party/blink/renderer/platform/scheduler/common/throttling/throttled_time_domain.cc
+++ b/third_party/blink/renderer/platform/scheduler/common/throttling/throttled_time_domain.cc
@@ -4,6 +4,7 @@
#include "third_party/blink/renderer/platform/scheduler/common/throttling/throttled_time_domain.h"
+#include "base/record_replay.h"
#include "base/task/sequence_manager/sequence_manager.h"
namespace blink {
@@ -38,19 +39,28 @@ void ThrottledTimeDomain::SetNextTaskRunTime(base::TimeTicks run_time) {
base::Optional<base::TimeDelta> ThrottledTimeDomain::DelayTillNextTask(
base::sequence_manager::LazyNow* lazy_now) {
+ recordreplay::Assert("ThrottledTimeDomain::DelayTillNextTask Start");
+
base::TimeTicks now = lazy_now->Now();
- if (next_task_run_time_ && next_task_run_time_ > now)
+ if (next_task_run_time_ && next_task_run_time_ > now) {
+ recordreplay::Assert("ThrottledTimeDomain::DelayTillNextTask #1");
return next_task_run_time_.value() - now;
+ }
base::Optional<base::TimeTicks> next_run_time = NextScheduledRunTime();
- if (!next_run_time)
+ if (!next_run_time) {
+ recordreplay::Assert("ThrottledTimeDomain::DelayTillNextTask #2");
return base::nullopt;
+ }
- if (now >= next_run_time)
+ if (now >= next_run_time) {
+ recordreplay::Assert("ThrottledTimeDomain::DelayTillNextTask #3");
return base::TimeDelta(); // Makes DoWork post an immediate continuation.
+ }
// We assume the owner (i.e. TaskQueueThrottler) will manage wake-ups on our
// behalf.
+ recordreplay::Assert("ThrottledTimeDomain::DelayTillNextTask Done");
return base::nullopt;
}
diff --git a/third_party/blink/renderer/platform/widget/input/main_thread_event_queue.cc b/third_party/blink/renderer/platform/widget/input/main_thread_event_queue.cc
index 3b0ef487af7a..3a89ef534702 100644
--- a/third_party/blink/renderer/platform/widget/input/main_thread_event_queue.cc
+++ b/third_party/blink/renderer/platform/widget/input/main_thread_event_queue.cc
@@ -9,6 +9,7 @@
#include "base/bind.h"
#include "base/containers/circular_deque.h"
#include "base/metrics/histogram_macros.h"
+#include "base/record_replay.h"
#include "cc/metrics/event_metrics.h"
#include "third_party/blink/public/common/features.h"
#include "third_party/blink/public/common/input/web_coalesced_input_event.h"
@@ -263,6 +264,7 @@ MainThreadEventQueue::MainThreadEventQueue(
needs_low_latency_(false),
needs_unbuffered_input_for_debugger_(false),
allow_raf_aligned_input_(allow_raf_aligned_input),
+ shared_state_lock_("MainThreadEventQueue.shared_state_lock_"),
main_task_runner_(main_task_runner),
main_thread_scheduler_(main_thread_scheduler) {
raf_fallback_timer_ = std::make_unique<base::OneShotTimer>();
@@ -281,6 +283,8 @@ void MainThreadEventQueue::HandleEvent(
const WebInputEventAttribution& attribution,
std::unique_ptr<cc::EventMetrics> metrics,
HandledEventCallback callback) {
+ recordreplay::Assert("MainThreadEventQueue::HandleEvent Start");
+
TRACE_EVENT2("input", "MainThreadEventQueue::HandleEvent", "dispatch_type",
original_dispatch_type, "event_type", event->Event().GetType());
DCHECK(original_dispatch_type == DispatchType::kBlocking ||
@@ -359,8 +363,10 @@ void MainThreadEventQueue::HandleEvent(
WebInputEvent::Type::kPointerRawUpdate,
static_cast<const WebMouseEvent&>(event->Event())),
event->latency_info());
+ recordreplay::Assert("MainThreadEventQueue::HandleEvent #1");
QueueEvent(QueuedWebInputEvent::CreateForRawEvent(
std::move(raw_event), attribution, metrics.get()));
+ recordreplay::Assert("MainThreadEventQueue::HandleEvent #2");
} else if (event->Event().GetType() == WebInputEvent::Type::kTouchMove) {
const WebTouchEvent& touch_event =
static_cast<const WebTouchEvent&>(event->Event());
@@ -372,8 +378,10 @@ void MainThreadEventQueue::HandleEvent(
event->latency_info());
raw_event->EventPointer()->SetType(
WebInputEvent::Type::kPointerRawUpdate);
+ recordreplay::Assert("MainThreadEventQueue::HandleEvent #3");
QueueEvent(QueuedWebInputEvent::CreateForRawEvent(
std::move(raw_event), attribution, metrics.get()));
+ recordreplay::Assert("MainThreadEventQueue::HandleEvent #4");
}
}
}
@@ -390,12 +398,16 @@ void MainThreadEventQueue::HandleEvent(
IsForwardedAndSchedulerKnown(ack_result), attribution,
std::move(metrics));
+ recordreplay::Assert("MainThreadEventQueue::HandleEvent #5");
QueueEvent(std::move(queued_event));
+ recordreplay::Assert("MainThreadEventQueue::HandleEvent #6");
if (callback) {
std::move(callback).Run(ack_result, cloned_latency_info, nullptr,
base::nullopt);
}
+
+ recordreplay::Assert("MainThreadEventQueue::HandleEvent Done");
}
void MainThreadEventQueue::QueueClosure(base::OnceClosure closure) {
diff --git a/third_party/blink/renderer/platform/widget/input/widget_input_handler_manager.cc b/third_party/blink/renderer/platform/widget/input/widget_input_handler_manager.cc
index 55f02f8cdc71..eb52e02e4219 100644
--- a/third_party/blink/renderer/platform/widget/input/widget_input_handler_manager.cc
+++ b/third_party/blink/renderer/platform/widget/input/widget_input_handler_manager.cc
@@ -11,6 +11,7 @@
#include "base/feature_list.h"
#include "base/metrics/histogram_macros.h"
#include "base/notreached.h"
+#include "base/record_replay.h"
#include "cc/base/features.h"
#include "cc/metrics/event_metrics.h"
#include "cc/trees/layer_tree_host.h"
@@ -710,6 +711,7 @@ void WidgetInputHandlerManager::DidHandleInputEventSentToCompositor(
std::unique_ptr<InputHandlerProxy::DidOverscrollParams> overscroll_params,
const WebInputEventAttribution& attribution,
std::unique_ptr<cc::EventMetrics> metrics) {
+ recordreplay::Assert("WidgetInputHandlerManager::DidHandleInputEventSentToCompositor Start");
TRACE_EVENT1("input",
"WidgetInputHandlerManager::DidHandleInputEventSentToCompositor",
"Disposition", event_disposition);
@@ -742,6 +744,7 @@ void WidgetInputHandlerManager::DidHandleInputEventSentToCompositor(
FROM_HERE,
base::BindOnce(&WidgetInputHandlerManager::FindScrollTargetOnMainThread,
this, event_position, std::move(result_callback)));
+ recordreplay::Assert("WidgetInputHandlerManager::DidHandleInputEventSentToCompositor #1");
return;
}
@@ -769,9 +772,11 @@ void WidgetInputHandlerManager::DidHandleInputEventSentToCompositor(
HandledEventCallback handled_event = base::BindOnce(
&WidgetInputHandlerManager::DidHandleInputEventSentToMain, this,
std::move(callback));
+ recordreplay::Assert("WidgetInputHandlerManager::DidHandleInputEventSentToCompositor #1.1");
input_event_queue_->HandleEvent(std::move(event), dispatch_type, ack_state,
attribution, std::move(metrics),
std::move(handled_event));
+ recordreplay::Assert("WidgetInputHandlerManager::DidHandleInputEventSentToCompositor #2");
return;
}
@@ -784,6 +789,8 @@ void WidgetInputHandlerManager::DidHandleInputEventSentToCompositor(
allowed_touch_action_.value())
: nullptr);
}
+
+ recordreplay::Assert("WidgetInputHandlerManager::DidHandleInputEventSentToCompositor Done");
}
void WidgetInputHandlerManager::DidHandleInputEventSentToMainFromWidgetBase(
diff --git a/ui/gfx/mac/io_surface.cc b/ui/gfx/mac/io_surface.cc
index e5fd0a36f83e..0f7186b4c349 100644
--- a/ui/gfx/mac/io_surface.cc
+++ b/ui/gfx/mac/io_surface.cc
@@ -164,7 +164,8 @@ bool IOSurfaceSetColorSpace(IOSurfaceRef io_surface,
ColorSpace::MatrixID::BT2020_NCL,
ColorSpace::RangeID::LIMITED)) {
if (__builtin_available(macos 11.0, *)) {
- color_space_name = kCGColorSpaceITUR_2100_PQ;
+ // FIXME changed to fix build break.
+ color_space_name = kCGColorSpaceITUR_2020_PQ;
} else {
return true;
}
@@ -173,7 +174,8 @@ bool IOSurfaceSetColorSpace(IOSurfaceRef io_surface,
ColorSpace::MatrixID::BT2020_NCL,
ColorSpace::RangeID::LIMITED)) {
if (__builtin_available(macos 11.0, *)) {
- color_space_name = kCGColorSpaceITUR_2100_HLG;
+ // FIXME changed to fix build break.
+ color_space_name = kCGColorSpaceITUR_2020_HLG;
} else {
return true;
}
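
For reference, the recordreplay:: calls added throughout this patch assume an interface roughly like the sketch below. This is a minimal illustration inferred only from how Assert and AutoPassThroughEvents are used in the hunks above; the actual base/record_replay.h introduced by the patch is not reproduced here, and the stderr logging and no-op bodies are hypothetical stand-ins, not the real driver behavior.

// record_replay_sketch.h -- hypothetical sketch, for illustration only.
#ifndef RECORD_REPLAY_SKETCH_H_
#define RECORD_REPLAY_SKETCH_H_

#include <cstdarg>
#include <cstdio>

namespace recordreplay {

// Records a printf-style assertion so that divergence between recording and
// replay can be detected at this call site. (Sketch: just logs to stderr.)
inline void Assert(const char* format, ...) {
  va_list args;
  va_start(args, format);
  std::fprintf(stderr, "[recordreplay] ");
  std::vfprintf(stderr, format, args);
  std::fprintf(stderr, "\n");
  va_end(args);
}

// RAII helper: while an instance is alive, events are passed through without
// being captured in (or checked against) the recording. (Sketch: no-op.)
class AutoPassThroughEvents {
 public:
  AutoPassThroughEvents() { /* would begin pass-through in the real driver */ }
  ~AutoPassThroughEvents() { /* would end pass-through */ }
};

}  // namespace recordreplay

#endif  // RECORD_REPLAY_SKETCH_H_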