@jmarantz
Last active August 1, 2020 21:58
diff --git a/source/common/stats/thread_local_store.cc b/source/common/stats/thread_local_store.cc
index 81e968177d..73da5d1614 100644
--- a/source/common/stats/thread_local_store.cc
+++ b/source/common/stats/thread_local_store.cc
@@ -42,6 +42,7 @@ ThreadLocalStoreImpl::~ThreadLocalStoreImpl() {
ASSERT(shutting_down_ || !threading_ever_initialized_);
default_scope_.reset();
ASSERT(scopes_.empty());
+ ASSERT(histograms_to_cleanup_.empty());
}
void ThreadLocalStoreImpl::setHistogramSettings(HistogramSettingsConstPtr&& histogram_settings) {
@@ -268,9 +269,20 @@ void ThreadLocalStoreImpl::releaseHistogramCrossThread(uint64_t histogram_id) {
// This can happen from any thread. We post() back to the main thread which will initiate the
// cache flush operation.
if (!shutting_down_ && main_thread_dispatcher_) {
- main_thread_dispatcher_->post([this, histogram_id]() {
- clearHistogramFromCaches(histogram_id);
- });
+ // It's possible that many different histograms will be deleted at the same
+ // time, before the main thread gets a chance to run
+ // clearHistogramsFromCaches. If a new histogram is deleted before that
+ // post runs, we add it to our list of histograms to clear, and there's no
+ // need to issue another post.
+ bool need_post = false;
+ {
+ Thread::LockGuard lock(hist_mutex_);
+ need_post = histograms_to_cleanup_.empty();
+ histograms_to_cleanup_.push_back(histogram_id);
+ }
+ if (need_post) {
+ main_thread_dispatcher_->post([this]() { clearHistogramsFromCaches(); });
+ }
}
}
@@ -280,8 +292,10 @@ ThreadLocalStoreImpl::TlsCache::insertScope(uint64_t scope_id) {
}
void ThreadLocalStoreImpl::TlsCache::eraseScope(uint64_t scope_id) { scope_cache_.erase(scope_id); }
-void ThreadLocalStoreImpl::TlsCache::eraseHistogram(uint64_t histogram_id) {
- histogram_cache_.erase(histogram_id);
+void ThreadLocalStoreImpl::TlsCache::eraseHistograms(const std::vector<uint64_t>& histograms) {
+ for (uint64_t histogram_id : histograms) {
+ histogram_cache_.erase(histogram_id);
+ }
}
void ThreadLocalStoreImpl::clearScopeFromCaches(uint64_t scope_id,
@@ -296,19 +310,21 @@ void ThreadLocalStoreImpl::clearScopeFromCaches(uint64_t scope_id,
}
}
-void ThreadLocalStoreImpl::clearHistogramFromCaches(uint64_t histogram_id) {
+void ThreadLocalStoreImpl::clearHistogramsFromCaches() {
// If we are shutting down we no longer perform cache flushes as workers may be shutting down
// at the same time.
if (!shutting_down_) {
- // Perform a cache flush on all threads.
- //
- // TODO(jmarantz): If this cross-thread posting proves to be a performance
- // bottleneck,
- // https://gist.github.com/jmarantz/838cb6de7e74c0970ea6b63eded0139a
- // contains a patch that will implement batching together to clear multiple
- // histograms.
+ // Capture all the pending histogram IDs in a local vector, clearing the
+ // list held in this object. Once the swap completes, deleting another
+ // histogram will require a new post.
+ auto histograms = std::make_shared<std::vector<uint64_t>>();
+ {
+ Thread::LockGuard lock(hist_mutex_);
+ histograms->swap(histograms_to_cleanup_);
+ }
+
tls_->runOnAllThreads(
- [this, histogram_id]() { tls_->getTyped<TlsCache>().eraseHistogram(histogram_id); });
+ [this, histograms]() { tls_->getTyped<TlsCache>().eraseHistograms(*histograms); });
}
}
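
The pattern in the .cc change above, coalescing many cross-thread posts into one batched drain, generalizes beyond Envoy's stats code. Below is a minimal, self-contained sketch of the same idea using std::mutex and a toy postToMainThread() stand-in for main_thread_dispatcher_->post(); the class and function names here are hypothetical, not Envoy APIs. Releases from any thread queue an ID under a lock; a drain is posted only when the queue transitions from empty to non-empty; and the drain swaps the queue into a local vector under the lock so the fan-out proceeds without holding it.

#include <cstdint>
#include <functional>
#include <mutex>
#include <vector>

// Toy stand-in for posting work to a single main thread; it runs the
// callback inline here so the sketch is runnable on its own.
inline void postToMainThread(std::function<void()> callback) { callback(); }

class BatchedCleanup {
public:
  // May be called from any worker thread when a histogram ID is released.
  void release(uint64_t id) {
    bool need_post = false;
    {
      std::lock_guard<std::mutex> lock(mutex_);
      // Post only on the empty -> non-empty transition; IDs queued before
      // the post actually runs piggyback on the same drain.
      need_post = pending_.empty();
      pending_.push_back(id);
    }
    if (need_post) {
      postToMainThread([this] { drain(); });
    }
  }

private:
  // Runs on the main thread. Swaps the queue into a local vector under the
  // lock so the per-thread cache erasure can proceed without holding it.
  void drain() {
    std::vector<uint64_t> ids;
    {
      std::lock_guard<std::mutex> lock(mutex_);
      ids.swap(pending_);
    }
    // ... erase each ID from every worker's cache, as runOnAllThreads does
    // in the patch above.
  }

  std::mutex mutex_;
  std::vector<uint64_t> pending_; // guarded by mutex_
};

One detail the sketch elides: the patch wraps the drained vector in a std::shared_ptr so a single allocation can be captured by value in the lambda that runOnAllThreads delivers to every worker, keeping it alive until the last thread has finished with it.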
diff --git a/source/common/stats/thread_local_store.h b/source/common/stats/thread_local_store.h
index 63022fc71c..31f9654a7c 100644
--- a/source/common/stats/thread_local_store.h
+++ b/source/common/stats/thread_local_store.h
@@ -435,7 +435,7 @@ private:
struct TlsCache : public ThreadLocal::ThreadLocalObject {
TlsCacheEntry& insertScope(uint64_t scope_id);
void eraseScope(uint64_t scope_id);
- void eraseHistogram(uint64_t histogram);
+ void eraseHistograms(const std::vector<uint64_t>& histograms);
// The TLS scope cache is keyed by scope ID. This is used to avoid complex circular references
// during scope destruction. An ID is required vs. using the address of the scope pointer
@@ -462,7 +462,7 @@ private:
std::string getTagsForName(const std::string& name, TagVector& tags) const;
void clearScopeFromCaches(uint64_t scope_id, CentralCacheEntrySharedPtr central_cache);
- void clearHistogramFromCaches(uint64_t histogram_id);
+ void clearHistogramsFromCaches();
void releaseScopeCrossThread(ScopeImpl* scope);
void mergeInternal(PostMergeCb merge_cb);
bool rejects(StatName name) const;
@@ -513,6 +513,12 @@ private:
mutable Thread::MutexBasicLockable hist_mutex_;
StatSet<ParentHistogramImpl> histogram_set_ ABSL_GUARDED_BY(hist_mutex_);
+
+ // Histogram IDs that are queued for cross-thread cleanup. Because a large
+ // number of histograms can be released at once (e.g. when a scope is
+ // deleted), it is likely more efficient to batch their cleanup, which
+ // would otherwise entail a post() per histogram per thread.
+ std::vector<uint64_t> histograms_to_cleanup_ ABSL_GUARDED_BY(hist_mutex_);
};
using ThreadLocalStoreImplPtr = std::unique_ptr<ThreadLocalStoreImpl>;
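
As an aside on the header change: the ABSL_GUARDED_BY(hist_mutex_) annotation on the new member lets clang's -Wthread-safety analysis verify at compile time that histograms_to_cleanup_ is only touched while hist_mutex_ is held. Here is a minimal sketch of that annotation pattern, using Abseil's mutex directly rather than Envoy's Thread::MutexBasicLockable wrapper; the class and member names are illustrative only.

#include <cstdint>
#include <vector>

#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

class PendingIds {
public:
  void add(uint64_t id) {
    absl::MutexLock lock(&mu_); // the analysis sees mu_ held for this scope
    ids_.push_back(id);
  }

  // Uncommenting this would produce a -Wthread-safety warning under clang,
  // because ids_ is read without holding mu_:
  // size_t unsafeSize() const { return ids_.size(); }

private:
  mutable absl::Mutex mu_;
  std::vector<uint64_t> ids_ ABSL_GUARDED_BY(mu_);
};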