@tomaka
Created July 19, 2020 19:03
diff --git a/kernel/standalone/src/mem_alloc.rs b/kernel/standalone/src/mem_alloc.rs
index 83594be..0bb08b7 100644
--- a/kernel/standalone/src/mem_alloc.rs
+++ b/kernel/standalone/src/mem_alloc.rs
@@ -13,7 +13,8 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
-use core::ops::Range;
+use alloc::alloc::{GlobalAlloc, Layout};
+use core::{ops::Range, sync::atomic};
/// Initialize the memory allocator.
///
@@ -53,14 +54,121 @@ pub unsafe fn initialize(ranges: impl Iterator<Item = Range<usize>>) {
}
assert!(range.end >= range.start);
-    ALLOCATOR.lock().init(range.start, range.end - range.start);
+    ALLOCATOR
+        .0
+        .lock()
+        .init(range.start, range.end - range.start);
}
#[global_allocator]
-static ALLOCATOR: linked_list_allocator::LockedHeap = linked_list_allocator::LockedHeap::empty();
+static ALLOCATOR: CountersWrapper<linked_list_allocator::LockedHeap> =
+    CountersWrapper(linked_list_allocator::LockedHeap::empty());
#[cfg(not(any(test, doc, doctest)))]
#[alloc_error_handler]
fn alloc_error_handler(layout: core::alloc::Layout) -> ! {
panic!("allocation of 0x{:x} bytes failed", layout.size())
}
+
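+/// Wrapper around a `GlobalAlloc` implementation that measures how long each allocation,
+/// deallocation or reallocation takes and records it in the counters at the end of this module.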
+struct CountersWrapper<T>(T);
+
+unsafe impl<T: GlobalAlloc> GlobalAlloc for CountersWrapper<T> {
+    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+        let _timer = TimeReport::new();
+        self.0.alloc(layout)
+    }
+
+    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        let _timer = TimeReport::new();
+        self.0.dealloc(ptr, layout)
+    }
+
+    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+        let _timer = TimeReport::new();
+        self.0.alloc_zeroed(layout)
+    }
+
+    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+        let _timer = TimeReport::new();
+        self.0.realloc(ptr, layout, new_size)
+    }
+}
+
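+/// RAII guard that stores the value of the TSC at the time of its creation and, when dropped,
+/// measures the elapsed time and updates the histogram counters below.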
+struct TimeReport(u64);
+
+impl TimeReport {
+    fn new() -> Self {
+        TimeReport(unsafe { core::arch::x86_64::_rdtsc() })
+    }
+}
+
+impl Drop for TimeReport {
+    fn drop(&mut self) {
+        let elapsed = unsafe { core::arch::x86_64::_rdtsc() } - self.0;
+
+        // TODO: based on informal sampling under QEmu, we assume 2_000_000_000 cycles per
+        // second, i.e. half a nanosecond per cycle, or 2_000 cycles per microsecond.
+        let elapsed_micros = elapsed / 2 / 1_000;
+
+        if elapsed_micros < 4 {
+            COUNTER_LESS_4_US.fetch_add(1, atomic::Ordering::Relaxed);
+        }
+        if elapsed_micros < 16 {
+            COUNTER_LESS_16_US.fetch_add(1, atomic::Ordering::Relaxed);
+        }
+        if elapsed_micros < 64 {
+            COUNTER_LESS_64_US.fetch_add(1, atomic::Ordering::Relaxed);
+        }
+        if elapsed_micros < 256 {
+            COUNTER_LESS_256_US.fetch_add(1, atomic::Ordering::Relaxed);
+        }
+        if elapsed_micros < 1024 {
+            COUNTER_LESS_1024_US.fetch_add(1, atomic::Ordering::Relaxed);
+        }
+        if elapsed_micros < 4096 {
+            COUNTER_LESS_4096_US.fetch_add(1, atomic::Ordering::Relaxed);
+        }
+        if elapsed_micros < 16384 {
+            COUNTER_LESS_16384_US.fetch_add(1, atomic::Ordering::Relaxed);
+        }
+        if elapsed_micros < 65536 {
+            COUNTER_LESS_65536_US.fetch_add(1, atomic::Ordering::Relaxed);
+        }
+        COUNTER_TOTAL.fetch_add(1, atomic::Ordering::Relaxed);
+
+        if COUNTER_TOTAL.load(atomic::Ordering::Relaxed) >= 65536 {
+            panic!(
+                "{:?} {:?} {:?} {:?} {:?} {:?} {:?} {:?} {:?}",
+                COUNTER_LESS_4_US.load(atomic::Ordering::Relaxed),
+                COUNTER_LESS_16_US.load(atomic::Ordering::Relaxed),
+                COUNTER_LESS_64_US.load(atomic::Ordering::Relaxed),
+                COUNTER_LESS_256_US.load(atomic::Ordering::Relaxed),
+                COUNTER_LESS_1024_US.load(atomic::Ordering::Relaxed),
+                COUNTER_LESS_4096_US.load(atomic::Ordering::Relaxed),
+                COUNTER_LESS_16384_US.load(atomic::Ordering::Relaxed),
+                COUNTER_LESS_65536_US.load(atomic::Ordering::Relaxed),
+                COUNTER_TOTAL.load(atomic::Ordering::Relaxed)
+            );
+        }
+    }
+}
+
+/// Counter incremented whenever a memory allocation or deallocation takes less than 4µs.
+static COUNTER_LESS_4_US: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
+/// Counter incremented whenever a memory allocation or deallocation takes less than 16µs.
+static COUNTER_LESS_16_US: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
+/// Counter incremented whenever a memory allocation or deallocation takes less than 64µs.
+static COUNTER_LESS_64_US: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
+/// Counter incremented whenever a memory allocation or deallocation takes less than 256µs.
+static COUNTER_LESS_256_US: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
+/// Counter incremented whenever a memory allocation or deallocation takes less than 1ms.
+static COUNTER_LESS_1024_US: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
+/// Counter incremented whenever a memory allocation or deallocation takes less than 4ms.
+static COUNTER_LESS_4096_US: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
+/// Counter incremented whenever a memory allocation or deallocation takes less than 16ms.
+static COUNTER_LESS_16384_US: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
+/// Counter incremented whenever a memory allocation or deallocation takes less than 65ms.
+static COUNTER_LESS_65536_US: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
+/// Counter incremented at every single memory allocation and deallocation.
+static COUNTER_TOTAL: atomic::AtomicUsize = atomic::AtomicUsize::new(0);
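
For context, the patch above wraps the kernel's global allocator so that every allocation, deallocation or reallocation is timed with RDTSC and bucketed into a small latency histogram, which is dumped (via a panic) once 65536 calls have been recorded. Below is a self-contained, host-side sketch of the same idea, assuming std is available: it uses std::time::Instant instead of _rdtsc and wraps std::alloc::System instead of the kernel's linked-list heap. The names TimedAlloc, BUCKETS_US and record are illustrative only and do not appear in the patch.

// Host-side sketch only; the kernel code above has no std and uses RDTSC directly.
use std::alloc::{GlobalAlloc, Layout, System};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Instant;

/// Histogram buckets: calls faster than 4µs, 16µs, 64µs, ..., 65536µs (powers of four).
static BUCKETS_US: [AtomicUsize; 8] = [
    AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
    AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
];
/// Total number of timed allocator calls.
static TOTAL: AtomicUsize = AtomicUsize::new(0);

fn record(elapsed_micros: u64) {
    for (i, bucket) in BUCKETS_US.iter().enumerate() {
        // Thresholds are 4^1 .. 4^8 microseconds, matching the counters in the patch.
        if elapsed_micros < 4u64.pow(i as u32 + 1) {
            bucket.fetch_add(1, Ordering::Relaxed);
        }
    }
    TOTAL.fetch_add(1, Ordering::Relaxed);
}

/// Allocator wrapper that times every call, in the spirit of `CountersWrapper` above.
struct TimedAlloc<T>(T);

unsafe impl<T: GlobalAlloc> GlobalAlloc for TimedAlloc<T> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let start = Instant::now();
        let ptr = unsafe { self.0.alloc(layout) };
        record(start.elapsed().as_micros() as u64);
        ptr
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        let start = Instant::now();
        unsafe { self.0.dealloc(ptr, layout) };
        record(start.elapsed().as_micros() as u64);
    }
}

#[global_allocator]
static GLOBAL: TimedAlloc<System> = TimedAlloc(System);

fn main() {
    // Generate some allocator traffic, then print the histogram.
    for _ in 0..1_000 {
        let v: Vec<u8> = vec![0; 4096];
        drop(v);
    }
    let buckets: Vec<usize> = BUCKETS_US.iter().map(|b| b.load(Ordering::Relaxed)).collect();
    println!("{} calls, buckets: {:?}", TOTAL.load(Ordering::Relaxed), buckets);
}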