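// Micro-benchmark: measure write throughput to a single file under different
// open flags (O_APPEND, O_DIRECT, O_SYNC) and sync strategies.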
// nix = "0.18.0"
use nix::{
    errno::Errno,
    fcntl::{self, OFlag},
    sys::{stat::Mode, uio::pwrite},
    unistd::{close, fsync},
};
use std::{
    alloc::{self, Layout},
    os::unix::io::RawFd,
    ptr,
    sync::{
        atomic::{AtomicBool, AtomicU64, Ordering},
        Arc,
    },
    time::{Duration, Instant},
};
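// Write target for the benchmark; the underlying file is removed from disk
// when this handle is dropped.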
pub struct BurnerFile {
    path: String,
    fd: RawFd,
}

impl BurnerFile {
    fn new(path: String, flag: OFlag) -> BurnerFile {
        let flags = OFlag::O_RDWR | OFlag::O_CREAT | OFlag::O_TRUNC | flag;
        let mode = Mode::S_IRUSR | Mode::S_IWUSR;
        let fd = fcntl::open(path.as_str(), flags, mode).unwrap();
        BurnerFile { path, fd }
    }

    fn sync(&self) {
        fsync(self.fd).unwrap();
    }
}

impl Drop for BurnerFile {
    fn drop(&mut self) {
        close(self.fd).unwrap();
        std::fs::remove_file(&self.path).unwrap();
    }
}
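// One benchmark run: `job_count` writer threads issue chunk-sized writes to a
// shared BurnerFile until `stop` is set; `counter` counts issued writes.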
struct Context {
    threads: Vec<std::thread::JoinHandle<()>>,
    stop: Option<Arc<AtomicBool>>,
    counter: Arc<AtomicU64>,
    _file: Arc<BurnerFile>,
}

impl Context {
    fn stop(&mut self) -> u64 {
        if let Some(stop) = &self.stop {
            stop.store(true, Ordering::Relaxed);
        }
        for t in self.threads.drain(..) {
            t.join().unwrap();
        }
        self.counter.load(Ordering::Relaxed)
    }

    fn new(
        path: String,
        job_count: usize,
        flag: OFlag,
        chunk: Arc<Vec<u8>>,
        sync: bool,
    ) -> Context {
        let mut threads = vec![];
        let stop = Arc::new(AtomicBool::new(false));
        let counter = Arc::new(AtomicU64::new(0));
        let f = Arc::new(BurnerFile::new(path, flag));
        for _ in 0..job_count {
            let (stop, f, counter, chunk) =
                (stop.clone(), f.clone(), counter.clone(), chunk.clone());
            let t = std::thread::spawn(move || {
                while !stop.load(Ordering::Relaxed) {
                    let offset = counter.fetch_add(1, Ordering::Relaxed) * chunk.len() as u64;
                    // in append mode the kernel ignores this offset anyway
                    pwrite_exact(f.fd, offset, &chunk);
                    if sync {
                        f.sync();
                    }
                }
            });
            threads.push(t);
        }
        Context {
            threads,
            stop: Some(stop),
            counter,
            _file: f,
        }
    }
}
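// Write the whole buffer at `offset`, retrying on EAGAIN and resuming after short writes.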
fn pwrite_exact(fd: RawFd, mut offset: u64, content: &[u8]) {
    let mut written = 0;
    while written < content.len() {
        let bytes = match pwrite(fd, &content[written..], offset as _) {
            Ok(bytes) => bytes,
            Err(e) if e.as_errno() == Some(Errno::EAGAIN) => continue,
            Err(e) => panic!("pwrite error {}", e),
        };
        written += bytes;
        offset += bytes as u64;
    }
}
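// Each configuration runs for a fixed wall-clock duration before throughput is reported.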
const TEST_TIME_SECONDS: u64 = 15;

// arguments: <file> <chunk_bytes> <threads> <mode: 0=append, 1=direct, 2=none> <manual_sync>
// manual_sync=true issues an fsync after every write; manual_sync=false opens
// the file with O_SYNC instead.
fn main() {
    let args: Vec<String> = std::env::args().collect();
    let mut index = 1;
    let path: String = args[index].clone();
    index += 1;
    let chunk_size: usize = args[index].parse().unwrap();
    index += 1;
    let job_count: usize = args[index].parse().unwrap();
    index += 1;
    let flag_index: usize = args[index].parse().unwrap();
    index += 1;
    let sync: bool = args[index].parse().unwrap();
    let flags = vec![
        (OFlag::O_APPEND, "append"),
        (OFlag::O_DIRECT, "direct"),
        (OFlag::O_RDWR, "none"),
    ];
    let flag_desc = flags[flag_index].1;
    let mut flag = flags[flag_index].0;
    if !sync {
        flag = flag | OFlag::O_SYNC;
    }
    println!(
        "begin threads={} block_size={} mode={} manual_sync={}",
        job_count, chunk_size, flag_desc, sync
    );
    // Allocate a 4 KiB-aligned chunk filled with '%' bytes; O_DIRECT requires
    // the buffer (and the write size/offset) to be suitably aligned.
    let chunk = unsafe {
        let layout = Layout::from_size_align(chunk_size, 4 * 1024).expect("Invalid layout");
        let raw = alloc::alloc(layout);
        assert!(!raw.is_null(), "allocation failed");
        for i in 0..chunk_size {
            ptr::write(raw.add(i), b'%');
        }
        Vec::from_raw_parts(raw, chunk_size, chunk_size)
    };
    let chunk = Arc::new(chunk);
    let (count, actual_duration) = {
        let begin = Instant::now();
        let mut context = Context::new(path, job_count, flag, chunk, sync);
        std::thread::sleep(Duration::from_secs(TEST_TIME_SECONDS));
        let count = context.stop();
        let end = Instant::now();
        (count, end.duration_since(begin))
    };
    println!(
        "{} IOs, {} MiB/s, {} IO/s",
        count,
        count as f64 * chunk_size as f64 / actual_duration.as_secs_f64() / 1024.0 / 1024.0,
        count as f64 / actual_duration.as_secs_f64()
    );
}
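For reference, assuming the gist is built into a binary named burner (the name and file path below are placeholders), the 16 KiB single-thread O_DIRECT run with a per-write fsync would be launched roughly as:

    ./burner /mnt/nvme/burner.dat 16384 1 1 true

Passing false as the last argument drops the per-write fsync and opens the file with O_SYNC instead, which corresponds to the "O_DIRECT | O_SYNC" rows below.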
Results on an NVMe disk:
[16 KiB chunks, 1 thread]
O_DIRECT | O_SYNC: 173MB/s
O_DIRECT: 178MB/s
O_APPEND | O_SYNC: 143MB/s
O_APPEND: 140MB/s
O_SYNC: 143MB/s
none: 135MB/s
[4 KiB chunks, 1 thread]
O_DIRECT | O_SYNC: 48MB/s
O_DIRECT: 46MB/s
O_APPEND | O_SYNC: 47MB/s
O_APPEND: 46MB/s
O_SYNC: 48MB/s
none: 45MB/s