@roblabla
Created May 28, 2019 11:19
diff --cc kernel/src/ipc/session.rs
index 09a760fb,09a760fb..82744708
--- a/kernel/src/ipc/session.rs
+++ b/kernel/src/ipc/session.rs
@@@ -30,11 -30,11 +30,12 @@@ use crate::process::ThreadStruct
use core::sync::atomic::{AtomicUsize, Ordering};
use core::slice;
use byteorder::{LE, ByteOrder};
--use crate::paging::{MappingAccessRights, process_memory::ProcessMemory};
++use crate::paging::{MappingAccessRights, process_memory::ProcessMemory, PAGE_SIZE};
use crate::mem::{UserSpacePtr, UserSpacePtrMut, VirtualAddress};
use bit_field::BitField;
use crate::error::KernelError;
use crate::checks::check_lower_than_usize;
++use kfs_libutils::align_up;
/// Wrapper around the currently active session and the incoming request list.
/// They are kept together so they are locked together.
@@@ -191,6 -191,6 +192,17 @@@ struct Request
/// insert a result (potentially an error) in this option before waking up
/// the sender.
answered: Arc<SpinLock<Option<Result<(), UserspaceError>>>>,
++ /// A/B/W Buffers that were mapped during the request. We should unmap them
++ /// when replying.
++ buffers: Vec<Buffer>
++}
++
++#[derive(Debug)]
++struct Buffer {
++ /// True if the receiver's mapping is writable (B/W descriptors).
++ writable: bool,
++ /// Address of the buffer in the sender's address space.
++ source_addr: VirtualAddress,
++ /// Address the buffer was mapped at in the receiver's address space.
++ dest_addr: VirtualAddress,
++ /// Size of the buffer, in bytes.
++ size: usize
}
/// Send an IPC Buffer from the sender into the receiver.
@@@ -208,8 -208,8 +220,10 @@@
/// In practice, the performance lost by memcpying the data can be made up by not
/// needing to flush the page table cache, so care must be taken when choosing
/// between the Buffer and Pointer families of IPC.
++///
++/// Should be called from the receiver process.
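++///
++/// If the buffer is not page-aligned, the partial first and last pages are
++/// copied into fresh pages on the receiver's side, and only the page-aligned
++/// middle (if any) is shared directly between the two address spaces.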
#[allow(unused)]
--fn buf_map(from_buf: &[u8], to_buf: &mut [u8], curoff: &mut usize, from_mem: &mut ProcessMemory, to_mem: &mut ProcessMemory, flags: MappingAccessRights) -> Result<(), UserspaceError> {
++fn buf_map(from_buf: &[u8], to_buf: &mut [u8], curoff: &mut usize, from_mem: &mut ProcessMemory, to_mem: &mut ProcessMemory, flags: MappingAccessRights, buffers: &mut Vec<Buffer>) -> Result<(), UserspaceError> {
let lowersize = LE::read_u32(&from_buf[*curoff..*curoff + 4]);
let loweraddr = LE::read_u32(&from_buf[*curoff + 4..*curoff + 8]);
let rest = LE::read_u32(&from_buf[*curoff + 8..*curoff + 12]);
@@@ -244,12 -244,12 +258,59 @@@
// BODY: reuse it here.
// Map the descriptor in the other process.
-- let mapping = from_mem.share_existing_mapping(VirtualAddress(addr), size)?;
-- let to_addr = to_mem.find_available_space(size)?;
-- to_mem.map_shared_mapping(mapping, to_addr, MappingAccessRights::u_rw())?;
++ let to_addr_full = to_mem.find_available_space(align_up(size + (addr % PAGE_SIZE), PAGE_SIZE))?;
++ let to_addr = to_addr_full + (addr % PAGE_SIZE);
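++ // E.g. (hypothetical values): addr = 0x12345, size = 0x3000, PAGE_SIZE = 0x1000.
++ // We reserve align_up(0x3000 + 0x345, 0x1000) = 0x4000 bytes, and to_addr
++ // keeps addr's page offset: to_addr = to_addr_full + 0x345.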
++
++ let mut size_handled = 0;
++ if addr % PAGE_SIZE != 0 {
++ // memcpy the first page.
++ let first_page_size = PAGE_SIZE - (addr % PAGE_SIZE);
++
++ let from_mapping = from_mem.mirror_mapping(VirtualAddress(addr), first_page_size)?;
++ let from = UserSpacePtr::from_raw_parts(from_mapping.addr().addr() as *const u8, from_mapping.len());
++
++ to_mem.create_regular_mapping(to_addr_full, PAGE_SIZE, MappingAccessRights::u_rw())?;
++ let mut to = UserSpacePtrMut::from_raw_parts_mut(to_addr.addr() as *mut u8, first_page_size);
++ to.copy_from_slice(&from);
++ size_handled += first_page_size;
++ }
++
++ if (addr + size) % PAGE_SIZE != 0 && (to_addr + size).floor() != to_addr_full {
++ // memcpy the last page.
++
++ let from_mapping = from_mem.mirror_mapping((VirtualAddress(addr) + size).floor(), (addr + size) % PAGE_SIZE)?;
++ let from = UserSpacePtr::from_raw_parts(from_mapping.addr().addr() as *const u8, from_mapping.len());
++
++ let to_last_page = (to_addr + size).floor();
++ to_mem.create_regular_mapping(to_last_page, PAGE_SIZE, MappingAccessRights::u_rw())?;
++ let mut to = UserSpacePtrMut::from_raw_parts_mut(to_last_page.addr() as *mut u8, (to_addr + size).addr() % PAGE_SIZE);
++
++ to.copy_from_slice(&from);
++ size_handled += (addr + size) % PAGE_SIZE;
++ }
++
++ if size - size_handled != 0 {
++ // Share middle pages
++
++ assert!((size - size_handled) % PAGE_SIZE == 0, "Remaining size should be a multiple of PAGE_SIZE");
++
++ let addr = align_up(addr, PAGE_SIZE);
++ let to_addr = to_addr.ceil();
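++ // addr and to_addr share the same page offset, so rounding both up
++ // lands on the start of the directly-shareable middle pages.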
++
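++ // share_existing_mapping presumably returns the shared mapping plus
++ // addr's offset inside it, and map_partial_shared_mapping maps only
++ // the needed window of that mapping into the receiver.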
++ let (mapping, offset) = from_mem.share_existing_mapping(VirtualAddress(addr), size - size_handled)?;
++ to_mem.map_partial_shared_mapping(mapping, to_addr, offset, size - size_handled, MappingAccessRights::u_rw())?;
++ }
++
to_addr.addr()
};
++ buffers.push(Buffer {
++ writable: flags.contains(MappingAccessRights::u_w()),
++ source_addr: VirtualAddress(addr),
++ dest_addr: VirtualAddress(to_addr),
++ size
++ });
++
let loweraddr = to_addr as u32;
let rest = *0u32
.set_bits(0..2, bufflags)
@@@ -294,6 -294,6 +355,7 @@@ impl ClientSession
sender_bufsize: buf.len(),
answered: answered.clone(),
sender: scheduler::get_current_thread(),
++ buffers: Vec::new()
})
}
@@@ -378,16 -378,16 +440,16 @@@ impl ServerSession
/// been set by a prior call to wait.
pub fn receive(&self, mut buf: UserSpacePtrMut<[u8]>, has_c_descriptors: bool) -> Result<(), UserspaceError> {
// Read active session
-- let internal = self.0.internal.lock();
++ let mut internal = self.0.internal.lock();
// TODO: In case of a race, we might want to check that receive is only called once.
// Can races even happen ?
-- let active = internal.active_request.as_ref().unwrap();
++ let active = internal.active_request.as_mut().unwrap();
let sender = active.sender.process.clone();
-- let memlock = sender.pmemory.lock();
-- let mapping = memlock.mirror_mapping(active.sender_buf, active.sender_bufsize)?;
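++ // owned_mirror_mapping presumably keeps the mirror mapped after the
++ // pmemory guard drops, letting pass_message re-lock pmemory to map
++ // A/B/W buffers without deadlocking.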
++ let mapping = sender.pmemory.lock().owned_mirror_mapping(active.sender_buf, active.sender_bufsize)?;
++
let sender_buf = unsafe {
slice::from_raw_parts_mut(mapping.addr().addr() as *mut u8, mapping.len())
};
@@@ -398,7 -398,7 +460,9 @@@
CBufBehavior::Disabled
};
-- pass_message(sender_buf, active.sender.clone(), &mut *buf, scheduler::get_current_thread(), false, &*memlock, c_bufs)?;
++ pass_message(sender_buf, active.sender.clone(), &mut *buf,
++ scheduler::get_current_thread(), false, &mut active.buffers,
++ c_bufs)?;
Ok(())
}
@@@ -420,20 -420,20 +484,21 @@@
// TODO: This probably has an errcode.
assert!(self.0.internal.lock().active_request.is_some(), "Called reply without an active session");
-- let active = self.0.internal.lock().active_request.take().unwrap();
++ let mut active = self.0.internal.lock().active_request.take().unwrap();
let sender = active.sender.process.clone();
-- let memlock = sender.pmemory.lock();
++ let mapping = sender.pmemory.lock().owned_mirror_mapping(active.sender_buf, active.sender_bufsize)?;
-- let mapping = memlock.mirror_mapping(active.sender_buf, active.sender_bufsize)?;
let sender_buf = unsafe {
slice::from_raw_parts_mut(mapping.addr().addr() as *mut u8, mapping.len())
};
-- pass_message(&*buf, scheduler::get_current_thread(), sender_buf, active.sender.clone(), true, &*memlock, CBufBehavior::Disabled)?;
++ let res = pass_message(&*buf, scheduler::get_current_thread(), sender_buf,
++ active.sender.clone(), true, &mut active.buffers,
++ CBufBehavior::Disabled);
-- *active.answered.lock() = Some(Ok(()));
++ *active.answered.lock() = Some(res);
scheduler::add_to_schedule_queue(active.sender.clone());
@@@ -471,7 -471,7 +536,11 @@@ enum CBufBehavior
/// This function should always be called from the context of the receiver/
/// server.
#[allow(unused)]
--fn pass_message(from_buf: &[u8], from_proc: Arc<ThreadStruct>, to_buf: &mut [u8], to_proc: Arc<ThreadStruct>, is_reply: bool, other_memlock: &ProcessMemory, c_bufs: CBufBehavior) -> Result<(), UserspaceError> {
++fn pass_message(from_buf: &[u8], from_proc: Arc<ThreadStruct>, to_buf: &mut [u8],
++ to_proc: Arc<ThreadStruct>, is_reply: bool, buffers: &mut Vec<Buffer>,
++ c_bufs: CBufBehavior)
++ -> Result<(), UserspaceError>
++{
// TODO: pass_message deadlocks when sending message to the same process.
// BODY: If from_proc and to_proc are the same process, pass_message will
// BODY: deadlock trying to acquire the locks to the handle table or the
@@@ -500,19 -500,19 +569,23 @@@
if descriptor.num_copy_handles() != 0 || descriptor.num_move_handles() != 0 {
let mut from_handle_table = from_proc.process.phandles.lock();
-- let mut to_handle_table = to_proc.process.phandles.lock();
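++ // If both threads belong to the same process, locking phandles a
++ // second time would deadlock; reuse from_handle_table in that case.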
++ let mut to_handle_table = if !Arc::ptr_eq(&from_proc.process, &to_proc.process) {
++ Some(to_proc.process.phandles.lock())
++ } else {
++ None
++ };
for i in 0..descriptor.num_copy_handles() {
let handle = LE::read_u32(&from_buf[curoff..curoff + 4]);
let handle = from_handle_table.get_handle(handle)?;
-- let handle = to_handle_table.add_handle(handle);
++ let handle = to_handle_table.as_mut().unwrap_or(&mut from_handle_table).add_handle(handle);
LE::write_u32(&mut to_buf[curoff..curoff + 4], handle);
curoff += 4;
}
for i in 0..descriptor.num_move_handles() {
let handle = LE::read_u32(&from_buf[curoff..curoff + 4]);
let handle = from_handle_table.delete_handle(handle)?;
-- let handle = to_handle_table.add_handle(handle);
++ let handle = to_handle_table.as_mut().unwrap_or(&mut from_handle_table).add_handle(handle);
LE::write_u32(&mut to_buf[curoff..curoff + 4], handle);
curoff += 4;
}
@@@ -554,18 -554,18 +627,21 @@@
check_lower_than_usize(to_size, UserspaceError::InvalidAddress)?;
check_lower_than_usize(to_addr.saturating_add(to_size), UserspaceError::InvalidAddress)?;
++ let sender_memlock = if !is_reply {
++ from_proc.process.pmemory.lock()
++ } else {
++ unimplemented!();
++ };
++
let (mapping, mut uspaceptr) = if !is_reply {
// We're receiving: C Buffers are in our address space, X buffers
// are in the other address space
-- let mapping = other_memlock.mirror_mapping(VirtualAddress(from_addr as usize), from_size as usize)?;
++ let mapping = sender_memlock.mirror_mapping(VirtualAddress(from_addr as usize), from_size as usize)?;
let uspaceptr = UserSpacePtrMut::from_raw_parts_mut(to_addr as *mut u8, to_size as usize);
(mapping, uspaceptr)
} else {
-- // We're receiving: C Buffers are in our address space, X buffers
-- // are in the other address space
-- let mapping = other_memlock.mirror_mapping(VirtualAddress(to_addr as usize), to_size as usize)?;
-- let uspaceptr = UserSpacePtrMut::from_raw_parts_mut(from_addr as *mut u8, from_size as usize);
-- (mapping, uspaceptr)
++ unimplemented!("Reply X buffers/Request C buffers.")
};
let (from, to) = {
@@@ -594,20 -594,20 +670,84 @@@
}
}
-- if hdr.num_a_descriptors() != 0 || hdr.num_b_descriptors() != 0 {
-- let mut from_mem = from_proc.process.pmemory.lock();
-- let mut to_mem = to_proc.process.pmemory.lock();
++ {
++ // Get the ProcessMemory of the two processes. One of them is already
++ // locked with sender_memlock. Depending on whether we're in a request or
++ // a reply, lock or acquire sender_memlock appropriately.
-- for i in 0..hdr.num_a_descriptors() {
-- buf_map(from_buf, to_buf, &mut curoff, &mut *from_mem, &mut *to_mem, MappingAccessRights::empty())?;
-- }
++ if hdr.num_a_descriptors() != 0 || hdr.num_b_descriptors() != 0 ||
++ hdr.num_w_descriptors() != 0
++ {
++ if is_reply {
++ // TODO: Figure out which error to return.
++ return Err(UserspaceError::PortRemoteDead);
++ }
++
++ let mut from_mem = from_proc.process.pmemory.lock();
++ let mut to_mem = to_proc.process.pmemory.lock();
++
++ for i in 0..hdr.num_a_descriptors() {
++ buf_map(from_buf, to_buf, &mut curoff, &mut *from_mem,
++ &mut *to_mem, MappingAccessRights::empty(), buffers)?;
++ }
-- for i in 0..hdr.num_b_descriptors() {
-- buf_map(from_buf, to_buf, &mut curoff, &mut *from_mem, &mut *to_mem, MappingAccessRights::WRITABLE)?;
++ for i in 0..hdr.num_b_descriptors() {
++ buf_map(from_buf, to_buf, &mut curoff, &mut *from_mem,
++ &mut *to_mem, MappingAccessRights::WRITABLE, buffers)?;
++ }
++
++ for i in 0..hdr.num_w_descriptors() {
++ buf_map(from_buf, to_buf, &mut curoff, &mut *from_mem,
++ &mut *to_mem, MappingAccessRights::WRITABLE, buffers)?;
++ }
}
-- for i in 0..hdr.num_w_descriptors() {
-- buf_map(from_buf, to_buf, &mut curoff, &mut *from_mem, &mut *to_mem, MappingAccessRights::WRITABLE)?;
++ if is_reply {
++ // Unmap A/B/W buffers when replying.
++ for buffer in buffers {
++ // Memcpy back the first and last pages.
++
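++ // buffer.dest_addr was mapped in our (the replier's) address space by
++ // buf_map; buffer.source_addr is the original buffer in the sender's
++ // address space.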
++ let addr = buffer.dest_addr;
++ let size = buffer.size;
++ let to_addr = buffer.source_addr;
++ let to_addr_full = to_addr.floor();
++
++ let mut from_mem = from_proc.process.pmemory.lock();
++ // Note: to_proc, not from_proc: locking the same pmemory twice would deadlock.
++ let to_mem = to_proc.process.pmemory.lock();
++
++ if addr.addr() % PAGE_SIZE != 0 {
++ if buffer.writable {
++ // memcpy the first page.
++ let first_page_size = PAGE_SIZE - (addr.addr() % PAGE_SIZE);
++
++ let from = UserSpacePtr::from_raw_parts(addr.addr() as *const u8, first_page_size);
++ let to_mapping = to_mem.mirror_mapping(to_addr, first_page_size)?;
++ let mut to = UserSpacePtrMut::from_raw_parts_mut(to_mapping.addr().addr() as *mut u8, first_page_size);
++ to.copy_from_slice(&from);
++ }
++
++ from_mem.unmap(addr.floor(), PAGE_SIZE)?;
++ }
++
++ if (addr.addr() + size) % PAGE_SIZE != 0 && (to_addr + size).floor() != to_addr_full {
++ if buffer.writable {
++ // memcpy the last page.
++ let last_page_size = (addr.addr() + size) % PAGE_SIZE;
++
++ let from = UserSpacePtr::from_raw_parts((addr + size).floor().addr() as *const u8, last_page_size);
++
++ let to_last_page = (to_addr + size).floor();
++ let to_mapping = to_mem.mirror_mapping(to_last_page, last_page_size)?;
++ let mut to = UserSpacePtrMut::from_raw_parts_mut(to_mapping.addr().addr() as *mut u8, last_page_size);
++
++ to.copy_from_slice(&from);
++ }
++
++ from_mem.unmap((addr + size).floor(), PAGE_SIZE)?;
++ }
++
++ // Unmap the shared middle pages (the partially-copied head and tail
++ // pages were already unmapped above).
++ let middle_addr = addr.ceil();
++ let middle_end = (addr + size).floor();
++ if middle_end.addr() > middle_addr.addr() {
++ from_mem.unmap(middle_addr, middle_end.addr() - middle_addr.addr())?;
++ }
++ }
}
}