commit 763b057fe3659775b3c0d35e4725cae3cb63a74c
Author: Nilstrieb <48135649+Nilstrieb@users.noreply.github.com>
Date:   Fri Aug 19 22:41:20 2022 +0200

    yeah

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..4fffb2f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+/target
+/Cargo.lock
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..fe3b25c
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,9 @@
+[package]
+name = "awwoc"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+libc = "0.2.132"
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000..fe2f6f4
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,176 @@
+#![feature(strict_provenance)]
+#![allow(dead_code)]
+
+use std::{
+    alloc::GlobalAlloc,
+    io::Write,
+    mem,
+    ptr::{addr_of_mut, null_mut, NonNull},
+    sync::{Mutex, MutexGuard},
+};
+
+mod map;
+
+/// Locks the mutex, ignoring poisoning: the allocator must keep working
+/// even if some thread panicked while holding the lock.
+fn lock<T>(mutex: &Mutex<T>) -> MutexGuard<'_, T> {
+    match mutex.lock().map_err(|e| e.into_inner()) {
+        Ok(t) => t,
+        Err(t) => t,
+    }
+}
+
+const BLOCK_REF_BLOCK_SIZE: usize = 4096;
+const BLOCK_REF_BLOCK_AMOUNT: usize = BLOCK_REF_BLOCK_SIZE / std::mem::size_of::<BlockRef>();
+
+pub struct Awwoc;
+
+/// Maps one fresh page used to store `BlockRef` bookkeeping entries.
+unsafe fn allow_block_ref_block() -> Option<NonNull<BlockRef>> {
+    let new_ptr = map::map(BLOCK_REF_BLOCK_SIZE)?;
+
+    // we have to allocate some space for the BlockRefs themselves
+
+    let block = new_ptr.cast::<BlockRef>();
+    Some(block)
+}
+
+unsafe fn allow_inner(layout: std::alloc::Layout) -> Option<NonNull<u8>> {
+    // SAFETY: soup
+
+    let mut root = lock(&BLOCK);
+
+    // first, try to find something in the free list
+    if let Some(mut free_block) = root.next_free_block {
+        // pointer to the link that currently points at `free_block`,
+        // so we can unlink a match without losing the rest of the list
+        let mut prev_next_ptr = addr_of_mut!(root.next_free_block);
+        loop {
+            let block_ref_ptr = free_block.as_ptr();
+            let block_ref = block_ref_ptr.read();
+
+            // only reuse a block that is at least as big as the request
+            if block_ref.size >= layout.size() {
+                prev_next_ptr.write(block_ref.next_free_block);
+                (*block_ref_ptr).next_free_block = None;
+                return NonNull::new(block_ref.start);
+            }
+
+            match block_ref.next_free_block {
+                Some(block) => {
+                    // keep the predecessor link in sync with the cursor
+                    prev_next_ptr = addr_of_mut!((*block_ref_ptr).next_free_block);
+                    free_block = block;
+                }
+                None => break,
+            }
+        }
+    }
+
+    // nothing free, we have to allocate
+    let first_block = match root.first_block {
+        Some(block) => block,
+        None => {
+            let block_ref_block = allow_block_ref_block()?;
+            root.first_block = Some(block_ref_block);
+
+            block_ref_block
+        }
+    };
+
+    let prev_last_block = root.last_block;
+
+    let new_block_ptr = if root.block_count < BLOCK_REF_BLOCK_AMOUNT {
+        // just append another block
+        let ptr = first_block.as_ptr().add(root.block_count);
+        root.last_block = NonNull::new(ptr);
+        ptr
+    } else {
+        let new_block_ref_block = allow_block_ref_block()?;
+        let last_ptr = root.last_block?;
+
+        (*last_ptr.as_ptr()).next = Some(new_block_ref_block);
+
+        root.last_block = Some(new_block_ref_block);
+
+        new_block_ref_block.as_ptr()
+    };
+
+    let size = layout.size();
+    // mmap hands back page-aligned memory, which satisfies any align <= 4096
+    let new_data_ptr = map::map(size)?;
+
+    root.block_count += 1;
+
+    if let Some(prev_last_block) = prev_last_block {
+        (*prev_last_block.as_ptr()).next = NonNull::new(new_block_ptr);
+    }
+
+    new_block_ptr.write(BlockRef {
+        start: new_data_ptr.as_ptr(),
+        size,
+        next: None,
+        next_free_block: None,
+    });
+
+    Some(new_data_ptr)
+}
+
+unsafe impl GlobalAlloc for Awwoc {
+    unsafe fn alloc(&self, layout: std::alloc::Layout) -> *mut u8 {
+        match allow_inner(layout) {
+            Some(ptr) => ptr.as_ptr(),
+            None => null_mut(),
+        }
+    }
+
+    unsafe fn dealloc(&self, ptr: *mut u8, _layout: std::alloc::Layout) {
+        // freeing is disabled for now; allocations are intentionally leaked
+        return;
+
+        let mut root = lock(&BLOCK);
+
+        let mut option_block = root.first_block;
+        while let Some(block) = option_block {
+            let block_ptr = block.as_ptr();
+
+            if (*block_ptr).start == ptr {
+                // push the freed block onto the front of the free list
+                let free = mem::replace(&mut root.next_free_block, Some(block));
+                (*block_ptr).next_free_block = free;
+                return;
+            }
+
+            option_block = (*block_ptr).next;
+        }
+
+        let _ = std::io::stderr().write_all("invalid pointer passed to dealloc\n".as_bytes());
+        libc::abort();
+    }
+}
+
+static BLOCK: Mutex<RootNode> = Mutex::new(RootNode {
+    first_block: None,
+    last_block: None,
+    block_count: 0,
+    next_free_block: None,
+});
+
+struct RootNode {
+    first_block: Option<NonNull<BlockRef>>,
+    last_block: Option<NonNull<BlockRef>>,
+    block_count: usize,
+    next_free_block: Option<NonNull<BlockRef>>,
+}
+
+// SAFETY: all access to the raw pointers goes through the surrounding mutex
+unsafe impl Send for RootNode {}
+unsafe impl Sync for RootNode {}
+
+#[repr(C)]
+struct BlockRef {
+    start: *mut u8,
+    size: usize,
+    next: Option<NonNull<BlockRef>>,
+    /// only present on freed blocks
+    next_free_block: Option<NonNull<BlockRef>>,
+}
diff --git a/src/map.rs b/src/map.rs
new file mode 100644
index 0000000..37b9b7f
--- /dev/null
+++ b/src/map.rs
@@ -0,0 +1,42 @@
+use std::ptr::{self, NonNull};
+
+/// Maps `len` bytes of fresh, zeroed, read+write anonymous memory.
+pub unsafe fn map(len: usize) -> Option<NonNull<u8>> {
+    let prot = libc::PROT_READ | libc::PROT_WRITE;
+    let flags = libc::MAP_PRIVATE | libc::MAP_ANONYMOUS;
+    // MAP_ANONYMOUS portably requires fd == -1
+    let ptr = libc::mmap(ptr::null_mut(), len, prot, flags, -1, 0).cast();
+
+    if is_invalid(ptr) {
+        None
+    } else {
+        Some(NonNull::new_unchecked(ptr))
+    }
+}
+
+pub unsafe fn unmap(addr: *mut u8, len: usize) {
+    libc::munmap(addr.cast(), len);
+}
+
+/// `mmap` signals failure with `MAP_FAILED` (all bits set); also reject null.
+pub fn is_invalid(ptr: *mut u8) -> bool {
+    ptr.is_null() || ptr.addr() == usize::MAX
+}
+
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn map_write_unmap() {
+        unsafe {
+            let ptr = super::map(1000).unwrap();
+            let ptr = ptr.as_ptr();
+
+            assert_eq!(ptr.read(), 0);
+
+            ptr.write_volatile(5);
+            assert_eq!(ptr.read_volatile(), 5);
+
+            super::unmap(ptr, 1000);
+        }
+    }
+}
diff --git a/tests/collections.rs b/tests/collections.rs
new file mode 100644
index 0000000..bb651e1
--- /dev/null
+++ b/tests/collections.rs
@@ -0,0 +1,28 @@
+use std::collections::BTreeMap;
+
+use awwoc::Awwoc;
+
+#[global_allocator]
+static AWWOC: Awwoc = Awwoc;
+
+#[test]
+fn vec() {
+    let mut vec = Vec::new();
+
+    for i in 0..10_000 {
+        vec.push(i);
+    }
+
+    assert!(vec.iter().enumerate().all(|(i, &item)| i == item));
+}
+
+#[test]
+fn btree_map() {
+    let mut map = BTreeMap::new();
+
+    for i in (0..1000).map(|i| i * 3) {
+        map.insert(i, i + 10);
+    }
+
+    assert!(map.iter().all(|(k, v)| *v == *k + 10));
+}