fix blockref allocation garbage

This commit is contained in:
nora 2022-08-20 18:42:15 +02:00
parent 3c598064ed
commit 6c94456b17
2 changed files with 60 additions and 50 deletions

View file

@ -42,9 +42,8 @@ unsafe impl GlobalAlloc for Awwoc {
unsafe fn alloc(&self, layout: std::alloc::Layout) -> *mut u8 { unsafe fn alloc(&self, layout: std::alloc::Layout) -> *mut u8 {
let mut root = lock(&BLOCK); let mut root = lock(&BLOCK);
eprintln!("alloc....");
match root.alloc_inner(layout) { match root.alloc_inner(layout) {
Some(ptr) => dbg!(ptr.as_ptr()), Some(ptr) => ptr.as_ptr(),
None => null_mut(), None => null_mut(),
} }
} }
@ -109,6 +108,11 @@ struct RootNode {
next_free_block: Option<NonNull<BlockRef>>, next_free_block: Option<NonNull<BlockRef>>,
} }
struct BlockRefBlock {
start: NonNull<BlockRef>,
len: usize,
}
impl RootNode { impl RootNode {
unsafe fn find_in_free_list(&mut self, size: usize) -> Option<NonNull<u8>> { unsafe fn find_in_free_list(&mut self, size: usize) -> Option<NonNull<u8>> {
if let Some(mut current_block) = self.next_free_block { if let Some(mut current_block) = self.next_free_block {
@ -137,11 +141,22 @@ impl RootNode {
None None
} }
#[inline(never)]
unsafe fn new_blockref(&mut self) -> Option<NonNull<BlockRef>> { unsafe fn new_blockref(&mut self) -> Option<NonNull<BlockRef>> {
let blockref_block_offset = self.block_count % BLOCK_REF_BLOCK_AMOUNT; let last_br_amount = self.block_count % BLOCK_REF_BLOCK_AMOUNT;
let new_block_ptr = if blockref_block_offset == 0 {
eprintln!("time to make a new blockref alloc"); let new_block_ptr = if last_br_amount > 0 {
// just append another block
// last_block points to the correct br_block for adding a new br
// we just need to offset it
let last_block = self
.last_block
.unwrap_or_else(|| abort("last_block not found even though count is nonnull\n"));
let new_br_block = last_block.as_ptr().add(1);
self.last_block = NonNull::new(new_br_block);
new_br_block
} else {
// our current blockref block is full, we need a new one // our current blockref block is full, we need a new one
let new_block_ref_block = alloc_block_ref_block()?; let new_block_ref_block = alloc_block_ref_block()?;
@ -152,24 +167,6 @@ impl RootNode {
self.last_block = Some(new_block_ref_block); self.last_block = Some(new_block_ref_block);
new_block_ref_block.as_ptr() new_block_ref_block.as_ptr()
} else {
eprintln!("appending to current blockref alloc");
// just append another block
let last_block = self
.last_block
.unwrap_or_else(|| abort("last_block not found even though count is nonnull\n"));
let index_from_back = BLOCK_REF_BLOCK_AMOUNT - blockref_block_offset;
let new_block_ref_block = last_block.as_ptr().sub(index_from_back);
if let Some(last_ptr) = self.last_block {
(*last_ptr.as_ptr()).next = NonNull::new(new_block_ref_block);
}
self.last_block = NonNull::new(new_block_ref_block);
new_block_ref_block
}; };
NonNull::new(new_block_ptr) NonNull::new(new_block_ptr)
@ -183,29 +180,21 @@ impl RootNode {
return Some(ptr); return Some(ptr);
} }
eprintln!("no free list");
// nothing free, we have to allocate // nothing free, we have to allocate
let prev_last_block = self.last_block; let prev_last_block = self.last_block;
let new_blockref_ptr = self.new_blockref()?; let new_blockref_ptr = self.new_blockref()?;
eprintln!("got block ref");
let size = layout.size(); let size = layout.size();
let new_data_ptr = map::map(size)?; let new_data_ptr = map::map(size)?;
eprintln!("mapped");
self.block_count += 1; self.block_count += 1;
if let Some(prev_last_block) = prev_last_block { if let Some(prev_last_block) = prev_last_block {
(*prev_last_block.as_ptr()).next = Some(new_blockref_ptr); (*prev_last_block.as_ptr()).next = Some(new_blockref_ptr);
} }
eprintln!("what");
new_blockref_ptr.as_ptr().write(BlockRef { new_blockref_ptr.as_ptr().write(BlockRef {
start: new_data_ptr.as_ptr(), start: new_data_ptr.as_ptr(),
size, size,
@ -213,14 +202,12 @@ impl RootNode {
next_free_block: None, next_free_block: None,
}); });
eprintln!("uwu what"); Some(new_data_ptr)
dbg!(Some(new_data_ptr))
} }
} }
// SAFETY: I guess
unsafe impl Send for RootNode {} unsafe impl Send for RootNode {}
unsafe impl Sync for RootNode {}
#[repr(C)] #[repr(C)]
struct BlockRef { struct BlockRef {

View file

@ -1,23 +1,46 @@
use std::ptr::{self, NonNull}; #[cfg(all(unix, not(miri)))]
pub use unix::*;
pub unsafe fn map(len: usize) -> Option<NonNull<u8>> { #[cfg(miri)]
let prot = libc::PROT_READ | libc::PROT_WRITE; pub use miri::*;
let flags = libc::MAP_PRIVATE | libc::MAP_ANONYMOUS;
let ptr = libc::mmap(ptr::null_mut(), len, prot, flags, 0, 0).cast();
if is_invalid(ptr) { #[cfg(unix)]
None mod unix {
} else { use std::ptr::{self, NonNull};
Some(NonNull::new_unchecked(ptr))
pub unsafe fn map(len: usize) -> Option<NonNull<u8>> {
let prot = libc::PROT_READ | libc::PROT_WRITE;
let flags = libc::MAP_PRIVATE | libc::MAP_ANONYMOUS;
let ptr = libc::mmap(ptr::null_mut(), len, prot, flags, -1, 0).cast();
if is_invalid(ptr) {
None
} else {
Some(NonNull::new_unchecked(ptr))
}
}
pub unsafe fn unmap(addr: *mut u8, len: usize) {
libc::munmap(addr.cast(), len);
}
pub fn is_invalid(ptr: *mut u8) -> bool {
ptr.is_null() || ptr.addr() == 0xffffffffffffffff
} }
} }
pub unsafe fn unmap(addr: *mut u8, len: usize) { #[cfg(miri)]
libc::munmap(addr.cast(), len); mod miri {
} use std::alloc::{GlobalAlloc, System, Layout};
use std::ptr::NonNull;
pub fn is_invalid(ptr: *mut u8) -> bool { pub unsafe fn map(len: usize) -> Option<NonNull<u8>> {
ptr.is_null() || ptr.addr() == 0xffffffffffffffff NonNull::new(System.alloc_zeroed(Layout::from_size_align(len, 4096).unwrap()))
}
pub unsafe fn unmap(addr: *mut u8, len: usize) {
System.dealloc(addr, std::alloc::Layout::array::<u8>(len).unwrap())
}
} }
#[cfg(test)] #[cfg(test)]