finish insert

nora 2021-12-22 20:19:45 +01:00
parent 594ecfc089
commit eebea36b1d
3 changed files with 88 additions and 25 deletions


@@ -95,6 +95,13 @@ impl<T: ?Sized> Vechonk<T> {
         self.raw.pop()
     }
 
+    /// Insert an element at an index.
+    /// * If the insertion was successful, the old element is returned.
+    /// * If the new element doesn't fit the gap or can't be aligned, it is returned.
+    pub fn insert(&mut self, index: usize, element: Box<T>) -> Result<Box<T>, Box<T>> {
+        self.raw.insert_elem(element, index)
+    }
+
     /// An iterator over the elements yielding shared references
     pub fn iter(&self) -> Iter<T> {
         Iter::new(self)
@@ -172,8 +179,7 @@ impl<T: ?Sized> MutGuard<T> {
     /// * If the element fits in the space, the old element is returned
     /// * If the element does not fit in the space, the new element is returned again
     pub fn write(&mut self, element: Box<T>) -> Result<Box<T>, Box<T>> {
-        // SAFETY: We can assume that `index` is in bounds
-        unsafe { self.raw.insert_elem_unchecked(element, self.index) }
+        self.raw.insert_elem(element, self.index)
    }
}
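
Both the new `Vechonk::insert` and the reworked `MutGuard::write` now go through `RawVechonk::insert_elem` and share its contract: `Ok` carries the old boxed element, `Err` hands the rejected replacement back to the caller. A minimal usage sketch of the success path; the `vechonk::{vechonk, Vechonk}` import path is an assumption based on the test suite below:

use vechonk::{vechonk, Vechonk}; // assumed crate-root exports, mirroring the tests

fn replace_first() {
    let mut chonk: Vechonk<str> = vechonk!["hello".into(), "uwu".into()];

    // "owo" (3 bytes) fits into the slot that "hello" (5 bytes) occupies,
    // so the insert succeeds and the old element comes back in `Ok`.
    match chonk.insert(0, "owo".into()) {
        Ok(old) => assert_eq!(&*old, "hello"),
        // A replacement that is too large (or cannot be aligned) would be
        // handed back here instead, leaving the Vechonk untouched.
        Err(rejected) => drop(rejected),
    }

    assert_eq!(&chonk[0], "owo");
}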


@@ -133,28 +133,32 @@ impl<T: ?Sized> RawVechonk<T> {
         self.elem_size += elem_size;
         self.len += 1;
 
-        // SAFETY: This was allocated by `Box`, so we know that it is valid.
-        // The ownership of the value was transferred to `Vechonk` by copying it out
-        unsafe {
-            alloc::alloc::dealloc(
-                elem_ptr as _,
-                Layout::from_size_align(elem_size, mem::align_of_val(&*elem_ptr)).unwrap(),
-            )
-        }
+        // SAFETY: `elem_ptr` comes from a `Box<T>`
+        unsafe {
+            dealloc_box(elem_ptr);
+        }
     }
 
-    /// Insert an element at an index
-    /// # Safety
-    /// * The index must be in bounds
-    ///
-    /// If the insertion was successful, the old element is returned.
-    /// If the new element doesn't fit the gap or can't be aligned, it is returned.
-    pub unsafe fn insert_elem_unchecked(
-        &mut self,
-        element: Box<T>,
-        index: usize,
-    ) -> Result<Box<T>, Box<T>> {
-        // this is where the free space, where we could place the element starts
+    /// Insert an element at an index.
+    /// * If the insertion was successful, the old element is returned.
+    /// * If the new element doesn't fit the gap or can't be aligned, it is returned.
+    pub fn insert_elem(&mut self, element: Box<T>, index: usize) -> Result<Box<T>, Box<T>> {
+        if index >= self.len {
+            // out of bounds
+            return Err(element);
+        }
+
+        /*
+            Imagine a Vechonk<dyn Any> (1 space = one byte) that contains u8, u8, u32, u32
+            Our `index` here is 2, we want to replace the 9847u32 with a 0u8
+            We actually want to write that u8 to where the padding was before, for optimization reasons
+
+            1|2 ¦98473875
+            ------------------
+            1|20     3875
+        */
+
+        // this is where the free space, including padding, where we could place the element starts
         // since there might be padding for the previous element, this is sometimes before `elem_offset`
         let free_space_start_offset = if index == 0 {
             self.cap
@@ -195,7 +199,38 @@ impl<T: ?Sized> RawVechonk<T> {
             return Err(element);
         }
 
-        todo!()
+        // SAFETY: `index` is not out of bounds, and we are overwriting the element afterwards
+        let old_elem = unsafe { self.box_elem_unchecked(index) };
+
+        let elem_ptr = Box::into_raw(element);
+
+        // SAFETY: `new_elem_starting_offset` has been calculated to fall within the allocation
+        let new_elem_start_ptr = unsafe { self.ptr.as_ptr().add(new_elem_starting_offset) };
+
+        // SAFETY: The allocation can't overlap because both own the memory, `elem_ptr` comes from box
+        // we have checked that there's enough space behind `new_elem_start_ptr`
+        // `elem_size` is the size of the element, obtained by `sizeof_elem`
+        unsafe {
+            ptr::copy_nonoverlapping::<u8>(elem_ptr as *mut u8, new_elem_start_ptr, elem_size)
+        };
+
+        // SAFETY: `index` is not out of bounds, and we are overwriting the element afterwards
+        let data_ptr = unsafe { self.get_data_ptr(index) };
+
+        let meta = ptr::metadata(elem_ptr);
+        let new_data: PtrData<T> = PtrData {
+            offset: new_elem_starting_offset,
+            meta,
+        };
+
+        // SAFETY: We can assume that `get_data_ptr` returns valid pointers to `PtrData<T>`
+        unsafe { *data_ptr = new_data };
+
+        // SAFETY: `elem_ptr` comes from the box
+        unsafe { dealloc_box(elem_ptr) };
+
+        Ok(old_elem)
     }
 
     pub fn pop(&mut self) -> Option<Box<T>> {
@@ -245,7 +280,7 @@ impl<T: ?Sized> RawVechonk<T> {
         // SAFETY: The new allocation doesn't overlap, `box_ptr` was just allocated and is non_null
         // For `elem_ptr`, see safety comments above, the size was obtained above as well
         unsafe {
-            ptr::copy_nonoverlapping(elem_ptr, box_ptr, elem_size);
+            ptr::copy_nonoverlapping::<u8>(elem_ptr, box_ptr, elem_size);
         }
 
         // SAFETY: See above for both variables. `data.meta` is the valid metadata for the element
@@ -287,7 +322,7 @@ impl<T: ?Sized> RawVechonk<T> {
         // copy the elements first
         // SAFETY: both pointers point to the start of allocations smaller than `self.elem_size` and own them
         unsafe {
-            ptr::copy_nonoverlapping(old_ptr, self.ptr.as_ptr(), self.elem_size);
+            ptr::copy_nonoverlapping::<u8>(old_ptr, self.ptr.as_ptr(), self.elem_size);
         }
 
         // then copy the data
@@ -295,7 +330,7 @@ impl<T: ?Sized> RawVechonk<T> {
         unsafe {
             let new_data_ptr = self.ptr.as_ptr().add(self.offset_for_data(last_data_index));
-            ptr::copy_nonoverlapping(
+            ptr::copy_nonoverlapping::<u8>(
                 old_ptr.add(old_data_offset),
                 new_data_ptr,
                 self.data_section_size(),
@@ -410,3 +445,17 @@ impl<T: ?Sized> RawVechonk<T> {
         mem::align_of::<PtrData<T>>()
     }
 }
+
+/// Deallocates memory from a `Box<T>`
+/// # Safety
+/// `ptr` must point to an allocation from a `Box<T>`, and must be safe to free
+unsafe fn dealloc_box<T: ?Sized>(ptr: *mut T) {
+    // SAFETY: This was allocated by `Box`, so we know that it is valid.
+    // The ownership of the value was transferred to `Vechonk` by copying it out
+    unsafe {
+        alloc::alloc::dealloc(
+            ptr as _,
+            Layout::from_size_align(mem::size_of_val(&*ptr), mem::align_of_val(&*ptr)).unwrap(),
+        )
+    }
+}
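
`insert_elem` never grows or shifts the buffer: as the comment block above sketches, the replacement has to fit into the space freed by the old element plus any padding that sat in front of it, otherwise the boxed element is returned unchanged through `Err`. A hedged sketch of that rejection path, with the same assumed imports as above (for `str` the alignment is 1, so there is no leading padding to reuse):

fn rejected_insert() {
    let mut chonk: Vechonk<str> = vechonk!["hi".into(), "uwu".into()];

    // "a much longer string" needs far more bytes than the two that "hi"
    // occupies, so insert_elem bails out before touching the buffer and
    // returns the new element through `Err`.
    let result = chonk.insert(0, "a much longer string".into());
    assert!(result.is_err());

    // The existing contents are left as they were.
    assert_eq!(&chonk[0], "hi");
    assert_eq!(&chonk[1], "uwu");
}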


@@ -245,7 +245,6 @@ fn get_mut_deref() {
 }
 
 #[test]
-#[ignore]
 fn get_mut_mutating() {
     let mut chonk1: Vechonk<str> = vechonk!["hello".into(), "uwu".into()];
@@ -257,6 +256,15 @@ fn get_mut_mutating() {
     assert_eq!(&*hello, "owo");
 }
 
+#[test]
+fn insert() {
+    let mut chonk: Vechonk<str> = vechonk!["hello".into(), "uwu".into()];
+
+    chonk.insert(0, "owo".into()).unwrap();
+
+    assert_eq!(&chonk[0], "owo");
+}
+
 #[test]
 #[ignore]
 fn zst() {