diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 00a4414eb..d41dd0dc6 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -11,7 +11,7 @@ on:
 env:
   RUSTFLAGS: -Dwarnings
   RUST_BACKTRACE: 1
-  nightly: nightly-2021-04-13
+  nightly: nightly-2021-11-05
 
 defaults:
   run:
diff --git a/tests/test_bytes_vec_alloc.rs b/tests/test_bytes_vec_alloc.rs
index fc7b4da88..75708657d 100644
--- a/tests/test_bytes_vec_alloc.rs
+++ b/tests/test_bytes_vec_alloc.rs
@@ -1,62 +1,88 @@
 #![cfg(feature = "alloc")]
 use std::alloc::{GlobalAlloc, Layout, System};
-use std::{mem, ptr};
+use std::ptr::null_mut;
+use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
 
 use bytes::{Buf, Bytes};
 
 #[global_allocator]
-static LEDGER: Ledger = Ledger;
+static LEDGER: Ledger = Ledger::new();
 
-struct Ledger;
+const LEDGER_LENGTH: usize = 2048;
 
-const USIZE_SIZE: usize = mem::size_of::<usize>();
+struct Ledger {
+    alloc_table: [(AtomicPtr<u8>, AtomicUsize); LEDGER_LENGTH],
+}
 
-unsafe impl GlobalAlloc for Ledger {
-    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-        if layout.align() == 1 && layout.size() > 0 {
-            // Allocate extra space to stash a record of
-            // how much space there was.
-            let orig_size = layout.size();
-            let size = orig_size + USIZE_SIZE;
-            let new_layout = match Layout::from_size_align(size, 1) {
-                Ok(layout) => layout,
-                Err(_err) => return ptr::null_mut(),
-            };
-            let ptr = System.alloc(new_layout);
-            if !ptr.is_null() {
-                (ptr as *mut usize).write(orig_size);
-                let ptr = ptr.offset(USIZE_SIZE as isize);
-                ptr
-            } else {
-                ptr
+impl Ledger {
+    const fn new() -> Self {
+        const ELEM: (AtomicPtr<u8>, AtomicUsize) =
+            (AtomicPtr::new(null_mut()), AtomicUsize::new(0));
+        let alloc_table = [ELEM; LEDGER_LENGTH];
+
+        Self { alloc_table }
+    }
+
+    /// Iterate over our table until we find an open entry, then insert into that entry.
+    fn insert(&self, ptr: *mut u8, size: usize) {
+        for (entry_ptr, entry_size) in self.alloc_table.iter() {
+            // SeqCst is good enough here; we don't care about perf, we just want to be correct!
+            if entry_ptr
+                .compare_exchange(null_mut(), ptr, Ordering::SeqCst, Ordering::SeqCst)
+                .is_ok()
+            {
+                entry_size.store(size, Ordering::SeqCst);
+                break;
             }
-        } else {
-            System.alloc(layout)
         }
     }
 
-    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
-        if layout.align() == 1 && layout.size() > 0 {
-            let off_ptr = (ptr as *mut usize).offset(-1);
-            let orig_size = off_ptr.read();
-            if orig_size != layout.size() {
-                panic!(
-                    "bad dealloc: alloc size was {}, dealloc size is {}",
-                    orig_size,
-                    layout.size()
-                );
+    fn remove(&self, ptr: *mut u8) -> usize {
+        for (entry_ptr, entry_size) in self.alloc_table.iter() {
+            // Set the entry to a value that will never be deallocated, so that we
+            // don't have any chance of a race condition.
+            //
+            // Don't worry, LEDGER_LENGTH is really long to compensate for us not reclaiming space.
+            if entry_ptr
+                .compare_exchange(
+                    ptr,
+                    usize::MAX as *mut u8,
+                    Ordering::SeqCst,
+                    Ordering::SeqCst,
+                )
+                .is_ok()
+            {
+                return entry_size.load(Ordering::SeqCst);
             }
+        }
 
-            let new_layout = match Layout::from_size_align(layout.size() + USIZE_SIZE, 1) {
-                Ok(layout) => layout,
-                Err(_err) => std::process::abort(),
-            };
-            System.dealloc(off_ptr as *mut u8, new_layout);
+        panic!("Couldn't find a matching entry for {:x?}", ptr);
+    }
+}
+
+unsafe impl GlobalAlloc for Ledger {
+    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+        let size = layout.size();
+        let ptr = System.alloc(layout);
+        self.insert(ptr, size);
+        ptr
+    }
+
+    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        let orig_size = self.remove(ptr);
+
+        if orig_size != layout.size() {
+            panic!(
+                "bad dealloc: alloc size was {}, dealloc size is {}",
+                orig_size,
+                layout.size()
+            );
         } else {
             System.dealloc(ptr, layout);
         }
     }
 }
+
 #[test]
 fn test_bytes_advance() {
     let mut bytes = Bytes::from(vec![10, 20, 30]);
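
One detail worth calling out in the new Ledger: `Ledger::new` is a `const fn`, and `[ELEM; LEDGER_LENGTH]` relies on repeating a named `const` item to build an array of non-`Copy` atomics at compile time. Below is a minimal standalone sketch of that initialization pattern, assuming nothing beyond std; the `Table` and `SLOTS` names are illustrative and not part of the patch.

    use std::ptr::null_mut;
    use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};

    const SLOTS: usize = 8;

    struct Table {
        entries: [(AtomicPtr<u8>, AtomicUsize); SLOTS],
    }

    impl Table {
        const fn new() -> Self {
            // `[expr; N]` normally requires `Copy`, but a named `const` item is
            // instantiated once per element, so it works for atomics too.
            const EMPTY: (AtomicPtr<u8>, AtomicUsize) =
                (AtomicPtr::new(null_mut()), AtomicUsize::new(0));
            Self { entries: [EMPTY; SLOTS] }
        }
    }

    // Usable as a `static`, just like the test's #[global_allocator] Ledger.
    static TABLE: Table = Table::new();

    fn main() {
        assert!(TABLE.entries[0].0.load(Ordering::SeqCst).is_null());
    }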