Path: blob/main/crates/polars-utils/src/sparse_init_vec.rs
use std::sync::atomic::{AtomicU8, AtomicUsize, Ordering};

/// A fixed-length, heap-allocated vector whose slots all start uninitialized
/// and may each be initialized at most once, in any order, from multiple
/// threads.
pub struct SparseInitVec<T> {
    ptr: *mut T,
    len: usize,
    cap: usize,

    /// Number of slots initialized so far.
    num_init: AtomicUsize,
    /// One bit per slot, set once that slot has been initialized.
    init_mask: Vec<AtomicU8>,
}

unsafe impl<T: Send> Send for SparseInitVec<T> {}
unsafe impl<T: Send> Sync for SparseInitVec<T> {}

impl<T> SparseInitVec<T> {
    pub fn with_capacity(len: usize) -> Self {
        let init_mask = (0..len.div_ceil(8)).map(|_| AtomicU8::new(0)).collect();
        let mut storage = Vec::with_capacity(len);
        let cap = storage.capacity();
        let ptr = storage.as_mut_ptr();
        core::mem::forget(storage);
        Self {
            len,
            cap,
            ptr,
            num_init: AtomicUsize::new(0),
            init_mask,
        }
    }

    /// Tries to initialize slot `idx` with `value`, handing `value` back if
    /// `idx` is out of bounds or the slot was already initialized.
    pub fn try_set(&self, idx: usize, value: T) -> Result<(), T> {
        unsafe {
            if idx >= self.len {
                return Err(value);
            }

            // SAFETY: we use Relaxed orderings as we only ever read data back in methods that take
            // self mutably or owned, already implying synchronization.
            let init_mask_byte = self.init_mask.get_unchecked(idx / 8);
            let bit_mask = 1 << (idx % 8);
            if init_mask_byte.fetch_or(bit_mask, Ordering::Relaxed) & bit_mask != 0 {
                return Err(value);
            }

            self.ptr.add(idx).write(value);
            self.num_init.fetch_add(1, Ordering::Relaxed);
        }

        Ok(())
    }

    /// Converts into a `Vec<T>` if every slot has been initialized, otherwise
    /// returns `self` unchanged.
    pub fn try_assume_init(mut self) -> Result<Vec<T>, Self> {
        unsafe {
            if *self.num_init.get_mut() == self.len {
                let ret = Vec::from_raw_parts(self.ptr, self.len, self.cap);
                drop(core::mem::take(&mut self.init_mask));
                core::mem::forget(self);
                Ok(ret)
            } else {
                Err(self)
            }
        }
    }
}

impl<T> Drop for SparseInitVec<T> {
    fn drop(&mut self) {
        unsafe {
            // Make sure storage gets dropped even if element drop panics.
            let _storage = Vec::from_raw_parts(self.ptr, 0, self.cap);

            for idx in 0..self.len {
                let init_mask_byte = self.init_mask.get_unchecked_mut(idx / 8);
                let bit_mask = 1 << (idx % 8);
                if *init_mask_byte.get_mut() & bit_mask != 0 {
                    self.ptr.add(idx).drop_in_place();
                }
            }
        }
    }
}
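
// Usage sketch (illustrative addition, not part of the upstream file). It
// shows the intended pattern under a couple of assumptions: Rust 1.63+ for
// `std::thread::scope`, and an arbitrary choice of 4 threads, each filling
// the slots congruent to its id mod 4. Each slot accepts exactly one
// `try_set`; once all slots are set, `try_assume_init` yields a plain `Vec`.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn fills_from_multiple_threads() {
        let len = 64usize;
        let vec = SparseInitVec::with_capacity(len);

        std::thread::scope(|s| {
            for t in 0..4usize {
                let vec = &vec;
                s.spawn(move || {
                    for idx in (t..len).step_by(4) {
                        // The first initialization of a slot succeeds...
                        assert!(vec.try_set(idx, idx * 10).is_ok());
                        // ...and a repeat attempt hands the value back.
                        assert!(vec.try_set(idx, 0).is_err());
                    }
                });
            }
        });

        // Every slot is now initialized, so we can take ownership as a Vec.
        let out = vec.try_assume_init().ok().unwrap();
        assert_eq!(out, (0..len).map(|i| i * 10).collect::<Vec<_>>());
    }
}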