use std::cell::UnsafeCell;
use std::hint;
use std::ops::Deref;
use std::ops::DerefMut;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
// Readable aliases for the two states of the lock word (`AtomicBool`).
const UNLOCKED: bool = false;
const LOCKED: bool = true;
/// A spin lock protecting a value of type `T`.
///
/// `#[repr(align(128))]` pads the whole lock out to its own aligned
/// region — presumably to avoid false sharing with neighbouring data;
/// 128 bytes covers common cache-line and prefetch-pair sizes.
/// NOTE(review): confirm which target this alignment was tuned for.
#[repr(align(128))]
pub struct SpinLock<T: ?Sized> {
    // `LOCKED` while a guard is alive, `UNLOCKED` otherwise.
    lock: AtomicBool,
    // Interior mutability: `lock()` hands out `&mut T` from `&self`.
    value: UnsafeCell<T>,
}
impl<T> SpinLock<T> {
    /// Creates a new spin lock wrapping `value`, initially unlocked.
    pub fn new(value: T) -> SpinLock<T> {
        let lock = AtomicBool::new(UNLOCKED);
        let value = UnsafeCell::new(value);
        SpinLock { lock, value }
    }

    /// Consumes the lock and returns the protected value.
    ///
    /// Taking `self` by value proves no guards are outstanding, so no
    /// locking is required.
    pub fn into_inner(self) -> T {
        let SpinLock { value, .. } = self;
        value.into_inner()
    }
}
impl<T: ?Sized> SpinLock<T> {
    /// Acquires the lock, spinning until it becomes available, and
    /// returns a guard that releases it when dropped.
    ///
    /// This is test-and-test-and-set: we spin on a cheap `Relaxed` load
    /// and only attempt the `Acquire` compare-exchange once the lock
    /// looks free, which avoids hammering the cache line with writes
    /// while contended. `compare_exchange_weak` may fail spuriously,
    /// but that just means one more trip around the loop.
    pub fn lock(&self) -> SpinLockGuard<T> {
        loop {
            if self.lock.load(Ordering::Relaxed) == UNLOCKED
                && self
                    .lock
                    .compare_exchange_weak(UNLOCKED, LOCKED, Ordering::Acquire, Ordering::Relaxed)
                    .is_ok()
            {
                break;
            }
            hint::spin_loop();
        }
        SpinLockGuard {
            lock: self,
            // SAFETY: the compare-exchange above succeeded, so this
            // thread holds the lock exclusively until the guard's
            // `Drop` releases it; no other `&mut T` (or `&T`) to the
            // contents can be produced in the meantime.
            value: unsafe { &mut *self.value.get() },
        }
    }

    /// Releases the lock. Called only from `SpinLockGuard::drop`.
    ///
    /// The `Release` store pairs with the `Acquire` compare-exchange in
    /// `lock`, making writes performed under the lock visible to the
    /// next owner.
    fn unlock(&self) {
        self.lock.store(UNLOCKED, Ordering::Release);
    }

    /// Returns a mutable reference to the protected value.
    ///
    /// `&mut self` statically guarantees there are no outstanding
    /// guards, so `UnsafeCell::get_mut` gives safe access — no locking
    /// and no `unsafe` needed (the original used an unnecessary raw-
    /// pointer dereference here).
    pub fn get_mut(&mut self) -> &mut T {
        self.value.get_mut()
    }
}
// SAFETY: sending a `SpinLock<T>` to another thread moves the inner `T`
// along with it, so `T: Send` is exactly what is required.
unsafe impl<T: ?Sized + Send> Send for SpinLock<T> {}

// SAFETY: sharing `&SpinLock<T>` across threads only ever exposes the
// inner `T` to one thread at a time, because `lock()` grants exclusive
// access until the guard drops. `T: Send` therefore suffices — this
// matches the `Sync` bound on `std::sync::Mutex<T>`.
unsafe impl<T: ?Sized + Send> Sync for SpinLock<T> {}
impl<T: Default> Default for SpinLock<T> {
fn default() -> Self {
Self::new(Default::default())
}
}
impl<T> From<T> for SpinLock<T> {
fn from(source: T) -> Self {
Self::new(source)
}
}
/// Guard granting access to a [`SpinLock`]'s contents; the lock is
/// released when this guard is dropped.
///
/// `#[must_use]` because a guard that is not bound to a variable is
/// dropped immediately, silently releasing the lock on the same line it
/// was acquired (std's `MutexGuard` carries the same attribute).
#[must_use = "if unused the SpinLock will immediately unlock"]
pub struct SpinLockGuard<'a, T: 'a + ?Sized> {
    // The lock to release on drop.
    lock: &'a SpinLock<T>,
    // Exclusive access to the protected value, valid while we hold the lock.
    value: &'a mut T,
}
impl<T: ?Sized> Deref for SpinLockGuard<'_, T> {
    type Target = T;

    /// Shared access to the protected value for the guard's lifetime.
    fn deref(&self) -> &T {
        &*self.value
    }
}
impl<T: ?Sized> DerefMut for SpinLockGuard<'_, T> {
    /// Exclusive access to the protected value for the guard's lifetime.
    fn deref_mut(&mut self) -> &mut T {
        &mut *self.value
    }
}
impl<T: ?Sized> Drop for SpinLockGuard<'_, T> {
fn drop(&mut self) {
self.lock.unlock();
}
}
#[cfg(test)]
mod test {
    use std::mem;
    use std::sync::atomic::AtomicUsize;
    use std::sync::atomic::Ordering;
    use std::sync::Arc;
    use std::thread;

    use super::*;

    /// Non-`Copy` payload so tests exercise real moves through the lock.
    #[derive(PartialEq, Eq, Debug)]
    struct NonCopy(u32);

    /// Locking returns the stored value.
    #[test]
    fn it_works() {
        let lock = SpinLock::new(NonCopy(13));
        assert_eq!(*lock.lock(), NonCopy(13));
    }

    /// The lock can be acquired again after a guard is dropped.
    #[test]
    fn smoke() {
        let lock = SpinLock::new(NonCopy(7));
        mem::drop(lock.lock());
        mem::drop(lock.lock());
    }

    /// A `SpinLock` can be moved to, and used on, another thread.
    #[test]
    fn send() {
        let lock = SpinLock::new(NonCopy(19));
        let handle = thread::spawn(move || {
            let guard = lock.lock();
            assert_eq!(*guard, NonCopy(19));
        });
        handle.join().unwrap();
    }

    /// Many threads incrementing under the lock lose no updates.
    #[test]
    fn high_contention() {
        const THREADS: usize = 23;
        const ITERATIONS: usize = 101;

        let counter = Arc::new(SpinLock::new(0usize));
        let handles: Vec<_> = (0..THREADS)
            .map(|_| {
                let counter = Arc::clone(&counter);
                thread::spawn(move || {
                    for _ in 0..ITERATIONS {
                        *counter.lock() += 1;
                    }
                })
            })
            .collect();
        for handle in handles {
            handle.join().unwrap();
        }
        assert_eq!(*counter.lock(), THREADS * ITERATIONS);
    }

    /// `get_mut` writes through without taking the lock.
    #[test]
    fn get_mut() {
        let mut lock = SpinLock::new(NonCopy(13));
        *lock.get_mut() = NonCopy(17);
        assert_eq!(lock.into_inner(), NonCopy(17));
    }

    /// `into_inner` hands back the stored value.
    #[test]
    fn into_inner() {
        let lock = SpinLock::new(NonCopy(29));
        assert_eq!(lock.into_inner(), NonCopy(29));
    }

    /// `into_inner` moves the value out without dropping it; the value
    /// drops exactly once, when the caller's binding goes out of scope.
    #[test]
    fn into_inner_drop() {
        struct NeedsDrop(Arc<AtomicUsize>);

        impl Drop for NeedsDrop {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::AcqRel);
            }
        }

        let drops = Arc::new(AtomicUsize::new(0));
        let lock = SpinLock::new(NeedsDrop(drops.clone()));
        assert_eq!(drops.load(Ordering::Acquire), 0);
        {
            let inner = lock.into_inner();
            assert_eq!(inner.0.load(Ordering::Acquire), 0);
        }
        assert_eq!(drops.load(Ordering::Acquire), 1);
    }

    /// Locks nest: a guard of the outer lock can take the inner one.
    #[test]
    fn arc_nested() {
        let inner = SpinLock::new(1);
        let outer = Arc::new(SpinLock::new(inner));
        let handle = thread::spawn(move || {
            let outer_guard = outer.lock();
            let inner_guard = outer_guard.lock();
            assert_eq!(*inner_guard, 1);
        });
        handle.join().unwrap();
    }

    /// A guard dropped during unwinding still releases the lock, and a
    /// drop impl may itself take the lock.
    #[test]
    fn arc_access_in_unwind() {
        let shared = Arc::new(SpinLock::new(1));
        let shared2 = shared.clone();
        let result = thread::spawn(move || {
            struct Unwinder {
                i: Arc<SpinLock<i32>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    *self.i.lock() += 1;
                }
            }
            let _u = Unwinder { i: shared2 };
            panic!();
        })
        .join();
        result.expect_err("thread did not panic");
        let guard = shared.lock();
        assert_eq!(*guard, 2);
    }

    /// The lock works through an unsized coercion (`SpinLock<[i32]>`).
    #[test]
    fn unsized_value() {
        let slice_lock: &SpinLock<[i32]> = &SpinLock::new([1, 2, 3]);
        {
            let contents = &mut *slice_lock.lock();
            contents[0] = 4;
            contents[2] = 5;
        }
        let expected: &[i32] = &[4, 2, 5];
        assert_eq!(&*slice_lock.lock(), expected);
    }
}