/* Path: sys/contrib/ck/include/ck_bytelock.h */
/*1* Copyright 2010-2015 Samy Al Bahra.2* All rights reserved.3*4* Redistribution and use in source and binary forms, with or without5* modification, are permitted provided that the following conditions6* are met:7* 1. Redistributions of source code must retain the above copyright8* notice, this list of conditions and the following disclaimer.9* 2. Redistributions in binary form must reproduce the above copyright10* notice, this list of conditions and the following disclaimer in the11* documentation and/or other materials provided with the distribution.12*13* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND14* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE15* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE16* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE17* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL18* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS19* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)20* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT21* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY22* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF23* SUCH DAMAGE.24*/2526#ifndef CK_BYTELOCK_H27#define CK_BYTELOCK_H2829/*30* The implementations here are derived from the work described in:31* Dice, D. and Shavit, N. 2010. TLRW: return of the read-write lock.32* In Proceedings of the 22nd ACM Symposium on Parallelism in Algorithms33* and Architectures (Thira, Santorini, Greece, June 13 - 15, 2010).34* SPAA '10. 
ACM, New York, NY, 284-293.35*/3637#include <ck_cc.h>38#include <ck_md.h>39#include <ck_pr.h>40#include <ck_stdbool.h>41#include <ck_stddef.h>42#include <ck_limits.h>4344struct ck_bytelock {45unsigned int owner;46unsigned int n_readers;47uint8_t readers[CK_MD_CACHELINE - sizeof(unsigned int) * 2] CK_CC_ALIGN(8);48};49typedef struct ck_bytelock ck_bytelock_t;5051#define CK_BYTELOCK_INITIALIZER { 0, 0, {0} }52#define CK_BYTELOCK_UNSLOTTED UINT_MAX5354CK_CC_INLINE static void55ck_bytelock_init(struct ck_bytelock *bytelock)56{57unsigned int i;5859bytelock->owner = 0;60bytelock->n_readers = 0;61for (i = 0; i < sizeof bytelock->readers; i++)62bytelock->readers[i] = false;6364ck_pr_barrier();65return;66}6768#ifdef CK_F_PR_LOAD_6469#define CK_BYTELOCK_LENGTH sizeof(uint64_t)70#define CK_BYTELOCK_LOAD ck_pr_load_6471#define CK_BYTELOCK_TYPE uint64_t72#elif defined(CK_F_PR_LOAD_32)73#define CK_BYTELOCK_LENGTH sizeof(uint32_t)74#define CK_BYTELOCK_LOAD ck_pr_load_3275#define CK_BYTELOCK_TYPE uint32_t76#else77#error Unsupported platform.78#endif7980CK_CC_INLINE static void81ck_bytelock_write_lock(struct ck_bytelock *bytelock, unsigned int slot)82{83CK_BYTELOCK_TYPE *readers = (void *)bytelock->readers;84unsigned int i;8586/* Announce upcoming writer acquisition. */87while (ck_pr_cas_uint(&bytelock->owner, 0, slot) == false)88ck_pr_stall();8990/* If we are slotted, we might be upgrading from a read lock. */91if (slot <= sizeof bytelock->readers)92ck_pr_store_8(&bytelock->readers[slot - 1], false);9394/*95* Wait for slotted readers to drain out. This also provides the96* lock acquire semantics.97*/98ck_pr_fence_atomic_load();99100for (i = 0; i < sizeof(bytelock->readers) / CK_BYTELOCK_LENGTH; i++) {101while (CK_BYTELOCK_LOAD(&readers[i]) != false)102ck_pr_stall();103}104105/* Wait for unslotted readers to drain out. 
*/106while (ck_pr_load_uint(&bytelock->n_readers) != 0)107ck_pr_stall();108109ck_pr_fence_lock();110return;111}112113#undef CK_BYTELOCK_LENGTH114#undef CK_BYTELOCK_LOAD115#undef CK_BYTELOCK_TYPE116117CK_CC_INLINE static void118ck_bytelock_write_unlock(struct ck_bytelock *bytelock)119{120121ck_pr_fence_unlock();122ck_pr_store_uint(&bytelock->owner, 0);123return;124}125126CK_CC_INLINE static void127ck_bytelock_read_lock(struct ck_bytelock *bytelock, unsigned int slot)128{129130if (ck_pr_load_uint(&bytelock->owner) == slot) {131ck_pr_store_8(&bytelock->readers[slot - 1], true);132ck_pr_fence_strict_store();133ck_pr_store_uint(&bytelock->owner, 0);134return;135}136137/* Unslotted threads will have to use the readers counter. */138if (slot > sizeof bytelock->readers) {139for (;;) {140ck_pr_inc_uint(&bytelock->n_readers);141ck_pr_fence_atomic_load();142if (ck_pr_load_uint(&bytelock->owner) == 0)143break;144ck_pr_dec_uint(&bytelock->n_readers);145146while (ck_pr_load_uint(&bytelock->owner) != 0)147ck_pr_stall();148}149150ck_pr_fence_lock();151return;152}153154slot -= 1;155for (;;) {156#ifdef CK_F_PR_FAA_8157ck_pr_fas_8(&bytelock->readers[slot], true);158ck_pr_fence_atomic_load();159#else160ck_pr_store_8(&bytelock->readers[slot], true);161ck_pr_fence_store_load();162#endif163164/*165* If there is no owner at this point, our slot has166* already been published and it is guaranteed no167* write acquisition will succeed until we drain out.168*/169if (ck_pr_load_uint(&bytelock->owner) == 0)170break;171172ck_pr_store_8(&bytelock->readers[slot], false);173while (ck_pr_load_uint(&bytelock->owner) != 0)174ck_pr_stall();175}176177ck_pr_fence_lock();178return;179}180181CK_CC_INLINE static void182ck_bytelock_read_unlock(struct ck_bytelock *bytelock, unsigned int slot)183{184185ck_pr_fence_unlock();186187if (slot > sizeof bytelock->readers)188ck_pr_dec_uint(&bytelock->n_readers);189else190ck_pr_store_8(&bytelock->readers[slot - 1], false);191192return;193}194195#endif /* CK_BYTELOCK_H 
*/196197198