/* Path: blob/main/sys/contrib/ck/include/ck_rwcohort.h */
/*1* Copyright 2013-2015 Samy Al Bahra.2* Copyright 2013 Brendon Scheinman.3* All rights reserved.4*5* Redistribution and use in source and binary forms, with or without6* modification, are permitted provided that the following conditions7* are met:8* 1. Redistributions of source code must retain the above copyright9* notice, this list of conditions and the following disclaimer.10* 2. Redistributions in binary form must reproduce the above copyright11* notice, this list of conditions and the following disclaimer in the12* documentation and/or other materials provided with the distribution.13*14* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND15* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE16* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE17* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE18* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL19* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS20* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)21* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT22* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY23* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF24* SUCH DAMAGE.25*/2627#ifndef CK_RWCOHORT_H28#define CK_RWCOHORT_H2930/*31* This is an implementation of NUMA-aware reader-writer locks as described in:32* Calciu, I.; Dice, D.; Lev, Y.; Luchangco, V.; Marathe, V.; and Shavit, N. 
2014.33* NUMA-Aware Reader-Writer Locks34*/3536#include <ck_cc.h>37#include <ck_pr.h>38#include <ck_stddef.h>39#include <ck_cohort.h>4041#define CK_RWCOHORT_WP_NAME(N) ck_rwcohort_wp_##N42#define CK_RWCOHORT_WP_INSTANCE(N) struct CK_RWCOHORT_WP_NAME(N)43#define CK_RWCOHORT_WP_INIT(N, RW, WL) ck_rwcohort_wp_##N##_init(RW, WL)44#define CK_RWCOHORT_WP_READ_LOCK(N, RW, C, GC, LC) \45ck_rwcohort_wp_##N##_read_lock(RW, C, GC, LC)46#define CK_RWCOHORT_WP_READ_UNLOCK(N, RW, C, GC, LC) \47ck_rwcohort_wp_##N##_read_unlock(RW)48#define CK_RWCOHORT_WP_WRITE_LOCK(N, RW, C, GC, LC) \49ck_rwcohort_wp_##N##_write_lock(RW, C, GC, LC)50#define CK_RWCOHORT_WP_WRITE_UNLOCK(N, RW, C, GC, LC) \51ck_rwcohort_wp_##N##_write_unlock(RW, C, GC, LC)52#define CK_RWCOHORT_WP_DEFAULT_WAIT_LIMIT 10005354#define CK_RWCOHORT_WP_PROTOTYPE(N) \55CK_RWCOHORT_WP_INSTANCE(N) { \56unsigned int read_counter; \57unsigned int write_barrier; \58unsigned int wait_limit; \59}; \60CK_CC_INLINE static void \61ck_rwcohort_wp_##N##_init(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \62unsigned int wait_limit) \63{ \64\65rw_cohort->read_counter = 0; \66rw_cohort->write_barrier = 0; \67rw_cohort->wait_limit = wait_limit; \68ck_pr_barrier(); \69return; \70} \71CK_CC_INLINE static void \72ck_rwcohort_wp_##N##_write_lock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \73CK_COHORT_INSTANCE(N) *cohort, void *global_context, \74void *local_context) \75{ \76\77while (ck_pr_load_uint(&rw_cohort->write_barrier) > 0) \78ck_pr_stall(); \79\80CK_COHORT_LOCK(N, cohort, global_context, local_context); \81\82while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) \83ck_pr_stall(); \84\85return; \86} \87CK_CC_INLINE static void \88ck_rwcohort_wp_##N##_write_unlock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \89CK_COHORT_INSTANCE(N) *cohort, void *global_context, \90void *local_context) \91{ \92\93(void)rw_cohort; \94CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \95return; \96} \97CK_CC_INLINE static void 
\98ck_rwcohort_wp_##N##_read_lock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \99CK_COHORT_INSTANCE(N) *cohort, void *global_context, \100void *local_context) \101{ \102unsigned int wait_count = 0; \103bool raised = false; \104\105for (;;) { \106ck_pr_inc_uint(&rw_cohort->read_counter); \107ck_pr_fence_atomic_load(); \108if (CK_COHORT_LOCKED(N, cohort, global_context, \109local_context) == false) \110break; \111\112ck_pr_dec_uint(&rw_cohort->read_counter); \113while (CK_COHORT_LOCKED(N, cohort, global_context, \114local_context) == true) { \115ck_pr_stall(); \116if (++wait_count > rw_cohort->wait_limit && \117raised == false) { \118ck_pr_inc_uint(&rw_cohort->write_barrier); \119raised = true; \120} \121} \122} \123\124if (raised == true) \125ck_pr_dec_uint(&rw_cohort->write_barrier); \126\127ck_pr_fence_load(); \128return; \129} \130CK_CC_INLINE static void \131ck_rwcohort_wp_##N##_read_unlock(CK_RWCOHORT_WP_INSTANCE(N) *cohort) \132{ \133\134ck_pr_fence_load_atomic(); \135ck_pr_dec_uint(&cohort->read_counter); \136return; \137}138139#define CK_RWCOHORT_WP_INITIALIZER { \140.read_counter = 0, \141.write_barrier = 0, \142.wait_limit = 0 \143}144145#define CK_RWCOHORT_RP_NAME(N) ck_rwcohort_rp_##N146#define CK_RWCOHORT_RP_INSTANCE(N) struct CK_RWCOHORT_RP_NAME(N)147#define CK_RWCOHORT_RP_INIT(N, RW, WL) ck_rwcohort_rp_##N##_init(RW, WL)148#define CK_RWCOHORT_RP_READ_LOCK(N, RW, C, GC, LC) \149ck_rwcohort_rp_##N##_read_lock(RW, C, GC, LC)150#define CK_RWCOHORT_RP_READ_UNLOCK(N, RW, C, GC, LC) \151ck_rwcohort_rp_##N##_read_unlock(RW)152#define CK_RWCOHORT_RP_WRITE_LOCK(N, RW, C, GC, LC) \153ck_rwcohort_rp_##N##_write_lock(RW, C, GC, LC)154#define CK_RWCOHORT_RP_WRITE_UNLOCK(N, RW, C, GC, LC) \155ck_rwcohort_rp_##N##_write_unlock(RW, C, GC, LC)156#define CK_RWCOHORT_RP_DEFAULT_WAIT_LIMIT 1000157158#define CK_RWCOHORT_RP_PROTOTYPE(N) \159CK_RWCOHORT_RP_INSTANCE(N) { \160unsigned int read_counter; \161unsigned int read_barrier; \162unsigned int wait_limit; \163}; 
\164CK_CC_INLINE static void \165ck_rwcohort_rp_##N##_init(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \166unsigned int wait_limit) \167{ \168\169rw_cohort->read_counter = 0; \170rw_cohort->read_barrier = 0; \171rw_cohort->wait_limit = wait_limit; \172ck_pr_barrier(); \173return; \174} \175CK_CC_INLINE static void \176ck_rwcohort_rp_##N##_write_lock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \177CK_COHORT_INSTANCE(N) *cohort, void *global_context, \178void *local_context) \179{ \180unsigned int wait_count = 0; \181bool raised = false; \182\183for (;;) { \184CK_COHORT_LOCK(N, cohort, global_context, local_context); \185if (ck_pr_load_uint(&rw_cohort->read_counter) == 0) \186break; \187\188CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \189while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) { \190ck_pr_stall(); \191if (++wait_count > rw_cohort->wait_limit && \192raised == false) { \193ck_pr_inc_uint(&rw_cohort->read_barrier); \194raised = true; \195} \196} \197} \198\199if (raised == true) \200ck_pr_dec_uint(&rw_cohort->read_barrier); \201\202return; \203} \204CK_CC_INLINE static void \205ck_rwcohort_rp_##N##_write_unlock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \206CK_COHORT_INSTANCE(N) *cohort, void *global_context, void *local_context) \207{ \208\209(void)rw_cohort; \210CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \211return; \212} \213CK_CC_INLINE static void \214ck_rwcohort_rp_##N##_read_lock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \215CK_COHORT_INSTANCE(N) *cohort, void *global_context, \216void *local_context) \217{ \218\219while (ck_pr_load_uint(&rw_cohort->read_barrier) > 0) \220ck_pr_stall(); \221\222ck_pr_inc_uint(&rw_cohort->read_counter); \223ck_pr_fence_atomic_load(); \224\225while (CK_COHORT_LOCKED(N, cohort, global_context, \226local_context) == true) \227ck_pr_stall(); \228\229return; \230} \231CK_CC_INLINE static void \232ck_rwcohort_rp_##N##_read_unlock(CK_RWCOHORT_RP_INSTANCE(N) *cohort) \233{ \234\235ck_pr_fence_load_atomic(); 
\236ck_pr_dec_uint(&cohort->read_counter); \237return; \238}239240#define CK_RWCOHORT_RP_INITIALIZER { \241.read_counter = 0, \242.read_barrier = 0, \243.wait_limit = 0 \244}245246#define CK_RWCOHORT_NEUTRAL_NAME(N) ck_rwcohort_neutral_##N247#define CK_RWCOHORT_NEUTRAL_INSTANCE(N) struct CK_RWCOHORT_NEUTRAL_NAME(N)248#define CK_RWCOHORT_NEUTRAL_INIT(N, RW) ck_rwcohort_neutral_##N##_init(RW)249#define CK_RWCOHORT_NEUTRAL_READ_LOCK(N, RW, C, GC, LC) \250ck_rwcohort_neutral_##N##_read_lock(RW, C, GC, LC)251#define CK_RWCOHORT_NEUTRAL_READ_UNLOCK(N, RW, C, GC, LC) \252ck_rwcohort_neutral_##N##_read_unlock(RW)253#define CK_RWCOHORT_NEUTRAL_WRITE_LOCK(N, RW, C, GC, LC) \254ck_rwcohort_neutral_##N##_write_lock(RW, C, GC, LC)255#define CK_RWCOHORT_NEUTRAL_WRITE_UNLOCK(N, RW, C, GC, LC) \256ck_rwcohort_neutral_##N##_write_unlock(RW, C, GC, LC)257#define CK_RWCOHORT_NEUTRAL_DEFAULT_WAIT_LIMIT 1000258259#define CK_RWCOHORT_NEUTRAL_PROTOTYPE(N) \260CK_RWCOHORT_NEUTRAL_INSTANCE(N) { \261unsigned int read_counter; \262}; \263CK_CC_INLINE static void \264ck_rwcohort_neutral_##N##_init(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort) \265{ \266\267rw_cohort->read_counter = 0; \268ck_pr_barrier(); \269return; \270} \271CK_CC_INLINE static void \272ck_rwcohort_neutral_##N##_write_lock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort,\273CK_COHORT_INSTANCE(N) *cohort, void *global_context, \274void *local_context) \275{ \276\277CK_COHORT_LOCK(N, cohort, global_context, local_context); \278while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) { \279ck_pr_stall(); \280} \281return; \282} \283CK_CC_INLINE static void \284ck_rwcohort_neutral_##N##_write_unlock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort,\285CK_COHORT_INSTANCE(N) *cohort, void *global_context, void *local_context) \286{ \287\288(void)rw_cohort; \289CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \290return; \291} \292CK_CC_INLINE static void \293ck_rwcohort_neutral_##N##_read_lock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) 
*rw_cohort, \294CK_COHORT_INSTANCE(N) *cohort, void *global_context, \295void *local_context) \296{ \297\298CK_COHORT_LOCK(N, cohort, global_context, local_context); \299ck_pr_inc_uint(&rw_cohort->read_counter); \300CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \301return; \302} \303CK_CC_INLINE static void \304ck_rwcohort_neutral_##N##_read_unlock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *cohort) \305{ \306\307ck_pr_fence_load_atomic(); \308ck_pr_dec_uint(&cohort->read_counter); \309return; \310}311312#define CK_RWCOHORT_NEUTRAL_INITIALIZER { \313.read_counter = 0, \314}315316#endif /* CK_RWCOHORT_H */317318319