/*
 * Copyright 2011-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_EPOCH_H
#define CK_EPOCH_H

/*
 * The implementation here is inspired by the work described in:
 *   Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
 *   of Cambridge Computing Laboratory.
 */

#include <ck_cc.h>
#include <ck_md.h>
#include <ck_pr.h>
#include <ck_stack.h>
#include <ck_stdbool.h>

#ifndef CK_EPOCH_LENGTH
#define CK_EPOCH_LENGTH 4
#endif

/*
 * This is used for sense detection with respect to concurrent
 * epoch sections.
 */
#define CK_EPOCH_SENSE (2)

struct ck_epoch_entry;
typedef struct ck_epoch_entry ck_epoch_entry_t;
typedef void ck_epoch_cb_t(ck_epoch_entry_t *);

/*
 * This should be embedded into objects you wish to be the target of
 * ck_epoch_cb_t functions (with ck_epoch_call).
 */
struct ck_epoch_entry {
	ck_epoch_cb_t *function;
	ck_stack_entry_t stack_entry;
};

/*
 * A section object may be passed to every begin-end pair to allow for
 * forward-progress guarantees within prolonged active sections.
 */
struct ck_epoch_section {
	unsigned int bucket;
};
typedef struct ck_epoch_section ck_epoch_section_t;

/*
 * Defines a function named N that returns a pointer to the object of
 * type T containing a ck_epoch_entry as member M.
 */
#define CK_EPOCH_CONTAINER(T, M, N) \
	CK_CC_CONTAINER(struct ck_epoch_entry, T, M, N)
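/*
 * Example usage (an illustrative sketch, not part of the interface):
 * embed a ck_epoch_entry in the object to be reclaimed and use
 * CK_EPOCH_CONTAINER to recover the enclosing object inside the deferral
 * callback. The "struct node" type and "node_destroy" function below are
 * hypothetical.
 *
 *	struct node {
 *		int value;
 *		ck_epoch_entry_t epoch_entry;
 *	};
 *	CK_EPOCH_CONTAINER(struct node, epoch_entry, node_container)
 *
 *	static void
 *	node_destroy(ck_epoch_entry_t *e)
 *	{
 *
 *		free(node_container(e));
 *		return;
 *	}
 *
 * A retired node may then be handed to ck_epoch_call (defined below) as
 * ck_epoch_call(record, &node->epoch_entry, node_destroy).
 */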
struct ck_epoch_ref {
	unsigned int epoch;
	unsigned int count;
};

struct ck_epoch_record {
	ck_stack_entry_t record_next;
	struct ck_epoch *global;
	unsigned int state;
	unsigned int epoch;
	unsigned int active;
	struct {
		struct ck_epoch_ref bucket[CK_EPOCH_SENSE];
	} local CK_CC_CACHELINE;
	unsigned int n_pending;
	unsigned int n_peak;
	unsigned int n_dispatch;
	void *ct;
	ck_stack_t pending[CK_EPOCH_LENGTH];
} CK_CC_CACHELINE;
typedef struct ck_epoch_record ck_epoch_record_t;

struct ck_epoch {
	unsigned int epoch;
	unsigned int n_free;
	ck_stack_t records;
};
typedef struct ck_epoch ck_epoch_t;

/*
 * Internal functions.
 */
void _ck_epoch_addref(ck_epoch_record_t *, ck_epoch_section_t *);
bool _ck_epoch_delref(ck_epoch_record_t *, ck_epoch_section_t *);

CK_CC_FORCE_INLINE static void *
ck_epoch_record_ct(const ck_epoch_record_t *record)
{

	return ck_pr_load_ptr(&record->ct);
}

/*
 * Marks the beginning of an epoch-protected section.
 */
CK_CC_FORCE_INLINE static void
ck_epoch_begin(ck_epoch_record_t *record, ck_epoch_section_t *section)
{
	struct ck_epoch *epoch = record->global;

	/*
	 * Only observe a new epoch if the thread is not recursing into a
	 * read section.
	 */
	if (record->active == 0) {
		unsigned int g_epoch;

		/*
		 * It is possible for loads to be re-ordered before the store
		 * is committed into the caller's epoch and active fields.
		 * For this reason, store-to-load serialization is necessary.
		 */
#if defined(CK_MD_TSO)
		ck_pr_fas_uint(&record->active, 1);
		ck_pr_fence_atomic_load();
#else
		ck_pr_store_uint(&record->active, 1);
		ck_pr_fence_memory();
#endif

		/*
		 * This load is allowed to be re-ordered prior to the setting
		 * of the active flag due to the monotonic nature of the
		 * global epoch. However, stale values lead to measurable
		 * performance degradation in some torture tests, so we
		 * disallow early loads of the global epoch.
		 */
		g_epoch = ck_pr_load_uint(&epoch->epoch);
		ck_pr_store_uint(&record->epoch, g_epoch);
	} else {
		ck_pr_store_uint(&record->active, record->active + 1);
	}

	if (section != NULL)
		_ck_epoch_addref(record, section);

	return;
}

/*
 * Marks the end of an epoch-protected section. Returns true if no more
 * sections exist for the caller.
 */
CK_CC_FORCE_INLINE static bool
ck_epoch_end(ck_epoch_record_t *record, ck_epoch_section_t *section)
{

	ck_pr_fence_release();
	ck_pr_store_uint(&record->active, record->active - 1);

	if (section != NULL)
		return _ck_epoch_delref(record, section);

	return record->active == 0;
}
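/*
 * Example usage (an illustrative sketch): a read-side critical section
 * protecting the traversal of a shared structure. The record is assumed
 * to be a thread-local ck_epoch_record_t previously registered with
 * ck_epoch_register; "lookup" and "remove" are hypothetical operations
 * on the shared structure, and "node"/"node_destroy" follow the earlier
 * sketch.
 *
 *	ck_epoch_section_t section;
 *
 *	ck_epoch_begin(record, &section);
 *	lookup(...);
 *	ck_epoch_end(record, &section);
 *
 * Passing NULL in place of a section object is also legal; section
 * objects are only needed for the forward-progress guarantees described
 * earlier. A writer may then retire an object and periodically attempt
 * non-blocking reclamation:
 *
 *	remove(node);
 *	ck_epoch_call(record, &node->epoch_entry, node_destroy);
 *	ck_epoch_poll(record);
 */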
/*
 * Defers the execution of the function pointed to by the "function"
 * argument until the global epoch counter has completed a loop. This
 * allows for non-blocking deferral.
 *
 * We can get away without a fence here due to the monotonic nature
 * of the epoch counter. Worst case, this will result in some delays
 * before object destruction.
 */
CK_CC_FORCE_INLINE static void
ck_epoch_call(ck_epoch_record_t *record,
    ck_epoch_entry_t *entry,
    ck_epoch_cb_t *function)
{
	struct ck_epoch *epoch = record->global;
	unsigned int e = ck_pr_load_uint(&epoch->epoch);
	unsigned int offset = e & (CK_EPOCH_LENGTH - 1);

	record->n_pending++;
	entry->function = function;
	ck_stack_push_spnc(&record->pending[offset], &entry->stack_entry);
	return;
}

/*
 * Same as ck_epoch_call, but allows records to be shared between threads
 * and is re-entrant.
 */
CK_CC_FORCE_INLINE static void
ck_epoch_call_strict(ck_epoch_record_t *record,
    ck_epoch_entry_t *entry,
    ck_epoch_cb_t *function)
{
	struct ck_epoch *epoch = record->global;
	unsigned int e = ck_pr_load_uint(&epoch->epoch);
	unsigned int offset = e & (CK_EPOCH_LENGTH - 1);

	ck_pr_inc_uint(&record->n_pending);
	entry->function = function;

	/* Store fence is implied by push operation. */
	ck_stack_push_upmc(&record->pending[offset], &entry->stack_entry);
	return;
}

/*
 * This callback is used by ck_epoch_synchronize_wait to allow for custom
 * blocking behavior.
 */
typedef void ck_epoch_wait_cb_t(ck_epoch_t *, ck_epoch_record_t *,
    void *);

/*
 * Returns the latest epoch value. This operation provides load ordering.
 */
CK_CC_FORCE_INLINE static unsigned int
ck_epoch_value(const ck_epoch_t *ep)
{

	ck_pr_fence_load();
	return ck_pr_load_uint(&ep->epoch);
}

void ck_epoch_init(ck_epoch_t *);

/*
 * Attempts to recycle an unused epoch record. If one is successfully
 * recycled, the record's context pointer is also updated.
 */
ck_epoch_record_t *ck_epoch_recycle(ck_epoch_t *, void *);

/*
 * Registers an epoch record. An optional context pointer may be passed
 * that is retrievable with ck_epoch_record_ct.
 */
void ck_epoch_register(ck_epoch_t *, ck_epoch_record_t *, void *);

/*
 * Marks a record as available for re-use by a subsequent recycle operation.
 * Note that the record cannot be physically destroyed.
 */
void ck_epoch_unregister(ck_epoch_record_t *);

bool ck_epoch_poll(ck_epoch_record_t *);
bool ck_epoch_poll_deferred(ck_epoch_record_t *, ck_stack_t *);
void ck_epoch_synchronize(ck_epoch_record_t *);
void ck_epoch_synchronize_wait(ck_epoch_t *, ck_epoch_wait_cb_t *, void *);
void ck_epoch_barrier(ck_epoch_record_t *);
void ck_epoch_barrier_wait(ck_epoch_record_t *, ck_epoch_wait_cb_t *, void *);

/*
 * Reclaims entries associated with a record. This is safe to call only on
 * the caller's own record or on records that are using call_strict.
 */
void ck_epoch_reclaim(ck_epoch_record_t *);

#endif /* CK_EPOCH_H */