Path: blob/main/sys/contrib/ck/include/spinlock/hclh.h
48378 views
/*
 * Copyright 2013-2015 Olivier Houchard
 * Copyright 2010-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_SPINLOCK_HCLH_H
#define CK_SPINLOCK_HCLH_H

#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>

#ifndef CK_F_SPINLOCK_HCLH
#define CK_F_SPINLOCK_HCLH
/*
 * Hierarchical CLH (HCLH) queue spinlock node. Each acquiring thread
 * enqueues one of these onto a per-cluster local queue; the local queue
 * is later spliced onto a single global queue (see _lock below).
 * NOTE(review): the file name suggests the hierarchical CLH lock of
 * Luchangco/Nussbaum/Shavit — confirm against CK's documentation.
 */
struct ck_spinlock_hclh {
	unsigned int wait;	/* Successor spins while this is true. */
	unsigned int splice;	/* Set once this node's local queue segment
				 * has been spliced into the global queue. */
	int cluster_id;		/* Cluster (e.g. NUMA domain) of the owner. */
	struct ck_spinlock_hclh *previous;	/* Predecessor in the queue. */
};
typedef struct ck_spinlock_hclh ck_spinlock_hclh_t;

/*
 * Initialize a queue head to point at the caller-provided "unowned"
 * sentinel node, marking the lock as free for the given cluster.
 * The trailing ck_pr_barrier() is a compiler barrier so the stores
 * above are not reordered past publication by the compiler.
 */
CK_CC_INLINE static void
ck_spinlock_hclh_init(struct ck_spinlock_hclh **lock,
    struct ck_spinlock_hclh *unowned,
    int cluster_id)
{

	unowned->previous = NULL;
	unowned->wait = false;
	unowned->splice = false;
	unowned->cluster_id = cluster_id;
	*lock = unowned;
	ck_pr_barrier();
	return;
}

/*
 * Returns true if the lock appears to be held: the tail node's wait
 * flag is still set. This is a racy snapshot — by the time the caller
 * acts on the result the state may have changed.
 */
CK_CC_INLINE static bool
ck_spinlock_hclh_locked(struct ck_spinlock_hclh **queue)
{
	struct ck_spinlock_hclh *head;
	bool r;

	head = ck_pr_load_ptr(queue);
	r = ck_pr_load_uint(&head->wait);
	ck_pr_fence_acquire();
	return r;
}

/*
 * Acquire the lock. The caller enqueues "thread" (its own node) onto the
 * per-cluster local queue; if the predecessor belongs to the same cluster
 * and has not yet been spliced, the caller inherits the lock locally.
 * Otherwise the local segment is spliced onto the global queue and the
 * caller spins on its global predecessor. Returns with the lock held.
 */
CK_CC_INLINE static void
ck_spinlock_hclh_lock(struct ck_spinlock_hclh **glob_queue,
    struct ck_spinlock_hclh **local_queue,
    struct ck_spinlock_hclh *thread)
{
	struct ck_spinlock_hclh *previous, *local_tail;

	/* Indicate to the next thread on queue that they will have to block. */
	thread->wait = true;
	thread->splice = false;
	thread->cluster_id = (*local_queue)->cluster_id;
	/* Make sure previous->previous doesn't appear to be NULL */
	thread->previous = *local_queue;

	/* Serialize with respect to update of local queue. */
	ck_pr_fence_store_atomic();

	/* Mark current request as last request. Save reference to previous request. */
	previous = ck_pr_fas_ptr(local_queue, thread);
	thread->previous = previous;

	/* Wait until previous thread from the local queue is done with lock. */
	ck_pr_fence_load();
	if (previous->previous != NULL) {
		/*
		 * Spin while the same-cluster predecessor still holds the
		 * lock and its segment has not been spliced globally.
		 */
		while (ck_pr_load_uint(&previous->wait) == true &&
		    ck_pr_load_int(&previous->cluster_id) == thread->cluster_id &&
		    ck_pr_load_uint(&previous->splice) == false)
			ck_pr_stall();

		/* We're head of the global queue, we're done */
		if (ck_pr_load_int(&previous->cluster_id) == thread->cluster_id &&
		    ck_pr_load_uint(&previous->splice) == false)
			return;
	}

	/* Now we need to splice the local queue into the global queue. */
	local_tail = ck_pr_load_ptr(local_queue);
	previous = ck_pr_fas_ptr(glob_queue, local_tail);

	/* Publish that this segment now lives on the global queue. */
	ck_pr_store_uint(&local_tail->splice, true);

	/* Wait until previous thread from the global queue is done with lock. */
	while (ck_pr_load_uint(&previous->wait) == true)
		ck_pr_stall();

	/* Acquire semantics before entering the critical section. */
	ck_pr_fence_lock();
	return;
}

/*
 * Release the lock: clear this node's wait flag so the successor (if any)
 * may proceed, then adopt the predecessor's node as the caller's node for
 * the next acquisition (standard CLH-style node recycling).
 */
CK_CC_INLINE static void
ck_spinlock_hclh_unlock(struct ck_spinlock_hclh **thread)
{
	struct ck_spinlock_hclh *previous;

	/*
	 * If there are waiters, they are spinning on the current node wait
	 * flag. The flag is cleared so that the successor may complete an
	 * acquisition. If the caller is pre-empted then the predecessor field
	 * may be updated by a successor's lock operation. In order to avoid
	 * this, save a copy of the predecessor before setting the flag.
	 */
	previous = thread[0]->previous;

	/* We have to pay this cost anyways, use it as a compiler barrier too. */
	ck_pr_fence_unlock();
	ck_pr_store_uint(&(*thread)->wait, false);

	/*
	 * Predecessor is guaranteed not to be spinning on previous request,
	 * so update caller to use previous structure. This allows successor
	 * all the time in the world to successfully read updated wait flag.
	 */
	*thread = previous;
	return;
}
#endif /* CK_F_SPINLOCK_HCLH */
#endif /* CK_SPINLOCK_HCLH_H */