// Path: blob/main/contrib/llvm-project/libcxx/include/__stop_token/stop_state.h
// (35236 views)
// -*- C++ -*-1//===----------------------------------------------------------------------===//2//3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.4// See https://llvm.org/LICENSE.txt for license information.5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception6//7//===----------------------------------------------------------------------===//89#ifndef _LIBCPP___STOP_TOKEN_STOP_STATE_H10#define _LIBCPP___STOP_TOKEN_STOP_STATE_H1112#include <__assert>13#include <__config>14#include <__stop_token/atomic_unique_lock.h>15#include <__stop_token/intrusive_list_view.h>16#include <__thread/id.h>17#include <atomic>18#include <cstdint>1920#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)21# pragma GCC system_header22#endif2324_LIBCPP_BEGIN_NAMESPACE_STD2526#if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_THREADS)2728struct __stop_callback_base : __intrusive_node_base<__stop_callback_base> {29using __callback_fn_t = void(__stop_callback_base*) noexcept;30_LIBCPP_HIDE_FROM_ABI explicit __stop_callback_base(__callback_fn_t* __callback_fn) : __callback_fn_(__callback_fn) {}3132_LIBCPP_HIDE_FROM_ABI void __invoke() noexcept { __callback_fn_(this); }3334__callback_fn_t* __callback_fn_;35atomic<bool> __completed_ = false;36bool* __destroyed_ = nullptr;37};3839class __stop_state {40static constexpr uint32_t __stop_requested_bit = 1;41static constexpr uint32_t __callback_list_locked_bit = 1 << 1;42static constexpr uint32_t __stop_source_counter_shift = 2;4344// The "stop_source counter" is not used for lifetime reference counting.45// When the number of stop_source reaches 0, the remaining stop_tokens's46// stop_possible will return false. 
We need this counter to track this.47//48// The "callback list locked" bit implements the atomic_unique_lock to49// guard the operations on the callback list50//51// 31 - 2 | 1 | 0 |52// stop_source counter | callback list locked | stop_requested |53atomic<uint32_t> __state_ = 0;5455// Reference count for stop_token + stop_callback + stop_source56// When the counter reaches zero, the state is destroyed57// It is used by __intrusive_shared_ptr, but it is stored here for better layout58atomic<uint32_t> __ref_count_ = 0;5960using __state_t = uint32_t;61using __callback_list_lock = __atomic_unique_lock<__state_t, __callback_list_locked_bit>;62using __callback_list = __intrusive_list_view<__stop_callback_base>;6364__callback_list __callback_list_;65__thread_id __requesting_thread_;6667public:68_LIBCPP_HIDE_FROM_ABI __stop_state() noexcept = default;6970_LIBCPP_HIDE_FROM_ABI void __increment_stop_source_counter() noexcept {71_LIBCPP_ASSERT_UNCATEGORIZED(72__state_.load(std::memory_order_relaxed) <= static_cast<__state_t>(~(1 << __stop_source_counter_shift)),73"stop_source's counter reaches the maximum. Incrementing the counter will overflow");74__state_.fetch_add(1 << __stop_source_counter_shift, std::memory_order_relaxed);75}7677// We are not destroying the object after counter decrements to zero, nor do we have78// operations depend on the ordering of decrementing the counter. relaxed is enough.79_LIBCPP_HIDE_FROM_ABI void __decrement_stop_source_counter() noexcept {80_LIBCPP_ASSERT_UNCATEGORIZED(81__state_.load(std::memory_order_relaxed) >= static_cast<__state_t>(1 << __stop_source_counter_shift),82"stop_source's counter is 0. 
Decrementing the counter will underflow");83__state_.fetch_sub(1 << __stop_source_counter_shift, std::memory_order_relaxed);84}8586_LIBCPP_HIDE_FROM_ABI bool __stop_requested() const noexcept {87// acquire because [thread.stoptoken.intro] A call to request_stop that returns true88// synchronizes with a call to stop_requested on an associated stop_token or stop_source89// object that returns true.90// request_stop's compare_exchange_weak has release which syncs with this acquire91return (__state_.load(std::memory_order_acquire) & __stop_requested_bit) != 0;92}9394_LIBCPP_HIDE_FROM_ABI bool __stop_possible_for_stop_token() const noexcept {95// [stoptoken.mem] false if "a stop request was not made and there are no associated stop_source objects"96// Todo: Can this be std::memory_order_relaxed as the standard does not say anything except not to introduce data97// race?98__state_t __curent_state = __state_.load(std::memory_order_acquire);99return ((__curent_state & __stop_requested_bit) != 0) || ((__curent_state >> __stop_source_counter_shift) != 0);100}101102_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI bool __request_stop() noexcept {103auto __cb_list_lock = __try_lock_for_request_stop();104if (!__cb_list_lock.__owns_lock()) {105return false;106}107__requesting_thread_ = this_thread::get_id();108109while (!__callback_list_.__empty()) {110auto __cb = __callback_list_.__pop_front();111112// allow other callbacks to be removed while invoking the current callback113__cb_list_lock.__unlock();114115bool __destroyed = false;116__cb->__destroyed_ = &__destroyed;117118__cb->__invoke();119120// __cb's invoke function could potentially delete itself. 
We need to check before accessing __cb's member121if (!__destroyed) {122// needs to set __destroyed_ pointer to nullptr, otherwise it points to a local variable123// which is to be destroyed at the end of the loop124__cb->__destroyed_ = nullptr;125126// [stopcallback.cons] If callback is concurrently executing on another thread, then the return127// from the invocation of callback strongly happens before ([intro.races]) callback is destroyed.128// this release syncs with the acquire in the remove_callback129__cb->__completed_.store(true, std::memory_order_release);130__cb->__completed_.notify_all();131}132133__cb_list_lock.__lock();134}135136return true;137}138139_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI bool __add_callback(__stop_callback_base* __cb) noexcept {140// If it is already stop_requested. Do not try to request it again.141const auto __give_up_trying_to_lock_condition = [__cb](__state_t __state) {142if ((__state & __stop_requested_bit) != 0) {143// already stop requested, synchronously run the callback and no need to lock the list again144__cb->__invoke();145return true;146}147// no stop source. 
no need to lock the list to add the callback as it can never be invoked148return (__state >> __stop_source_counter_shift) == 0;149};150151__callback_list_lock __cb_list_lock(__state_, __give_up_trying_to_lock_condition);152153if (!__cb_list_lock.__owns_lock()) {154return false;155}156157__callback_list_.__push_front(__cb);158159return true;160// unlock here: [thread.stoptoken.intro] Registration of a callback synchronizes with the invocation of161// that callback.162// Note: this release sync with the acquire in the request_stop' __try_lock_for_request_stop163}164165// called by the destructor of stop_callback166_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void __remove_callback(__stop_callback_base* __cb) noexcept {167__callback_list_lock __cb_list_lock(__state_);168169// under below condition, the request_stop call just popped __cb from the list and could execute it now170bool __potentially_executing_now = __cb->__prev_ == nullptr && !__callback_list_.__is_head(__cb);171172if (__potentially_executing_now) {173auto __requested_thread = __requesting_thread_;174__cb_list_lock.__unlock();175176if (std::this_thread::get_id() != __requested_thread) {177// [stopcallback.cons] If callback is concurrently executing on another thread, then the return178// from the invocation of callback strongly happens before ([intro.races]) callback is destroyed.179__cb->__completed_.wait(false, std::memory_order_acquire);180} else {181// The destructor of stop_callback runs on the same thread of the thread that invokes the callback.182// The callback is potentially invoking its own destuctor. 
Set the flag to avoid accessing destroyed183// members on the invoking side184if (__cb->__destroyed_) {185*__cb->__destroyed_ = true;186}187}188} else {189__callback_list_.__remove(__cb);190}191}192193private:194_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI __callback_list_lock __try_lock_for_request_stop() noexcept {195// If it is already stop_requested, do not try to request stop or lock the list again.196const auto __lock_fail_condition = [](__state_t __state) { return (__state & __stop_requested_bit) != 0; };197198// set locked and requested bit at the same time199const auto __after_lock_state = [](__state_t __state) {200return __state | __callback_list_locked_bit | __stop_requested_bit;201};202203// acq because [thread.stoptoken.intro] Registration of a callback synchronizes with the invocation of that204// callback. We are going to invoke the callback after getting the lock, acquire so that we can see the205// registration of a callback (and other writes that happens-before the add_callback)206// Note: the rel (unlock) in the add_callback syncs with this acq207// rel because [thread.stoptoken.intro] A call to request_stop that returns true synchronizes with a call208// to stop_requested on an associated stop_token or stop_source object that returns true.209// We need to make sure that all writes (including user code) before request_stop will be made visible210// to the threads that waiting for `stop_requested == true`211// Note: this rel syncs with the acq in `stop_requested`212const auto __locked_ordering = std::memory_order_acq_rel;213214return __callback_list_lock(__state_, __lock_fail_condition, __after_lock_state, __locked_ordering);215}216217template <class _Tp>218friend struct __intrusive_shared_ptr_traits;219};220221template <class _Tp>222struct __intrusive_shared_ptr_traits;223224template <>225struct __intrusive_shared_ptr_traits<__stop_state> {226_LIBCPP_HIDE_FROM_ABI static atomic<uint32_t>& __get_atomic_ref_count(__stop_state& __state) 
{227return __state.__ref_count_;228}229};230231#endif // _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_THREADS)232233_LIBCPP_END_NAMESPACE_STD234235#endif // _LIBCPP___STOP_TOKEN_STOP_STATE_H236237238