// Path: blob/main/system/lib/llvm-libc/shared/rpc.h
//===-- Shared memory RPC client / server interface -------------*- C++ -*-===//1//2// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.3// See https://llvm.org/LICENSE.txt for license information.4// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception5//6//===----------------------------------------------------------------------===//7//8// This file implements a remote procedure call mechanism to communicate between9// heterogeneous devices that can share an address space atomically. We provide10// a client and a server to facilitate the remote call. The client makes request11// to the server using a shared communication channel. We use separate atomic12// signals to indicate which side, the client or the server is in ownership of13// the buffer.14//15//===----------------------------------------------------------------------===//1617#ifndef LLVM_LIBC_SHARED_RPC_H18#define LLVM_LIBC_SHARED_RPC_H1920#include "rpc_util.h"2122namespace rpc {2324/// Use scoped atomic variants if they are available for the target.25#if !__has_builtin(__scoped_atomic_load_n)26#define __scoped_atomic_load_n(src, ord, scp) __atomic_load_n(src, ord)27#define __scoped_atomic_store_n(dst, src, ord, scp) \28__atomic_store_n(dst, src, ord)29#define __scoped_atomic_fetch_or(src, val, ord, scp) \30__atomic_fetch_or(src, val, ord)31#define __scoped_atomic_fetch_and(src, val, ord, scp) \32__atomic_fetch_and(src, val, ord)33#endif34#if !__has_builtin(__scoped_atomic_thread_fence)35#define __scoped_atomic_thread_fence(ord, scp) __atomic_thread_fence(ord)36#endif3738/// Generic codes that can be used whem implementing the server.39enum Status {40RPC_SUCCESS = 0x0,41RPC_ERROR = 0x1000,42RPC_UNHANDLED_OPCODE = 0x1001,43};4445/// A fixed size channel used to communicate between the RPC client and server.46struct Buffer {47uint64_t data[8];48};49static_assert(sizeof(Buffer) == 64, "Buffer size mismatch");5051/// The information associated with a packet. 
This indicates which operations to52/// perform and which threads are active in the slots.53struct Header {54uint64_t mask;55uint32_t opcode;56};5758/// The maximum number of parallel ports that the RPC interface can support.59constexpr static uint64_t MAX_PORT_COUNT = 4096;6061/// A common process used to synchronize communication between a client and a62/// server. The process contains a read-only inbox and a write-only outbox used63/// for signaling ownership of the shared buffer between both sides. We assign64/// ownership of the buffer to the client if the inbox and outbox bits match,65/// otherwise it is owned by the server.66///67/// This process is designed to allow the client and the server to exchange data68/// using a fixed size packet in a mostly arbitrary order using the 'send' and69/// 'recv' operations. The following restrictions to this scheme apply:70/// - The client will always start with a 'send' operation.71/// - The server will always start with a 'recv' operation.72/// - Every 'send' or 'recv' call is mirrored by the other process.73template <bool Invert> struct Process {74RPC_ATTRS Process() = default;75RPC_ATTRS Process(const Process &) = delete;76RPC_ATTRS Process &operator=(const Process &) = delete;77RPC_ATTRS Process(Process &&) = default;78RPC_ATTRS Process &operator=(Process &&) = default;79RPC_ATTRS ~Process() = default;8081const uint32_t port_count = 0;82const uint32_t *const inbox = nullptr;83uint32_t *const outbox = nullptr;84Header *const header = nullptr;85Buffer *const packet = nullptr;8687static constexpr uint64_t NUM_BITS_IN_WORD = sizeof(uint32_t) * 8;88uint32_t lock[MAX_PORT_COUNT / NUM_BITS_IN_WORD] = {0};8990RPC_ATTRS Process(uint32_t port_count, void *buffer)91: port_count(port_count), inbox(reinterpret_cast<uint32_t *>(92advance(buffer, inbox_offset(port_count)))),93outbox(reinterpret_cast<uint32_t *>(94advance(buffer, outbox_offset(port_count)))),95header(reinterpret_cast<Header *>(96advance(buffer, 
header_offset(port_count)))),97packet(reinterpret_cast<Buffer *>(98advance(buffer, buffer_offset(port_count)))) {}99100/// Allocate a memory buffer sufficient to store the following equivalent101/// representation in memory.102///103/// struct Equivalent {104/// Atomic<uint32_t> primary[port_count];105/// Atomic<uint32_t> secondary[port_count];106/// Header header[port_count];107/// Buffer packet[port_count][lane_size];108/// };109RPC_ATTRS static constexpr uint64_t allocation_size(uint32_t port_count,110uint32_t lane_size) {111return buffer_offset(port_count) + buffer_bytes(port_count, lane_size);112}113114/// Retrieve the inbox state from memory shared between processes.115RPC_ATTRS uint32_t load_inbox(uint64_t lane_mask, uint32_t index) const {116return rpc::broadcast_value(117lane_mask, __scoped_atomic_load_n(&inbox[index], __ATOMIC_RELAXED,118__MEMORY_SCOPE_SYSTEM));119}120121/// Retrieve the outbox state from memory shared between processes.122RPC_ATTRS uint32_t load_outbox(uint64_t lane_mask, uint32_t index) const {123return rpc::broadcast_value(124lane_mask, __scoped_atomic_load_n(&outbox[index], __ATOMIC_RELAXED,125__MEMORY_SCOPE_SYSTEM));126}127128/// Signal to the other process that this one is finished with the buffer.129/// Equivalent to loading outbox followed by store of the inverted value130/// The outbox is write only by this warp and tracking the value locally is131/// cheaper than calling load_outbox to get the value to store.132RPC_ATTRS uint32_t invert_outbox(uint32_t index, uint32_t current_outbox) {133uint32_t inverted_outbox = !current_outbox;134__scoped_atomic_thread_fence(__ATOMIC_RELEASE, __MEMORY_SCOPE_SYSTEM);135__scoped_atomic_store_n(&outbox[index], inverted_outbox, __ATOMIC_RELAXED,136__MEMORY_SCOPE_SYSTEM);137return inverted_outbox;138}139140// Given the current outbox and inbox values, wait until the inbox changes141// to indicate that this thread owns the buffer element.142RPC_ATTRS void wait_for_ownership(uint64_t lane_mask, 
uint32_t index,143uint32_t outbox, uint32_t in) {144while (buffer_unavailable(in, outbox)) {145sleep_briefly();146in = load_inbox(lane_mask, index);147}148__scoped_atomic_thread_fence(__ATOMIC_ACQUIRE, __MEMORY_SCOPE_SYSTEM);149}150151/// The packet is a linearly allocated array of buffers used to communicate152/// with the other process. This function returns the appropriate slot in this153/// array such that the process can operate on an entire warp or wavefront.154RPC_ATTRS Buffer *get_packet(uint32_t index, uint32_t lane_size) {155return &packet[index * lane_size];156}157158/// Determines if this process needs to wait for ownership of the buffer. We159/// invert the condition on one of the processes to indicate that if one160/// process owns the buffer then the other does not.161RPC_ATTRS static bool buffer_unavailable(uint32_t in, uint32_t out) {162bool cond = in != out;163return Invert ? !cond : cond;164}165166/// Attempt to claim the lock at index. Return true on lock taken.167/// lane_mask is a bitmap of the threads in the warp that would hold the168/// single lock on success, e.g. the result of rpc::get_lane_mask()169/// The lock is held when the n-th bit of the lock bitfield is set.170RPC_ATTRS bool try_lock(uint64_t lane_mask, uint32_t index) {171// On amdgpu, test and set to the nth lock bit and a sync_lane would suffice172// On volta, need to handle differences between the threads running and173// the threads that were detected in the previous call to get_lane_mask()174//175// All threads in lane_mask try to claim the lock. At most one can succeed.176// There may be threads active which are not in lane mask which must not177// succeed in taking the lock, as otherwise it will leak. This is handled178// by making threads which are not in lane_mask or with 0, a no-op.179uint32_t id = rpc::get_lane_id();180bool id_in_lane_mask = lane_mask & (1ul << id);181182// All threads in the warp call fetch_or. 
Possibly at the same time.183bool before = set_nth(lock, index, id_in_lane_mask);184uint64_t packed = rpc::ballot(lane_mask, before);185186// If every bit set in lane_mask is also set in packed, every single thread187// in the warp failed to get the lock. Ballot returns unset for threads not188// in the lane mask.189//190// Cases, per thread:191// mask==0 -> unspecified before, discarded by ballot -> 0192// mask==1 and before==0 (success), set zero by ballot -> 0193// mask==1 and before==1 (failure), set one by ballot -> 1194//195// mask != packed implies at least one of the threads got the lock196// atomic semantics of fetch_or mean at most one of the threads for the lock197198// If holding the lock then the caller can load values knowing said loads199// won't move past the lock. No such guarantee is needed if the lock acquire200// failed. This conditional branch is expected to fold in the caller after201// inlining the current function.202bool holding_lock = lane_mask != packed;203if (holding_lock)204__scoped_atomic_thread_fence(__ATOMIC_ACQUIRE, __MEMORY_SCOPE_DEVICE);205return holding_lock;206}207208/// Unlock the lock at index. 
We need a lane sync to keep this function209/// convergent, otherwise the compiler will sink the store and deadlock.210RPC_ATTRS void unlock(uint64_t lane_mask, uint32_t index) {211// Do not move any writes past the unlock.212__scoped_atomic_thread_fence(__ATOMIC_RELEASE, __MEMORY_SCOPE_DEVICE);213214// Use exactly one thread to clear the nth bit in the lock array Must215// restrict to a single thread to avoid one thread dropping the lock, then216// an unrelated warp claiming the lock, then a second thread in this warp217// dropping the lock again.218clear_nth(lock, index, rpc::is_first_lane(lane_mask));219rpc::sync_lane(lane_mask);220}221222/// Number of bytes to allocate for an inbox or outbox.223RPC_ATTRS static constexpr uint64_t mailbox_bytes(uint32_t port_count) {224return port_count * sizeof(uint32_t);225}226227/// Number of bytes to allocate for the buffer containing the packets.228RPC_ATTRS static constexpr uint64_t buffer_bytes(uint32_t port_count,229uint32_t lane_size) {230return port_count * lane_size * sizeof(Buffer);231}232233/// Offset of the inbox in memory. This is the same as the outbox if inverted.234RPC_ATTRS static constexpr uint64_t inbox_offset(uint32_t port_count) {235return Invert ? mailbox_bytes(port_count) : 0;236}237238/// Offset of the outbox in memory. This is the same as the inbox if inverted.239RPC_ATTRS static constexpr uint64_t outbox_offset(uint32_t port_count) {240return Invert ? 
0 : mailbox_bytes(port_count);241}242243/// Offset of the buffer containing the packets after the inbox and outbox.244RPC_ATTRS static constexpr uint64_t header_offset(uint32_t port_count) {245return align_up(2 * mailbox_bytes(port_count), alignof(Header));246}247248/// Offset of the buffer containing the packets after the inbox and outbox.249RPC_ATTRS static constexpr uint64_t buffer_offset(uint32_t port_count) {250return align_up(header_offset(port_count) + port_count * sizeof(Header),251alignof(Buffer));252}253254/// Conditionally set the n-th bit in the atomic bitfield.255RPC_ATTRS static constexpr uint32_t set_nth(uint32_t *bits, uint32_t index,256bool cond) {257uint32_t slot = index / NUM_BITS_IN_WORD;258uint32_t bit = index % NUM_BITS_IN_WORD;259return __scoped_atomic_fetch_or(&bits[slot],260static_cast<uint32_t>(cond) << bit,261__ATOMIC_RELAXED, __MEMORY_SCOPE_DEVICE) &262(1u << bit);263}264265/// Conditionally clear the n-th bit in the atomic bitfield.266RPC_ATTRS static constexpr uint32_t clear_nth(uint32_t *bits, uint32_t index,267bool cond) {268uint32_t slot = index / NUM_BITS_IN_WORD;269uint32_t bit = index % NUM_BITS_IN_WORD;270return __scoped_atomic_fetch_and(&bits[slot],271~0u ^ (static_cast<uint32_t>(cond) << bit),272__ATOMIC_RELAXED, __MEMORY_SCOPE_DEVICE) &273(1u << bit);274}275};276277/// Invokes a function across every active buffer across the total lane size.278template <typename F>279RPC_ATTRS static void invoke_rpc(F &&fn, uint32_t lane_size, uint64_t lane_mask,280Buffer *slot) {281if constexpr (is_process_gpu()) {282fn(&slot[rpc::get_lane_id()], rpc::get_lane_id());283} else {284for (uint32_t i = 0; i < lane_size; i += rpc::get_num_lanes())285if (lane_mask & (1ul << i))286fn(&slot[i], i);287}288}289290/// The port provides the interface to communicate between the multiple291/// processes. 
A port is conceptually an index into the memory provided by the292/// underlying process that is guarded by a lock bit.293template <bool T> struct Port {294RPC_ATTRS Port(Process<T> &process, uint64_t lane_mask, uint32_t lane_size,295uint32_t index, uint32_t out)296: process(process), lane_mask(lane_mask), lane_size(lane_size),297index(index), out(out), receive(false), owns_buffer(true) {}298RPC_ATTRS ~Port() = default;299300private:301RPC_ATTRS Port(const Port &) = delete;302RPC_ATTRS Port &operator=(const Port &) = delete;303RPC_ATTRS Port(Port &&) = default;304RPC_ATTRS Port &operator=(Port &&) = default;305306friend struct Client;307friend struct Server;308friend class rpc::optional<Port<T>>;309310public:311template <typename U> RPC_ATTRS void recv(U use);312template <typename F> RPC_ATTRS void send(F fill);313template <typename F, typename U> RPC_ATTRS void send_and_recv(F fill, U use);314template <typename W> RPC_ATTRS void recv_and_send(W work);315RPC_ATTRS void send_n(const void *const *src, uint64_t *size);316RPC_ATTRS void send_n(const void *src, uint64_t size);317template <typename A>318RPC_ATTRS void recv_n(void **dst, uint64_t *size, A &&alloc);319320RPC_ATTRS uint32_t get_opcode() const { return process.header[index].opcode; }321322RPC_ATTRS uint32_t get_index() const { return index; }323324RPC_ATTRS void close() {325// Wait for all lanes to finish using the port.326rpc::sync_lane(lane_mask);327328// The server is passive, if it own the buffer when it closes we need to329// give ownership back to the client.330if (owns_buffer && T)331out = process.invert_outbox(index, out);332process.unlock(lane_mask, index);333}334335private:336Process<T> &process;337uint64_t lane_mask;338uint32_t lane_size;339uint32_t index;340uint32_t out;341bool receive;342bool owns_buffer;343};344345/// The RPC client used to make requests to the server.346struct Client {347RPC_ATTRS Client() = default;348RPC_ATTRS Client(const Client &) = delete;349RPC_ATTRS Client 
&operator=(const Client &) = delete;350RPC_ATTRS ~Client() = default;351352RPC_ATTRS Client(uint32_t port_count, void *buffer)353: process(port_count, buffer) {}354355using Port = rpc::Port<false>;356template <uint32_t opcode> RPC_ATTRS Port open();357358private:359Process<false> process;360};361362/// The RPC server used to respond to the client.363struct Server {364RPC_ATTRS Server() = default;365RPC_ATTRS Server(const Server &) = delete;366RPC_ATTRS Server &operator=(const Server &) = delete;367RPC_ATTRS ~Server() = default;368369RPC_ATTRS Server(uint32_t port_count, void *buffer)370: process(port_count, buffer) {}371372using Port = rpc::Port<true>;373RPC_ATTRS rpc::optional<Port> try_open(uint32_t lane_size,374uint32_t start = 0);375RPC_ATTRS Port open(uint32_t lane_size);376377RPC_ATTRS static constexpr uint64_t allocation_size(uint32_t lane_size,378uint32_t port_count) {379return Process<true>::allocation_size(port_count, lane_size);380}381382private:383Process<true> process;384};385386/// Applies \p fill to the shared buffer and initiates a send operation.387template <bool T> template <typename F> RPC_ATTRS void Port<T>::send(F fill) {388uint32_t in = owns_buffer ? 
out ^ T : process.load_inbox(lane_mask, index);389390// We need to wait until we own the buffer before sending.391process.wait_for_ownership(lane_mask, index, out, in);392393// Apply the \p fill function to initialize the buffer and release the memory.394invoke_rpc(fill, lane_size, process.header[index].mask,395process.get_packet(index, lane_size));396out = process.invert_outbox(index, out);397owns_buffer = false;398receive = false;399}400401/// Applies \p use to the shared buffer and acknowledges the send.402template <bool T> template <typename U> RPC_ATTRS void Port<T>::recv(U use) {403// We only exchange ownership of the buffer during a receive if we are waiting404// for a previous receive to finish.405if (receive) {406out = process.invert_outbox(index, out);407owns_buffer = false;408}409410uint32_t in = owns_buffer ? out ^ T : process.load_inbox(lane_mask, index);411412// We need to wait until we own the buffer before receiving.413process.wait_for_ownership(lane_mask, index, out, in);414415// Apply the \p use function to read the memory out of the buffer.416invoke_rpc(use, lane_size, process.header[index].mask,417process.get_packet(index, lane_size));418receive = true;419owns_buffer = true;420}421422/// Combines a send and receive into a single function.423template <bool T>424template <typename F, typename U>425RPC_ATTRS void Port<T>::send_and_recv(F fill, U use) {426send(fill);427recv(use);428}429430/// Combines a receive and send operation into a single function. 
The \p work431/// function modifies the buffer in-place and the send is only used to initiate432/// the copy back.433template <bool T>434template <typename W>435RPC_ATTRS void Port<T>::recv_and_send(W work) {436recv(work);437send([](Buffer *, uint32_t) { /* no-op */ });438}439440/// Helper routine to simplify the interface when sending from the GPU using441/// thread private pointers to the underlying value.442template <bool T>443RPC_ATTRS void Port<T>::send_n(const void *src, uint64_t size) {444const void **src_ptr = &src;445uint64_t *size_ptr = &size;446send_n(src_ptr, size_ptr);447}448449/// Sends an arbitrarily sized data buffer \p src across the shared channel in450/// multiples of the packet length.451template <bool T>452RPC_ATTRS void Port<T>::send_n(const void *const *src, uint64_t *size) {453uint64_t num_sends = 0;454send([&](Buffer *buffer, uint32_t id) {455reinterpret_cast<uint64_t *>(buffer->data)[0] = lane_value(size, id);456num_sends = is_process_gpu() ? lane_value(size, id)457: rpc::max(lane_value(size, id), num_sends);458uint64_t len =459lane_value(size, id) > sizeof(Buffer::data) - sizeof(uint64_t)460? sizeof(Buffer::data) - sizeof(uint64_t)461: lane_value(size, id);462rpc_memcpy(&buffer->data[1], lane_value(src, id), len);463});464uint64_t idx = sizeof(Buffer::data) - sizeof(uint64_t);465uint64_t mask = process.header[index].mask;466while (rpc::ballot(mask, idx < num_sends)) {467send([=](Buffer *buffer, uint32_t id) {468uint64_t len = lane_value(size, id) - idx > sizeof(Buffer::data)469? sizeof(Buffer::data)470: lane_value(size, id) - idx;471if (idx < lane_value(size, id))472rpc_memcpy(buffer->data, advance(lane_value(src, id), idx), len);473});474idx += sizeof(Buffer::data);475}476}477478/// Receives an arbitrarily sized data buffer across the shared channel in479/// multiples of the packet length. 
The \p alloc function is called with the480/// size of the data so that we can initialize the size of the \p dst buffer.481template <bool T>482template <typename A>483RPC_ATTRS void Port<T>::recv_n(void **dst, uint64_t *size, A &&alloc) {484uint64_t num_recvs = 0;485recv([&](Buffer *buffer, uint32_t id) {486lane_value(size, id) = reinterpret_cast<uint64_t *>(buffer->data)[0];487lane_value(dst, id) =488reinterpret_cast<uint8_t *>(alloc(lane_value(size, id)));489num_recvs = is_process_gpu() ? lane_value(size, id)490: rpc::max(lane_value(size, id), num_recvs);491uint64_t len =492lane_value(size, id) > sizeof(Buffer::data) - sizeof(uint64_t)493? sizeof(Buffer::data) - sizeof(uint64_t)494: lane_value(size, id);495rpc_memcpy(lane_value(dst, id), &buffer->data[1], len);496});497uint64_t idx = sizeof(Buffer::data) - sizeof(uint64_t);498uint64_t mask = process.header[index].mask;499while (rpc::ballot(mask, idx < num_recvs)) {500recv([=](Buffer *buffer, uint32_t id) {501uint64_t len = lane_value(size, id) - idx > sizeof(Buffer::data)502? sizeof(Buffer::data)503: lane_value(size, id) - idx;504if (idx < lane_value(size, id))505rpc_memcpy(advance(lane_value(dst, id), idx), buffer->data, len);506});507idx += sizeof(Buffer::data);508}509}510511/// Continually attempts to open a port to use as the client. The client can512/// only open a port if we find an index that is in a valid sending state. That513/// is, there are send operations pending that haven't been serviced on this514/// port. Each port instance uses an associated \p opcode to tell the server515/// what to do. 
The Client interface provides the appropriate lane size to the516/// port using the platform's returned value.517template <uint32_t opcode> RPC_ATTRS Client::Port Client::open() {518// Repeatedly perform a naive linear scan for a port that can be opened to519// send data.520for (uint32_t index = 0;; ++index) {521// Start from the beginning if we run out of ports to check.522if (index >= process.port_count)523index = 0;524525// Attempt to acquire the lock on this index.526uint64_t lane_mask = rpc::get_lane_mask();527if (!process.try_lock(lane_mask, index))528continue;529530uint32_t in = process.load_inbox(lane_mask, index);531uint32_t out = process.load_outbox(lane_mask, index);532533// Once we acquire the index we need to check if we are in a valid sending534// state.535if (process.buffer_unavailable(in, out)) {536process.unlock(lane_mask, index);537continue;538}539540if (rpc::is_first_lane(lane_mask)) {541process.header[index].opcode = opcode;542process.header[index].mask = lane_mask;543}544rpc::sync_lane(lane_mask);545return Port(process, lane_mask, rpc::get_num_lanes(), index, out);546}547}548549/// Attempts to open a port to use as the server. 
The server can only open a550/// port if it has a pending receive operation551RPC_ATTRS rpc::optional<typename Server::Port>552Server::try_open(uint32_t lane_size, uint32_t start) {553// Perform a naive linear scan for a port that has a pending request.554for (uint32_t index = start; index < process.port_count; ++index) {555uint64_t lane_mask = rpc::get_lane_mask();556uint32_t in = process.load_inbox(lane_mask, index);557uint32_t out = process.load_outbox(lane_mask, index);558559// The server is passive, if there is no work pending don't bother560// opening a port.561if (process.buffer_unavailable(in, out))562continue;563564// Attempt to acquire the lock on this index.565if (!process.try_lock(lane_mask, index))566continue;567568in = process.load_inbox(lane_mask, index);569out = process.load_outbox(lane_mask, index);570571if (process.buffer_unavailable(in, out)) {572process.unlock(lane_mask, index);573continue;574}575576return Port(process, lane_mask, lane_size, index, out);577}578return rpc::nullopt;579}580581RPC_ATTRS Server::Port Server::open(uint32_t lane_size) {582for (;;) {583if (rpc::optional<Server::Port> p = try_open(lane_size))584return rpc::move(p.value());585sleep_briefly();586}587}588589#undef RPC_ATTRS590#if !__has_builtin(__scoped_atomic_load_n)591#undef __scoped_atomic_load_n592#undef __scoped_atomic_store_n593#undef __scoped_atomic_fetch_or594#undef __scoped_atomic_fetch_and595#endif596#if !__has_builtin(__scoped_atomic_thread_fence)597#undef __scoped_atomic_thread_fence598#endif599600} // namespace rpc601602#endif // LLVM_LIBC_SHARED_RPC_H603604605