//===-- Shared memory RPC client / server interface -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a remote procedure call mechanism to communicate between
// heterogeneous devices that can share an address space atomically. We provide
// a client and a server to facilitate the remote call. The client makes
// requests to the server using a shared communication channel. We use separate
// atomic signals to indicate which side, the client or the server, owns the
// buffer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIBC_SHARED_RPC_H
#define LLVM_LIBC_SHARED_RPC_H

#include "rpc_util.h"

#include <stdint.h>

#ifndef RPC_INLINE
#define RPC_INLINE inline
#endif

namespace rpc {

/// Use scoped atomic variants if they are available for the target.
#if !__has_builtin(__scoped_atomic_load_n)
#define __scoped_atomic_load_n(src, ord, scp) __atomic_load_n(src, ord)
#define __scoped_atomic_store_n(dst, src, ord, scp)                            \
  __atomic_store_n(dst, src, ord)
#define __scoped_atomic_fetch_or(src, val, ord, scp)                           \
  __atomic_fetch_or(src, val, ord)
#define __scoped_atomic_fetch_and(src, val, ord, scp)                          \
  __atomic_fetch_and(src, val, ord)
#endif
#if !__has_builtin(__scoped_atomic_thread_fence)
#define __scoped_atomic_thread_fence(ord, scp) __atomic_thread_fence(ord)
#endif

/// Generic codes that can be used when implementing the server.
enum Status {
  SUCCESS = 0x0,
  ERROR = 0x1000,
  UNHANDLED_OPCODE = 0x1001,
};

/// A fixed size channel used to communicate between the RPC client and server.
struct Buffer {
  uint64_t data[8];
};
static_assert(sizeof(Buffer) == 64, "Buffer size mismatch");

/// The information associated with a packet. This indicates which operations to
/// perform and which threads are active in the slots.
struct Header {
  uint64_t mask;
  uint32_t opcode;
};

/// The maximum number of parallel ports that the RPC interface can support.
constexpr static uint64_t MAX_PORT_COUNT = 4096;

/// A common process used to synchronize communication between a client and a
/// server. The process contains a read-only inbox and a write-only outbox used
/// for signaling ownership of the shared buffer between both sides. We assign
/// ownership of the buffer to the client if the inbox and outbox bits match,
/// otherwise it is owned by the server.
///
/// This process is designed to allow the client and the server to exchange data
/// using a fixed size packet in a mostly arbitrary order using the 'send' and
/// 'recv' operations. The following restrictions to this scheme apply:
/// - The client will always start with a 'send' operation.
/// - The server will always start with a 'recv' operation.
/// - Every 'send' or 'recv' call is mirrored by the other process.
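///
/// For example, a minimal round trip (a sketch of the intended call pattern,
/// not code from this interface) mirrors every operation:
///
///   client: port.send(fill); // Writes the buffer, then flips its outbox.
///   server: port.recv(use);  // Waits for ownership, then reads the buffer.
///   server: port.send(fill); // Writes the reply, handing ownership back.
///   client: port.recv(use);  // Consumes the reply.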
template <bool Invert> struct Process {
  RPC_INLINE Process() = default;
  RPC_INLINE Process(const Process &) = delete;
  RPC_INLINE Process &operator=(const Process &) = delete;
  RPC_INLINE Process(Process &&) = default;
  RPC_INLINE Process &operator=(Process &&) = default;
  RPC_INLINE ~Process() = default;

  const uint32_t port_count = 0;
  const uint32_t *const inbox = nullptr;
  uint32_t *const outbox = nullptr;
  Header *const header = nullptr;
  Buffer *const packet = nullptr;

  static constexpr uint64_t NUM_BITS_IN_WORD = sizeof(uint32_t) * 8;
  uint32_t lock[MAX_PORT_COUNT / NUM_BITS_IN_WORD] = {0};

  RPC_INLINE Process(uint32_t port_count, void *buffer)
      : port_count(port_count), inbox(reinterpret_cast<uint32_t *>(
                                    advance(buffer, inbox_offset(port_count)))),
        outbox(reinterpret_cast<uint32_t *>(
            advance(buffer, outbox_offset(port_count)))),
        header(reinterpret_cast<Header *>(
            advance(buffer, header_offset(port_count)))),
        packet(reinterpret_cast<Buffer *>(
            advance(buffer, buffer_offset(port_count)))) {}

  /// Allocate a memory buffer sufficient to store the following equivalent
  /// representation in memory.
  ///
  /// struct Equivalent {
  ///   Atomic<uint32_t> primary[port_count];
  ///   Atomic<uint32_t> secondary[port_count];
  ///   Header header[port_count];
  ///   Buffer packet[port_count][lane_size];
  /// };
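  ///
  /// For example (a sketch of the arithmetic, assuming sizeof(Header) == 16
  /// and the 64-byte Buffer above), port_count = 64 and lane_size = 32 give:
  ///   2 * 64 * sizeof(uint32_t) =    512 bytes of mailboxes,
  ///   64 * 16                   =   1024 bytes of headers,
  ///   64 * 32 * 64              = 131072 bytes of packet buffers,
  /// for a total of 132608 bytes once each region is aligned.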
  RPC_INLINE static constexpr uint64_t allocation_size(uint32_t port_count,
                                                       uint32_t lane_size) {
    return buffer_offset(port_count) + buffer_bytes(port_count, lane_size);
  }

  /// Retrieve the inbox state from memory shared between processes.
  RPC_INLINE uint32_t load_inbox(uint64_t lane_mask, uint32_t index) const {
    return rpc::broadcast_value(
        lane_mask, __scoped_atomic_load_n(&inbox[index], __ATOMIC_RELAXED,
                                          __MEMORY_SCOPE_SYSTEM));
  }

  /// Retrieve the outbox state from memory shared between processes.
  RPC_INLINE uint32_t load_outbox(uint64_t lane_mask, uint32_t index) const {
    return rpc::broadcast_value(
        lane_mask, __scoped_atomic_load_n(&outbox[index], __ATOMIC_RELAXED,
                                          __MEMORY_SCOPE_SYSTEM));
  }

  /// Signal to the other process that this one is finished with the buffer.
  /// Equivalent to loading the outbox followed by a store of the inverted
  /// value. The outbox is written only by this warp, so tracking the value
  /// locally is cheaper than calling load_outbox to get the value to store.
  RPC_INLINE uint32_t invert_outbox(uint32_t index, uint32_t current_outbox) {
    uint32_t inverted_outbox = !current_outbox;
    __scoped_atomic_thread_fence(__ATOMIC_RELEASE, __MEMORY_SCOPE_SYSTEM);
    __scoped_atomic_store_n(&outbox[index], inverted_outbox, __ATOMIC_RELAXED,
                            __MEMORY_SCOPE_SYSTEM);
    return inverted_outbox;
  }

  // Given the current outbox and inbox values, wait until the inbox changes
  // to indicate that this thread owns the buffer element.
  RPC_INLINE void wait_for_ownership(uint64_t lane_mask, uint32_t index,
                                     uint32_t outbox, uint32_t in) {
    while (buffer_unavailable(in, outbox)) {
      sleep_briefly();
      in = load_inbox(lane_mask, index);
    }
    __scoped_atomic_thread_fence(__ATOMIC_ACQUIRE, __MEMORY_SCOPE_SYSTEM);
  }

  /// The packet is a linearly allocated array of buffers used to communicate
  /// with the other process. This function returns the appropriate slot in this
  /// array such that the process can operate on an entire warp or wavefront.
  RPC_INLINE Buffer *get_packet(uint32_t index, uint32_t lane_size) {
    return &packet[index * lane_size];
  }

  /// Determines if this process needs to wait for ownership of the buffer. We
  /// invert the condition on one of the processes to indicate that if one
  /// process owns the buffer then the other does not.
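  ///
  /// Concretely (a restatement of the ownership rule above, where matching
  /// inbox and outbox bits mean the client owns the buffer):
  ///   Client (Invert == false): waits while in != out.
  ///   Server (Invert == true):  waits while in == out.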
  RPC_INLINE static bool buffer_unavailable(uint32_t in, uint32_t out) {
    bool cond = in != out;
    return Invert ? !cond : cond;
  }

  /// Attempt to claim the lock at index. Return true on lock taken.
  /// lane_mask is a bitmap of the threads in the warp that would hold the
  /// single lock on success, e.g. the result of rpc::get_lane_mask().
  /// The lock is held when the n-th bit of the lock bitfield is set.
  RPC_INLINE bool try_lock(uint64_t lane_mask, uint32_t index) {
    // On amdgpu, a test and set to the nth lock bit followed by a sync_lane
    // would suffice. On volta, we need to handle differences between the
    // threads running now and the threads that were detected in the previous
    // call to get_lane_mask().
    //
    // All threads in lane_mask try to claim the lock. At most one can succeed.
    // There may be threads active which are not in lane_mask and which must
    // not succeed in taking the lock, as otherwise it will leak. This is
    // handled by having threads that are not in lane_mask fetch_or a zero,
    // making the operation a no-op for them.
    uint32_t id = rpc::get_lane_id();
    bool id_in_lane_mask = lane_mask & (1ul << id);

    // All threads in the warp call fetch_or, possibly at the same time.
    bool before = set_nth(lock, index, id_in_lane_mask);
    uint64_t packed = rpc::ballot(lane_mask, before);

    // If every bit set in lane_mask is also set in packed, every single thread
    // in the warp failed to get the lock. Ballot returns unset for threads not
    // in the lane mask.
    //
    // Cases, per thread:
    //   mask == 0 -> unspecified before, discarded by ballot -> 0
    //   mask == 1 and before == 0 (success), set zero by ballot -> 0
    //   mask == 1 and before == 1 (failure), set one by ballot -> 1
    //
    // mask != packed implies at least one of the threads got the lock, and the
    // atomic semantics of fetch_or mean at most one of the threads could have.

    // If holding the lock then the caller can load values knowing said loads
    // won't move past the lock. No such guarantee is needed if the lock acquire
    // failed. This conditional branch is expected to fold in the caller after
    // inlining the current function.
    bool holding_lock = lane_mask != packed;
    if (holding_lock)
      __scoped_atomic_thread_fence(__ATOMIC_ACQUIRE, __MEMORY_SCOPE_DEVICE);
    return holding_lock;
  }

  /// Unlock the lock at index. We need a lane sync to keep this function
  /// convergent, otherwise the compiler will sink the store and deadlock.
  RPC_INLINE void unlock(uint64_t lane_mask, uint32_t index) {
    // Do not move any writes past the unlock.
    __scoped_atomic_thread_fence(__ATOMIC_RELEASE, __MEMORY_SCOPE_DEVICE);

    // Use exactly one thread to clear the nth bit in the lock array. We must
    // restrict this to a single thread to avoid one thread dropping the lock,
    // then an unrelated warp claiming the lock, then a second thread in this
    // warp dropping the lock again.
    clear_nth(lock, index, rpc::is_first_lane(lane_mask));
    rpc::sync_lane(lane_mask);
  }

  /// Number of bytes to allocate for an inbox or outbox.
  RPC_INLINE static constexpr uint64_t mailbox_bytes(uint32_t port_count) {
    return port_count * sizeof(uint32_t);
  }

  /// Number of bytes to allocate for the buffer containing the packets.
  RPC_INLINE static constexpr uint64_t buffer_bytes(uint32_t port_count,
                                                    uint32_t lane_size) {
    return port_count * lane_size * sizeof(Buffer);
  }

  /// Offset of the inbox in memory. This is the same as the outbox if inverted.
  RPC_INLINE static constexpr uint64_t inbox_offset(uint32_t port_count) {
    return Invert ? mailbox_bytes(port_count) : 0;
  }

  /// Offset of the outbox in memory. This is the same as the inbox if inverted.
  RPC_INLINE static constexpr uint64_t outbox_offset(uint32_t port_count) {
    return Invert ? 0 : mailbox_bytes(port_count);
  }

  /// Offset of the header array in memory, after the inbox and outbox.
  RPC_INLINE static constexpr uint64_t header_offset(uint32_t port_count) {
    return align_up(2 * mailbox_bytes(port_count), alignof(Header));
  }

  /// Offset of the buffer containing the packets, after the header array.
  RPC_INLINE static constexpr uint64_t buffer_offset(uint32_t port_count) {
    return align_up(header_offset(port_count) + port_count * sizeof(Header),
                    alignof(Buffer));
  }

  /// Conditionally set the n-th bit in the atomic bitfield.
  RPC_INLINE static constexpr uint32_t set_nth(uint32_t *bits, uint32_t index,
                                               bool cond) {
    uint32_t slot = index / NUM_BITS_IN_WORD;
    uint32_t bit = index % NUM_BITS_IN_WORD;
    return __scoped_atomic_fetch_or(&bits[slot],
                                    static_cast<uint32_t>(cond) << bit,
                                    __ATOMIC_RELAXED, __MEMORY_SCOPE_DEVICE) &
           (1u << bit);
  }

  /// Conditionally clear the n-th bit in the atomic bitfield.
  RPC_INLINE static constexpr uint32_t clear_nth(uint32_t *bits, uint32_t index,
                                                 bool cond) {
    uint32_t slot = index / NUM_BITS_IN_WORD;
    uint32_t bit = index % NUM_BITS_IN_WORD;
    return __scoped_atomic_fetch_and(&bits[slot],
                                     ~0u ^ (static_cast<uint32_t>(cond) << bit),
                                     __ATOMIC_RELAXED, __MEMORY_SCOPE_DEVICE) &
           (1u << bit);
  }
};

/// Invokes a function across every active buffer across the total lane size.
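/// The callable receives the per-lane buffer slot and the lane id. For
/// example (a sketch; the surrounding values come from an open port):
///
///   invoke_rpc([](Buffer *buf, uint32_t id) { buf->data[0] = id; },
///              lane_size, lane_mask, packet_slot);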
template <typename F>
RPC_INLINE static void invoke_rpc(F &&fn, uint32_t lane_size,
                                  uint64_t lane_mask, Buffer *slot) {
  if constexpr (is_process_gpu()) {
    fn(&slot[rpc::get_lane_id()], rpc::get_lane_id());
  } else {
    for (uint32_t i = 0; i < lane_size; i += rpc::get_num_lanes())
      if (lane_mask & (1ul << i))
        fn(&slot[i], i);
  }
}

/// The port provides the interface to communicate between the two processes. A
/// port is conceptually an index into the memory provided by the underlying
/// process that is guarded by a lock bit.
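///
/// A typical client-side exchange looks like the following sketch, where
/// `ECHO` stands in for an opcode defined by the user of this interface:
///
///   rpc::Client::Port port = client.open<ECHO>();
///   port.send([](rpc::Buffer *buf, uint32_t) { buf->data[0] = 1u; });
///   port.recv([](rpc::Buffer *buf, uint32_t) { /* use buf->data[0] */ });
///   port.close();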
template <bool T> struct Port {
  RPC_INLINE Port(Process<T> &process, uint64_t lane_mask, uint32_t lane_size,
                  uint32_t index, uint32_t out)
      : process(process), lane_mask(lane_mask), lane_size(lane_size),
        index(index), out(out), receive(false), owns_buffer(true) {}
  RPC_INLINE ~Port() = default;

private:
  RPC_INLINE Port(const Port &) = delete;
  RPC_INLINE Port &operator=(const Port &) = delete;
  RPC_INLINE Port(Port &&) = default;
  RPC_INLINE Port &operator=(Port &&) = default;

  friend struct Client;
  friend struct Server;
  friend class rpc::optional<Port<T>>;

public:
  template <typename U> RPC_INLINE void recv(U use);
  template <typename F> RPC_INLINE void send(F fill);
  template <typename F, typename U>
  RPC_INLINE void send_and_recv(F fill, U use);
  template <typename W> RPC_INLINE void recv_and_send(W work);
  RPC_INLINE void send_n(const void *const *src, uint64_t *size);
  RPC_INLINE void send_n(const void *src, uint64_t size);
  template <typename A>
  RPC_INLINE void recv_n(void **dst, uint64_t *size, A &&alloc);

  RPC_INLINE uint32_t get_opcode() const {
    return process.header[index].opcode;
  }

  RPC_INLINE uint32_t get_index() const { return index; }

  RPC_INLINE void close() {
    // Wait for all lanes to finish using the port.
    rpc::sync_lane(lane_mask);

    // The server is passive; if it owns the buffer when it closes, we need to
    // give ownership back to the client.
    if (owns_buffer && T)
      out = process.invert_outbox(index, out);
    process.unlock(lane_mask, index);
  }

private:
  Process<T> &process;
  uint64_t lane_mask;
  uint32_t lane_size;
  uint32_t index;
  uint32_t out;
  bool receive;
  bool owns_buffer;
};

/// The RPC client used to make requests to the server.
struct Client {
  RPC_INLINE Client() = default;
  RPC_INLINE Client(const Client &) = delete;
  RPC_INLINE Client &operator=(const Client &) = delete;
  RPC_INLINE ~Client() = default;

  RPC_INLINE Client(uint32_t port_count, void *buffer)
      : process(port_count, buffer) {}

  using Port = rpc::Port<false>;
  template <uint32_t opcode> RPC_INLINE Port open();

private:
  Process<false> process;
};

/// The RPC server used to respond to the client.
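///
/// A typical service loop looks like the following sketch; dispatching on the
/// opcode and supplying the device's lane size are left to the implementer:
///
///   rpc::Server server(port_count, buffer);
///   for (;;) {
///     rpc::optional<rpc::Server::Port> p = server.try_open(lane_size);
///     if (!p)
///       continue;
///     if (p.value().get_opcode() == ECHO)
///       p.value().recv_and_send([](rpc::Buffer *, uint32_t) { /* ... */ });
///     p.value().close();
///   }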
struct Server {
  RPC_INLINE Server() = default;
  RPC_INLINE Server(const Server &) = delete;
  RPC_INLINE Server &operator=(const Server &) = delete;
  RPC_INLINE ~Server() = default;

  RPC_INLINE Server(uint32_t port_count, void *buffer)
      : process(port_count, buffer) {}

  using Port = rpc::Port<true>;
  RPC_INLINE rpc::optional<Port> try_open(uint32_t lane_size,
                                          uint32_t start = 0);
  RPC_INLINE Port open(uint32_t lane_size);

  RPC_INLINE static uint64_t allocation_size(uint32_t lane_size,
                                             uint32_t port_count) {
    return Process<true>::allocation_size(port_count, lane_size);
  }

private:
  Process<true> process;
};

/// Applies \p fill to the shared buffer and initiates a send operation.
template <bool T> template <typename F> RPC_INLINE void Port<T>::send(F fill) {
  uint32_t in = owns_buffer ? out ^ T : process.load_inbox(lane_mask, index);

  // We need to wait until we own the buffer before sending.
  process.wait_for_ownership(lane_mask, index, out, in);

  // Apply the \p fill function to initialize the buffer and release the memory.
  invoke_rpc(fill, lane_size, process.header[index].mask,
             process.get_packet(index, lane_size));
  out = process.invert_outbox(index, out);
  owns_buffer = false;
  receive = false;
}

/// Applies \p use to the shared buffer and acknowledges the send.
template <bool T> template <typename U> RPC_INLINE void Port<T>::recv(U use) {
  // We only exchange ownership of the buffer during a receive if we are waiting
  // for a previous receive to finish.
  if (receive) {
    out = process.invert_outbox(index, out);
    owns_buffer = false;
  }

  uint32_t in = owns_buffer ? out ^ T : process.load_inbox(lane_mask, index);

  // We need to wait until we own the buffer before receiving.
  process.wait_for_ownership(lane_mask, index, out, in);

  // Apply the \p use function to read the memory out of the buffer.
  invoke_rpc(use, lane_size, process.header[index].mask,
             process.get_packet(index, lane_size));
  receive = true;
  owns_buffer = true;
}

/// Combines a send and receive into a single function.
template <bool T>
template <typename F, typename U>
RPC_INLINE void Port<T>::send_and_recv(F fill, U use) {
  send(fill);
  recv(use);
}

/// Combines a receive and send operation into a single function. The \p work
/// function modifies the buffer in-place and the send is only used to initiate
/// the copy back.
template <bool T>
template <typename W>
RPC_INLINE void Port<T>::recv_and_send(W work) {
  recv(work);
  send([](Buffer *, uint32_t) { /* no-op */ });
}

/// Helper routine to simplify the interface when sending from the GPU using
/// thread private pointers to the underlying value.
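///
/// For example (a sketch; the string and its length are illustrative):
///
///   const char *str = "hello";
///   port.send_n(str, 6); // Streams six bytes, including the trailing '\0'.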
template <bool T>
RPC_INLINE void Port<T>::send_n(const void *src, uint64_t size) {
  const void **src_ptr = &src;
  uint64_t *size_ptr = &size;
  send_n(src_ptr, size_ptr);
}

/// Sends an arbitrarily sized data buffer \p src across the shared channel in
/// multiples of the packet length.
template <bool T>
RPC_INLINE void Port<T>::send_n(const void *const *src, uint64_t *size) {
  uint64_t num_sends = 0;
  send([&](Buffer *buffer, uint32_t id) {
    reinterpret_cast<uint64_t *>(buffer->data)[0] = lane_value(size, id);
    num_sends = is_process_gpu() ? lane_value(size, id)
                                 : rpc::max(lane_value(size, id), num_sends);
    uint64_t len =
        lane_value(size, id) > sizeof(Buffer::data) - sizeof(uint64_t)
            ? sizeof(Buffer::data) - sizeof(uint64_t)
            : lane_value(size, id);
    rpc_memcpy(&buffer->data[1], lane_value(src, id), len);
  });
  uint64_t idx = sizeof(Buffer::data) - sizeof(uint64_t);
  uint64_t mask = process.header[index].mask;
  while (rpc::ballot(mask, idx < num_sends)) {
    send([=](Buffer *buffer, uint32_t id) {
      uint64_t len = lane_value(size, id) - idx > sizeof(Buffer::data)
                         ? sizeof(Buffer::data)
                         : lane_value(size, id) - idx;
      if (idx < lane_value(size, id))
        rpc_memcpy(buffer->data, advance(lane_value(src, id), idx), len);
    });
    idx += sizeof(Buffer::data);
  }
}

/// Receives an arbitrarily sized data buffer across the shared channel in
/// multiples of the packet length. The \p alloc function is called with the
/// size of the data so that we can initialize the size of the \p dst buffer.
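///
/// For example (a sketch; `malloc` stands in for whatever allocator the
/// caller wants the received bytes placed in):
///
///   void *dst;
///   uint64_t size;
///   port.recv_n(&dst, &size, [](uint64_t n) { return malloc(n); });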
template <bool T>
template <typename A>
RPC_INLINE void Port<T>::recv_n(void **dst, uint64_t *size, A &&alloc) {
  uint64_t num_recvs = 0;
  recv([&](Buffer *buffer, uint32_t id) {
    lane_value(size, id) = reinterpret_cast<uint64_t *>(buffer->data)[0];
    lane_value(dst, id) =
        reinterpret_cast<uint8_t *>(alloc(lane_value(size, id)));
    num_recvs = is_process_gpu() ? lane_value(size, id)
                                 : rpc::max(lane_value(size, id), num_recvs);
    uint64_t len =
        lane_value(size, id) > sizeof(Buffer::data) - sizeof(uint64_t)
            ? sizeof(Buffer::data) - sizeof(uint64_t)
            : lane_value(size, id);
    rpc_memcpy(lane_value(dst, id), &buffer->data[1], len);
  });
  uint64_t idx = sizeof(Buffer::data) - sizeof(uint64_t);
  uint64_t mask = process.header[index].mask;
  while (rpc::ballot(mask, idx < num_recvs)) {
    recv([=](Buffer *buffer, uint32_t id) {
      uint64_t len = lane_value(size, id) - idx > sizeof(Buffer::data)
                         ? sizeof(Buffer::data)
                         : lane_value(size, id) - idx;
      if (idx < lane_value(size, id))
        rpc_memcpy(advance(lane_value(dst, id), idx), buffer->data, len);
    });
    idx += sizeof(Buffer::data);
  }
}

/// Continually attempts to open a port to use as the client. The client can
/// only open a port if we find an index that is in a valid sending state. That
/// is, there are send operations pending that haven't been serviced on this
/// port. Each port instance uses an associated \p opcode to tell the server
/// what to do. The Client interface supplies the port with the lane size
/// reported by the platform.
template <uint32_t opcode> RPC_INLINE Client::Port Client::open() {
  // Repeatedly perform a naive linear scan for a port that can be opened to
  // send data.
  for (uint32_t index = 0;; ++index) {
    // Start from the beginning if we run out of ports to check.
    if (index >= process.port_count)
      index = 0;

    // Attempt to acquire the lock on this index.
    uint64_t lane_mask = rpc::get_lane_mask();
    if (!process.try_lock(lane_mask, index))
      continue;

    uint32_t in = process.load_inbox(lane_mask, index);
    uint32_t out = process.load_outbox(lane_mask, index);

    // Once we acquire the index we need to check if we are in a valid sending
    // state.
    if (process.buffer_unavailable(in, out)) {
      process.unlock(lane_mask, index);
      continue;
    }

    if (rpc::is_first_lane(lane_mask)) {
      process.header[index].opcode = opcode;
      process.header[index].mask = lane_mask;
    }
    rpc::sync_lane(lane_mask);
    return Port(process, lane_mask, rpc::get_num_lanes(), index, out);
  }
}

/// Attempts to open a port to use as the server. The server can only open a
/// port if it has a pending receive operation.
RPC_INLINE rpc::optional<typename Server::Port>
Server::try_open(uint32_t lane_size, uint32_t start) {
  // Perform a naive linear scan for a port that has a pending request.
  for (uint32_t index = start; index < process.port_count; ++index) {
    uint64_t lane_mask = rpc::get_lane_mask();
    uint32_t in = process.load_inbox(lane_mask, index);
    uint32_t out = process.load_outbox(lane_mask, index);

    // The server is passive; if there is no work pending, don't bother
    // opening a port.
    if (process.buffer_unavailable(in, out))
      continue;

    // Attempt to acquire the lock on this index.
    if (!process.try_lock(lane_mask, index))
      continue;

    in = process.load_inbox(lane_mask, index);
    out = process.load_outbox(lane_mask, index);

    if (process.buffer_unavailable(in, out)) {
      process.unlock(lane_mask, index);
      continue;
    }

    return Port(process, lane_mask, lane_size, index, out);
  }
  return rpc::nullopt;
}

RPC_INLINE Server::Port Server::open(uint32_t lane_size) {
  for (;;) {
    if (rpc::optional<Server::Port> p = try_open(lane_size))
      return rpc::move(p.value());
    sleep_briefly();
  }
}

#if !__has_builtin(__scoped_atomic_load_n)
#undef __scoped_atomic_load_n
#undef __scoped_atomic_store_n
#undef __scoped_atomic_fetch_or
#undef __scoped_atomic_fetch_and
#endif
#if !__has_builtin(__scoped_atomic_thread_fence)
#undef __scoped_atomic_thread_fence
#endif

} // namespace rpc

#endif // LLVM_LIBC_SHARED_RPC_H