/*
 * Copyright 2018 Google
 * SPDX-License-Identifier: MIT
 */
#pragma once

#ifdef __cplusplus
extern "C" {
#endif

#include <stdbool.h>
#include <stdint.h>

#define RING_BUFFER_SHIFT 11
#define RING_BUFFER_SIZE (1 << RING_BUFFER_SHIFT)
#define NUM_CONFIG_FIELDS 32

// Single producer/consumer ring buffer struct that can be shared
// between host and guest as-is.
struct ring_buffer {
    uint32_t host_version;
    uint32_t guest_version;
    uint32_t write_pos;  // Atomically updated by the producer for the consumer
    uint32_t unused0[13];  // Separate cache line
    uint32_t read_pos;  // Atomically updated by the consumer for the producer
    uint32_t read_live_count;
    uint32_t read_yield_count;
    uint32_t read_sleep_us_count;
    uint32_t unused1[12];  // Separate cache line
    uint8_t buf[RING_BUFFER_SIZE];
    uint32_t state;  // An atomically updated variable from both
                     // producer and consumer for other forms of
                     // coordination.
    uint32_t config[NUM_CONFIG_FIELDS];
};

void ring_buffer_init(struct ring_buffer* r);

// Writes or reads |step_size| bytes at a time. Sets errno=EAGAIN if the ring
// is full or empty. Returns the number of step_size steps written or read.
long ring_buffer_write(struct ring_buffer* r, const void* data, uint32_t step_size, uint32_t steps);
long ring_buffer_read(struct ring_buffer* r, void* data, uint32_t step_size, uint32_t steps);
// Like ring_buffer_write / ring_buffer_read, but merely advances the counters
// without reading or writing anything. Returns the number of step_size steps
// advanced.
long ring_buffer_advance_write(struct ring_buffer* r, uint32_t step_size, uint32_t steps);
long ring_buffer_advance_read(struct ring_buffer* r, uint32_t step_size, uint32_t steps);
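
// Example (illustrative sketch, not part of the API): one producer thread and
// one consumer thread sharing the fixed-size ring. The 16-byte record size and
// single-step transfers are assumptions for illustration; in practice the
// struct would live in memory visible to both sides (e.g. host and guest).
//
//   struct ring_buffer r;
//   ring_buffer_init(&r);
//
//   // Producer: try to enqueue one 16-byte record.
//   uint8_t record[16] = {0};
//   if (ring_buffer_write(&r, record, sizeof(record), 1) < 1) {
//       // Ring was full; errno is EAGAIN. Back off and retry.
//   }
//
//   // Consumer: try to dequeue one 16-byte record.
//   uint8_t out[16];
//   if (ring_buffer_read(&r, out, sizeof(out), 1) < 1) {
//       // Ring was empty; errno is EAGAIN.
//   }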

// If we want to work with dynamically allocated buffers, a separate struct is
// needed; the host and guest are in different address spaces and thus have
// different views of the same memory, so each side keeps its own copy of this
// struct.
struct ring_buffer_view {
    uint8_t* buf;
    uint32_t size;
    uint32_t mask;
};

// Convenience struct that holds a pointer to a ring along with a view. It's a
// common pattern for the ring and the buffer of the view to be shared between
// two entities (in this case, usually guest and host).
struct ring_buffer_with_view {
    struct ring_buffer* ring;
    struct ring_buffer_view view;
};

// Calculates the largest shift such that (1 << shift) <= size.
uint32_t ring_buffer_calc_shift(uint32_t size);

// Initializes a ring buffer with a view over |buf|. If |size| is not a power
// of two, the view will assume a size equal to the greatest power of two less
// than |size|.
void ring_buffer_view_init(struct ring_buffer* r, struct ring_buffer_view* v, uint8_t* buf,
                           uint32_t size);

void ring_buffer_init_view_only(struct ring_buffer_view* v, uint8_t* buf, uint32_t size);

// Read/write functions with the view.
long ring_buffer_view_write(struct ring_buffer* r, struct ring_buffer_view* v, const void* data,
                            uint32_t step_size, uint32_t steps);
long ring_buffer_view_read(struct ring_buffer* r, struct ring_buffer_view* v, void* data,
                           uint32_t step_size, uint32_t steps);

// Usage of ring_buffer as a waitable object.
// These functions will back off if spinning too long.
//
// If |v| is null, it is assumed that the statically allocated ring buffer is
// used.
//
// Returns true if the ring buffer became available, false if timed out.
bool ring_buffer_wait_write(const struct ring_buffer* r, const struct ring_buffer_view* v,
                            uint32_t bytes, uint64_t timeout_us);
bool ring_buffer_wait_read(const struct ring_buffer* r, const struct ring_buffer_view* v,
                           uint32_t bytes, uint64_t timeout_us);

// Read/write fully, blocking while the ring is full/empty until all |bytes|
// have been transferred.
void ring_buffer_write_fully(struct ring_buffer* r, struct ring_buffer_view* v, const void* data,
                             uint32_t bytes);
void ring_buffer_read_fully(struct ring_buffer* r, struct ring_buffer_view* v, void* data,
                            uint32_t bytes);

// Like read/write fully, but with an abort value. The value is read from
// |abort_ptr| each time. If |abort_ptr| is null, behaves the same as
// ring_buffer_(read|write)_fully.
// Returns the actual number of bytes sent or received.
uint32_t ring_buffer_write_fully_with_abort(struct ring_buffer* r, struct ring_buffer_view* v,
                                            const void* data, uint32_t bytes, uint32_t abort_value,
                                            const volatile uint32_t* abort_ptr);
uint32_t ring_buffer_read_fully_with_abort(struct ring_buffer* r, struct ring_buffer_view* v,
                                           void* data, uint32_t bytes, uint32_t abort_value,
                                           const volatile uint32_t* abort_ptr);

uint32_t ring_buffer_view_get_ring_pos(const struct ring_buffer_view* v, uint32_t index);

// Nonblocking checks for whether |bytes| could currently be written or read.
bool ring_buffer_can_write(const struct ring_buffer* r, uint32_t bytes);
bool ring_buffer_can_read(const struct ring_buffer* r, uint32_t bytes);
bool ring_buffer_view_can_write(const struct ring_buffer* r, const struct ring_buffer_view* v,
                                uint32_t bytes);
bool ring_buffer_view_can_read(const struct ring_buffer* r, const struct ring_buffer_view* v,
                               uint32_t bytes);
// Number of bytes currently available to read from / write into the ring.
uint32_t ring_buffer_available_read(const struct ring_buffer* r, const struct ring_buffer_view* v);
uint32_t ring_buffer_available_write(const struct ring_buffer* r, const struct ring_buffer_view* v);

// Copies out contents from the consumer side of ring buffer/view |r,v| into
// |res|. Returns -1 if fewer than |wanted_bytes| are available to read;
// returns 0 on success.
int ring_buffer_copy_contents(const struct ring_buffer* r, const struct ring_buffer_view* v,
                              uint32_t wanted_bytes, uint8_t* res);
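
// Example (illustrative sketch, not part of the API): a ring over a
// caller-provided, power-of-two-sized buffer, using the blocking transfer
// helpers. The 4096-byte buffer and the message are assumptions for
// illustration; |r| and |buf| would normally live in memory shared by the
// producer and consumer.
//
//   static uint8_t buf[4096];
//   struct ring_buffer r;
//   struct ring_buffer_view v;
//   ring_buffer_view_init(&r, &v, buf, sizeof(buf));
//
//   // Producer: block until the whole message is in the ring.
//   const char msg[] = "ping";
//   ring_buffer_write_fully(&r, &v, msg, sizeof(msg));
//
//   // Consumer: block until the whole message has been read out.
//   char incoming[sizeof(msg)];
//   ring_buffer_read_fully(&r, &v, incoming, sizeof(incoming));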

// Lockless synchronization where the consumer is allowed to hang up and go to
// sleep. This can be considered a sort of asymmetric lock for two threads,
// where the consumer can be more sleepy. It captures the pattern we usually use
// for emulator devices; the guest asks the host for something, and some host
// thread services the request and goes back to sleep.
enum ring_buffer_sync_state {
    RING_BUFFER_SYNC_PRODUCER_IDLE = 0,
    RING_BUFFER_SYNC_PRODUCER_ACTIVE = 1,
    RING_BUFFER_SYNC_CONSUMER_HANGING_UP = 2,
    RING_BUFFER_SYNC_CONSUMER_HUNG_UP = 3,
};

// Initializes the sync state to RING_BUFFER_SYNC_PRODUCER_IDLE.
void ring_buffer_sync_init(struct ring_buffer* r);

// Tries to acquire the channel for sending.
// Returns false if the consumer was in the middle of hanging up,
// true if the producer successfully acquired the channel
// (put it in the RING_BUFFER_SYNC_PRODUCER_ACTIVE state).
bool ring_buffer_producer_acquire(struct ring_buffer* r);
// Same as above, but acquires from the RING_BUFFER_SYNC_CONSUMER_HUNG_UP state.
bool ring_buffer_producer_acquire_from_hangup(struct ring_buffer* r);
// Waits until the consumer hangs up.
void ring_buffer_producer_wait_hangup(struct ring_buffer* r);
// Sets the state back to RING_BUFFER_SYNC_PRODUCER_IDLE.
void ring_buffer_producer_idle(struct ring_buffer* r);

// There is no symmetric consumer acquire because the consumer can consume with
// the ring buffer being in any state (albeit with long waiting if the producer
// does not send anything).

// Tries to acquire the channel on the consumer side for hanging up.
// Returns false if the producer is in the middle of sending,
// true if the consumer successfully hung up the channel
// (put it in the RING_BUFFER_SYNC_CONSUMER_HUNG_UP state).
bool ring_buffer_consumer_hangup(struct ring_buffer* r);
// Waits until the producer has set the state to
// RING_BUFFER_SYNC_PRODUCER_IDLE.
void ring_buffer_consumer_wait_producer_idle(struct ring_buffer* r);
// Sets the state to RING_BUFFER_SYNC_CONSUMER_HUNG_UP.
void ring_buffer_consumer_hung_up(struct ring_buffer* r);

// Convenience function to yield (reschedule) the current thread.
void ring_buffer_yield(void);

#ifdef __cplusplus
}
#endif
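
// Example (illustrative sketch based on the comments above, not part of the
// API): the asymmetric producer/consumer synchronization, reusing |r|, |v|,
// and |msg| from the previous sketch. The exact sequencing and the wakeup
// mechanism are assumptions for illustration.
//
//   ring_buffer_sync_init(&r);  // Once, at setup: state = PRODUCER_IDLE.
//
//   // Producer (e.g. guest): send only while holding the channel.
//   if (ring_buffer_producer_acquire(&r)) {
//       ring_buffer_write_fully(&r, &v, msg, sizeof(msg));
//       ring_buffer_producer_idle(&r);
//   } else {
//       // The consumer was hanging up; wake it by some out-of-band means,
//       // then retry (ring_buffer_producer_acquire_from_hangup can reacquire
//       // the channel once the consumer has fully hung up).
//   }
//
//   // Consumer (e.g. host): when the ring looks idle, announce the hangup,
//   // drain anything that raced in, then mark the ring hung up and sleep.
//   if (ring_buffer_consumer_hangup(&r)) {
//       // ... read out any remaining data ...
//       ring_buffer_consumer_hung_up(&r);
//       // Safe to sleep until woken by the producer.
//   }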