1 /*
2 * Copyright (c) 2008-2015 Travis Geiselbrecht
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files
6 * (the "Software"), to deal in the Software without restriction,
7 * including without limitation the rights to use, copy, modify, merge,
8 * publish, distribute, sublicense, and/or sell copies of the Software,
9 * and to permit persons to whom the Software is furnished to do so,
10 * subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include <stdlib.h>
24 #include <debug.h>
25 #include <trace.h>
26 #include <pow2.h>
27 #include <string.h>
28 #include <assert.h>
29 #include <lib/cbuf.h>
30 #include <kernel/event.h>
31 #include <kernel/spinlock.h>
32
33 #define LOCAL_TRACE 0
34
35 #define INC_POINTER(cbuf, ptr, inc) \
36 modpow2(((ptr) + (inc)), (cbuf)->len_pow2)
37
/**
 * @brief  Initialize a cbuf, heap-allocating its backing storage.
 *
 * @param cbuf  Circular buffer state to initialize.
 * @param len   Size of the buffer in bytes; must be a power of two.
 */
void cbuf_initialize(cbuf_t *cbuf, size_t len)
{
    // Grab backing storage from the heap, then defer to the common
    // initializer. NOTE(review): the malloc result is not checked here;
    // a failed allocation propagates NULL into cbuf_initialize_etc().
    void *storage = malloc(len);
    cbuf_initialize_etc(cbuf, len, storage);
}
42
/**
 * @brief  Initialize a cbuf against a caller-supplied backing buffer.
 *
 * The buffer length must be a power of two: head/tail wraparound is done
 * with modpow2(), and only log2(len) is stored.
 *
 * @param cbuf  Circular buffer state to initialize.
 * @param len   Size of @a buf in bytes; must be non-zero and a power of two.
 * @param buf   Backing storage; ownership stays with the caller.
 */
void cbuf_initialize_etc(cbuf_t *cbuf, size_t len, void *buf)
{
    DEBUG_ASSERT(cbuf);
    DEBUG_ASSERT(len > 0);
    DEBUG_ASSERT(ispow2(len));
    // Catch a NULL backing buffer (e.g. a failed malloc upstream) here
    // rather than crashing on the first read/write.
    DEBUG_ASSERT(buf);

    cbuf->head = 0;
    cbuf->tail = 0;
    cbuf->len_pow2 = log2_uint(len);
    cbuf->buf = buf;
    event_init(&cbuf->event, false, 0);
    spin_lock_init(&cbuf->lock);

    // %zu: len is size_t; the previous %zd is the signed-size specifier
    // and mismatches the argument type.
    LTRACEF("len %zu, len_pow2 %u\n", len, cbuf->len_pow2);
}
58
/**
 * @brief  Number of bytes that can still be written to the buffer.
 *
 * One slot is always kept empty so that head == tail unambiguously
 * means "empty" rather than "full"; hence the -1.
 */
size_t cbuf_space_avail(cbuf_t *cbuf)
{
    uint used = modpow2((uint)(cbuf->head - cbuf->tail), cbuf->len_pow2);
    size_t capacity = valpow2(cbuf->len_pow2);

    return capacity - used - 1;
}
64
/**
 * @brief  Number of bytes currently stored in the buffer.
 */
size_t cbuf_space_used(cbuf_t *cbuf)
{
    // Distance from tail to head, wrapped to the power-of-two length.
    uint delta = (uint)(cbuf->head - cbuf->tail);

    return modpow2(delta, cbuf->len_pow2);
}
69
/**
 * @brief  Write up to |len| bytes into the circular buffer.
 *
 * Copies from |_buf| (or zero-fills if |_buf| is NULL) until either |len|
 * bytes are written or the buffer is full. Signals the cbuf's event when
 * data is present so blocked readers wake up.
 *
 * @param cbuf          Circular buffer to write into.
 * @param _buf          Source data, or NULL to write zero bytes.
 * @param len           Maximum number of bytes to write; must be smaller
 *                      than the buffer size (asserted below).
 * @param canreschedule If true, allow an immediate preemption after the
 *                      write so a woken reader can run.
 *
 * @return Number of bytes actually written (may be short if the buffer
 *         fills up).
 */
size_t cbuf_write(cbuf_t *cbuf, const void *_buf, size_t len, bool canreschedule)
{
    const char *buf = (const char *)_buf;

    LTRACEF("len %zd\n", len);

    DEBUG_ASSERT(cbuf);
    DEBUG_ASSERT(len < valpow2(cbuf->len_pow2));

    // All head/tail manipulation happens under the spinlock with
    // interrupts disabled.
    spin_lock_saved_state_t state;
    spin_lock_irqsave(&cbuf->lock, state);

    size_t write_len;
    size_t pos = 0;

    // At most two passes: one up to the physical end of the buffer, one
    // after wrapping around to the start.
    while (pos < len && cbuf_space_avail(cbuf) > 0) {
        if (cbuf->head >= cbuf->tail) {
            if (cbuf->tail == 0) {
                // Special case - if tail is at position 0, we can't write all
                // the way to the end of the buffer. Otherwise, head ends up at
                // 0, head == tail, and buffer is considered "empty" again.
                write_len =
                    MIN(valpow2(cbuf->len_pow2) - cbuf->head - 1, len - pos);
            } else {
                // Write to the end of the buffer.
                write_len =
                    MIN(valpow2(cbuf->len_pow2) - cbuf->head, len - pos);
            }
        } else {
            // Write from head to tail-1.
            write_len = MIN(cbuf->tail - cbuf->head - 1, len - pos);
        }

        // if it's full, abort and return how much we've written
        if (write_len == 0) {
            break;
        }

        // NULL source means "write zeros" rather than copying.
        if (NULL == buf) {
            memset(cbuf->buf + cbuf->head, 0, write_len);
        } else {
            memcpy(cbuf->buf + cbuf->head, buf + pos, write_len);
        }

        cbuf->head = INC_POINTER(cbuf, cbuf->head, write_len);
        pos += write_len;
    }

    // Wake any reader blocked on the event; non-reschedule signal so the
    // wakeup happens after we drop the lock.
    if (cbuf->head != cbuf->tail)
        event_signal(&cbuf->event, false);

    spin_unlock_irqrestore(&cbuf->lock, state);

    // XXX convert to only rescheduling if a thread was actually woken
    if (canreschedule)
        thread_preempt();

    return pos;
}
129
/**
 * @brief  Read up to |buflen| bytes from the circular buffer.
 *
 * Copies into |_buf| (or just discards the data if |_buf| is NULL). If
 * |block| is true and the buffer is empty, waits on the cbuf's event
 * until data arrives.
 *
 * @param cbuf   Circular buffer to read from.
 * @param _buf   Destination buffer, or NULL to consume without copying.
 * @param buflen Maximum number of bytes to read.
 * @param block  If true, block until at least one byte is available.
 *
 * @return Number of bytes read; 0 only in the non-blocking empty case.
 */
size_t cbuf_read(cbuf_t *cbuf, void *_buf, size_t buflen, bool block)
{
    char *buf = (char *)_buf;

    DEBUG_ASSERT(cbuf);

retry:
    // block on the cbuf outside of the lock, which may
    // unblock us early and we'll have to double check below
    if (block)
        event_wait(&cbuf->event);

    spin_lock_saved_state_t state;
    spin_lock_irqsave(&cbuf->lock, state);

    // see if there's data available
    size_t ret = 0;
    if (cbuf->tail != cbuf->head) {
        size_t pos = 0;

        // loop until we've read everything we need
        // at most this will make two passes to deal with wraparound
        while (pos < buflen && cbuf->tail != cbuf->head) {
            size_t read_len;
            if (cbuf->head > cbuf->tail) {
                // simple case where there is no wraparound
                read_len = MIN(cbuf->head - cbuf->tail, buflen - pos);
            } else {
                // read to the end of buffer in this pass
                read_len = MIN(valpow2(cbuf->len_pow2) - cbuf->tail, buflen - pos);
            }

            // Only perform the copy if a buf was supplied
            if (NULL != buf) {
                memcpy(buf + pos, cbuf->buf + cbuf->tail, read_len);
            }

            cbuf->tail = INC_POINTER(cbuf, cbuf->tail, read_len);
            pos += read_len;
        }

        if (cbuf->tail == cbuf->head) {
            DEBUG_ASSERT(pos > 0);
            // we've emptied the buffer, unsignal the event
            event_unsignal(&cbuf->event);
        }

        ret = pos;
    }

    spin_unlock_irqrestore(&cbuf->lock, state);

    // we apparently blocked but raced with another thread and found no data, retry
    if (block && ret == 0)
        goto retry;

    return ret;
}
188
/**
 * @brief  Describe the readable data without consuming it.
 *
 * Fills |regions| (an array of two iovecs) with the currently readable
 * span(s): regions[0] covers tail up to the end of the buffer, and
 * regions[1] covers the wrapped portion at the start, or is zeroed when
 * no wraparound is in effect.
 *
 * @return Total number of readable bytes.
 */
size_t cbuf_peek(cbuf_t *cbuf, iovec_t *regions)
{
    DEBUG_ASSERT(cbuf && regions);

    spin_lock_saved_state_t state;
    spin_lock_irqsave(&cbuf->lock, state);

    size_t used = cbuf_space_used(cbuf);
    size_t total = cbuf_size(cbuf);

    DEBUG_ASSERT(cbuf->tail < total);
    DEBUG_ASSERT(used <= total);

    // First region starts at the tail; NULL base when there is no data.
    if (used == 0) {
        regions[0].iov_base = NULL;
    } else {
        regions[0].iov_base = cbuf->buf + cbuf->tail;
    }

    // Bytes physically available between the tail and the end of storage.
    size_t contiguous = total - cbuf->tail;
    if (used > contiguous) {
        // Data wraps: split across the end of the buffer and its start.
        regions[0].iov_len = contiguous;
        regions[1].iov_base = cbuf->buf;
        regions[1].iov_len = used - contiguous;
    } else {
        // No wrap: one contiguous region, second entry is empty.
        regions[0].iov_len = used;
        regions[1].iov_base = NULL;
        regions[1].iov_len = 0;
    }

    spin_unlock_irqrestore(&cbuf->lock, state);
    return used;
}
216
/**
 * @brief  Write a single byte into the circular buffer.
 *
 * @param cbuf          Circular buffer to write into.
 * @param c             Byte to append.
 * @param canreschedule Passed through to event_signal(); if true a woken
 *                      reader may preempt us immediately.
 *
 * @return 1 if the byte was written, 0 if the buffer was full.
 */
size_t cbuf_write_char(cbuf_t *cbuf, char c, bool canreschedule)
{
    DEBUG_ASSERT(cbuf);

    spin_lock_saved_state_t state;
    spin_lock_irqsave(&cbuf->lock, state);

    size_t written = 0;
    bool have_room = (cbuf_space_avail(cbuf) > 0);

    if (have_room) {
        cbuf->buf[cbuf->head] = c;
        cbuf->head = INC_POINTER(cbuf, cbuf->head, 1);
        written = 1;

        // Data is now present: wake any blocked reader.
        if (cbuf->head != cbuf->tail)
            event_signal(&cbuf->event, canreschedule);
    }

    spin_unlock_irqrestore(&cbuf->lock, state);

    return written;
}
239
/**
 * @brief  Read a single byte from the circular buffer.
 *
 * If |block| is true and the buffer is empty, waits on the cbuf's event
 * until a byte is available, re-checking after each wakeup since another
 * reader may have raced us to the data.
 *
 * @param cbuf  Circular buffer to read from.
 * @param c     Out: receives the byte read.
 * @param block If true, block until a byte is available.
 *
 * @return 1 if a byte was read, 0 only in the non-blocking empty case.
 */
size_t cbuf_read_char(cbuf_t *cbuf, char *c, bool block)
{
    DEBUG_ASSERT(cbuf);
    DEBUG_ASSERT(c);

    size_t got = 0;

    do {
        // Wait outside the lock; a spurious/raced wakeup is handled by
        // the empty-buffer re-check below.
        if (block)
            event_wait(&cbuf->event);

        spin_lock_saved_state_t state;
        spin_lock_irqsave(&cbuf->lock, state);

        if (cbuf->tail != cbuf->head) {
            *c = cbuf->buf[cbuf->tail];
            cbuf->tail = INC_POINTER(cbuf, cbuf->tail, 1);

            // Buffer drained: clear the event so future readers block.
            if (cbuf->tail == cbuf->head)
                event_unsignal(&cbuf->event);

            got = 1;
        }

        spin_unlock_irqrestore(&cbuf->lock, state);

        // Blocking mode retries until a byte is actually obtained.
    } while (block && got == 0);

    return got;
}
274