/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials provided
 *       with the distribution.
 *     * Neither the name of The Linux Foundation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>

#define LOG_TAG  "WifiHAL"

#include <utils/Log.h>

typedef unsigned char u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

#include "ring_buffer.h"

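/* Boolean type for the ring buffer. Note that the enumerator values are
 * inverted relative to C truthiness (RB_TRUE == 0, RB_FALSE == 1), so this
 * type must always be compared against the enumerators explicitly, as the
 * code below does.
 */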
enum rb_bool {
    RB_TRUE = 0,
    RB_FALSE = 1
};

typedef struct rb_entry_s {
    u8 *data;
    unsigned int last_wr_index;
    u8 full;
} rb_entry_t;

typedef struct ring_buf_cb {
    unsigned int rd_buf_no; // Current buffer number to be read from
    unsigned int wr_buf_no; // Current buffer number to be written into
    unsigned int cur_rd_buf_idx; // Read index within the current read buffer
    unsigned int cur_wr_buf_idx; // Write index within the current write buffer
    rb_entry_t *bufs; // Array of buffer entries

    unsigned int max_num_bufs; // Maximum number of buffers that should be used
    size_t each_buf_size; // Size of each buffer in bytes

    pthread_mutex_t rb_rw_lock;

    /* Threshold vars */
    unsigned int num_min_bytes;
    void (*threshold_cb)(void *);
    void *cb_ctx;

    u32 total_bytes_written;
    u32 total_bytes_read;
    u32 total_bytes_overwritten;
    u32 cur_valid_bytes;
    enum rb_bool threshold_reached;
} rbc_t;
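
/*
 * A minimal usage sketch (hypothetical sizes, record data and consumer; the
 * public API is the one declared in ring_buffer.h):
 *
 *     static void on_threshold(void *ctx)  // fired once enough data queues up
 *     {
 *         u8 tmp[256];
 *         while (rb_read(ctx, tmp, sizeof(tmp)) > 0)
 *             ;  // drain; dropping below num_min_bytes re-arms the callback
 *     }
 *
 *     void *rb = ring_buffer_init(4096, 4);  // 4 buffers of 4 KB each
 *     rb_config_threshold(rb, 1024, on_threshold, rb);
 *     rb_write(rb, record, record_len, 0, record_len);  // keep record whole
 *     ring_buffer_deinit(rb);
 */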


#define RB_MIN(x, y) ((x) < (y) ? (x) : (y))

inline void rb_lock(pthread_mutex_t *lock)
{
    int error = pthread_mutex_lock(lock);

    if (error)
        ALOGE("Failed to acquire lock with err %d", error);
    // TODO Handle the lock failure
}

inline void rb_unlock(pthread_mutex_t *lock)
{
    int error = pthread_mutex_unlock(lock);

    if (error)
        ALOGE("Failed to release lock with err %d", error);
    // TODO Handle the unlock failure
}

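/*
 * Allocates and zeroes the control block and the per-buffer metadata array.
 * The data buffers themselves are not allocated here; rb_write allocates
 * each one lazily the first time it writes into it.
 */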
void *ring_buffer_init(size_t size_of_buf, int num_bufs)
{
    struct ring_buf_cb *rbc;
    int status;

    rbc = (struct ring_buf_cb *)malloc(sizeof(struct ring_buf_cb));
    if (rbc == NULL) {
        ALOGE("Failed to alloc rbc");
        return NULL;
    }
    memset(rbc, 0, sizeof(struct ring_buf_cb));

    rbc->bufs = (rb_entry_t *)malloc(num_bufs * sizeof(rb_entry_t));
    if (rbc->bufs == NULL) {
        free(rbc);
        ALOGE("Failed to alloc rbc->bufs");
        return NULL;
    }
    memset(rbc->bufs, 0, (num_bufs * sizeof(rb_entry_t)));

    rbc->each_buf_size = size_of_buf;
    rbc->max_num_bufs = num_bufs;

    status = pthread_mutex_init(&rbc->rb_rw_lock, NULL);
    if (status != 0) {
        ALOGE("Failed to initialize rb_rw_lock");
        // TODO handle lock initialization failure
    }
    rbc->threshold_reached = RB_FALSE;
    return rbc;
}

void ring_buffer_deinit(void *ctx)
{
    rbc_t *rbc = (rbc_t *)ctx;
    int status;
    unsigned int buf_no;

    status = pthread_mutex_destroy(&rbc->rb_rw_lock);
    if (status != 0) {
        ALOGE("Failed to destroy rb_rw_lock");
        // TODO handle the lock destroy failure
    }
    for (buf_no = 0; buf_no < rbc->max_num_bufs; buf_no++) {
        free(rbc->bufs[buf_no].data);
    }
    free(rbc->bufs);
    free(rbc);
}

/*
 * record_length : 0  - write up to a byte boundary; the data may be split
 *                      across buffers
 *               : >0 - ensure that all record_length bytes of the record
 *                      are written into the same buffer
 */
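/*
 * For example (hypothetical sizes): with each_buf_size = 1024 and
 * cur_wr_buf_idx = 1000, a call with record_length = 100 seals the current
 * buffer and writes the whole record at the start of the next buffer
 * instead of splitting it across the buffer boundary.
 */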
enum rb_status rb_write(void *ctx, u8 *buf, size_t length, int overwrite,
                        size_t record_length)
{
    rbc_t *rbc = (rbc_t *)ctx;
    unsigned int bytes_written = 0; // bytes written into rb so far
    unsigned int push_in_rd_ptr = 0; // push required in read pointer because of
                                     // write in current buffer
    unsigned int total_push_in_rd_ptr = 0; // Total amount of push in read pointer in this write

    if (record_length > rbc->each_buf_size || length > rbc->each_buf_size) {
        return RB_FAILURE;
    }

    if (overwrite == 0) {
        /* Check whether the complete RB is full. If the current wr_buf is
         * also full, the complete RB is full.
         */
        if (rbc->bufs[rbc->wr_buf_no].full == 1)
            return RB_FULL;
        /* Check whether the record fits in the current buffer */
        if (rbc->wr_buf_no == rbc->rd_buf_no) {
            if ((rbc->cur_wr_buf_idx == rbc->cur_rd_buf_idx) &&
                rbc->cur_valid_bytes) {
                return RB_FULL;
            } else if (rbc->cur_wr_buf_idx < rbc->cur_rd_buf_idx) {
                if (record_length >
                    (rbc->cur_rd_buf_idx - rbc->cur_wr_buf_idx)) {
                    return RB_FULL;
                }
            } else {
                if (record_length > (rbc->each_buf_size - rbc->cur_wr_buf_idx)) {
                    /* The record will spill into the next buffer; make sure
                     * that buffer is not already full.
                     */
                    unsigned int next_buf_no = rbc->wr_buf_no + 1;

                    if (next_buf_no >= rbc->max_num_bufs) {
                        next_buf_no = 0;
                    }
                    if (rbc->bufs[next_buf_no].full == 1) {
                        return RB_FULL;
                    }
                }
            }
        } else if (record_length > (rbc->each_buf_size - rbc->cur_wr_buf_idx)) {
            /* The record will spill into the next buffer; make sure that
             * buffer is not already full.
             */
            unsigned int next_buf_no = rbc->wr_buf_no + 1;

            if (next_buf_no >= rbc->max_num_bufs) {
                next_buf_no = 0;
            }
            if (rbc->bufs[next_buf_no].full == 1) {
                return RB_FULL;
            }
        }
    }

    /* Go to the next buffer if the current buffer does not have enough room
     * for the complete record.
     */
    if (record_length > (rbc->each_buf_size - rbc->cur_wr_buf_idx)) {
        rbc->bufs[rbc->wr_buf_no].full = 1;
        rbc->bufs[rbc->wr_buf_no].last_wr_index = rbc->cur_wr_buf_idx;
        rbc->wr_buf_no++;
        if (rbc->wr_buf_no == rbc->max_num_bufs) {
            rbc->wr_buf_no = 0;
        }
        rbc->cur_wr_buf_idx = 0;
    }

    /* Each iteration of the loop below copies from the input buf as much
     * data as fits into the buffer at wr_buf_no. */
    while (bytes_written < length) {
        unsigned int cur_copy_len;

        /* Allocate a buffer if none is available at wr_buf_no */
        if (rbc->bufs[rbc->wr_buf_no].data == NULL) {
            rbc->bufs[rbc->wr_buf_no].data = (u8 *)malloc(rbc->each_buf_size);
            if (rbc->bufs[rbc->wr_buf_no].data == NULL) {
                ALOGE("Failed to alloc write buffer");
                return RB_RETRY;
            }
        }

        /* Take the minimum of the remaining length that needs to be written
         * from buf and the maximum length that can be written into the
         * current buffer in the ring buffer.
         */
        cur_copy_len = RB_MIN((rbc->each_buf_size - rbc->cur_wr_buf_idx),
                              (length - bytes_written));

        rb_lock(&rbc->rb_rw_lock);

        /* Push the read pointer in case of overrun */
        if (rbc->rd_buf_no == rbc->wr_buf_no) {
            if ((rbc->cur_rd_buf_idx > rbc->cur_wr_buf_idx) ||
                ((rbc->cur_rd_buf_idx == rbc->cur_wr_buf_idx) &&
                 rbc->cur_valid_bytes)) {
                /* If the read pointer is ahead of the write pointer and the
                 * gap is not enough to fit cur_copy_len bytes, push the read
                 * pointer so that it points to the oldest surviving byte
                 * after this write.
                 */
                if ((rbc->cur_rd_buf_idx - rbc->cur_wr_buf_idx) <
                    cur_copy_len) {
                    push_in_rd_ptr += cur_copy_len -
                                    (rbc->cur_rd_buf_idx - rbc->cur_wr_buf_idx);
                    rbc->cur_rd_buf_idx = rbc->cur_wr_buf_idx + cur_copy_len;
                    if (rbc->cur_rd_buf_idx >=
                        rbc->bufs[rbc->rd_buf_no].last_wr_index) {
                        rbc->cur_rd_buf_idx = 0;
                        rbc->rd_buf_no++;
                        if (rbc->rd_buf_no == rbc->max_num_bufs) {
                            rbc->rd_buf_no = 0;
                            ALOGV("Pushing read to the start of ring buffer");
                        }
                        /* The previous buffer may have a little more empty
                         * room after the remaining bytes are overwritten.
                         */
                        rbc->bufs[rbc->wr_buf_no].full = 0;
                    }
                }
            }
        }
        rb_unlock(&rbc->rb_rw_lock);
        /* Pointer arithmetic on a non-NULL base cannot yield NULL, so only
         * the base pointers need to be checked. */
        if (rbc->bufs[rbc->wr_buf_no].data == NULL || buf == NULL) {
            ALOGE("The source or destination buffer is NULL");
            return RB_FAILURE;
        }
        if ((bytes_written + cur_copy_len) > length ||
            (rbc->cur_wr_buf_idx + cur_copy_len) > rbc->each_buf_size) {
            ALOGE("LOG_RB rb_write overflow - cur_copy_len=%u wr_buf[max=%zu no=%u idx=%u] buf[max=%zu accessed=%u]",
                  cur_copy_len, rbc->each_buf_size, rbc->wr_buf_no,
                  rbc->cur_wr_buf_idx, length, bytes_written + cur_copy_len);
            return RB_FAILURE;
        }

        /* Don't hold the lock during the memcpy, so that the read context is
         * not blocked for too long. Writing the memory without the lock is
         * harmless as long as the pointer updates are done under the lock. */
        memcpy((rbc->bufs[rbc->wr_buf_no].data + rbc->cur_wr_buf_idx),
               (buf + bytes_written),
               cur_copy_len);

        rb_lock(&rbc->rb_rw_lock);
        /* Update the write index by the amount written in this iteration */
        rbc->cur_wr_buf_idx += cur_copy_len;
        if (rbc->cur_wr_buf_idx == rbc->each_buf_size) {
            /* Increment wr_buf_no as the current buffer is full */
            rbc->bufs[rbc->wr_buf_no].full = 1;
            rbc->bufs[rbc->wr_buf_no].last_wr_index = rbc->cur_wr_buf_idx;
            rbc->wr_buf_no++;
            if (rbc->wr_buf_no == rbc->max_num_bufs) {
                ALOGV("Write rolling over to the start of ring buffer");
                rbc->wr_buf_no = 0;
            }
            /* Reset the write index to zero as this is a new buffer */
            rbc->cur_wr_buf_idx = 0;
        }

        if ((rbc->cur_valid_bytes + (cur_copy_len - push_in_rd_ptr)) >
            (rbc->max_num_bufs * rbc->each_buf_size)) {
            /* Precautionary check; this should never trigger */
            ALOGE("Something going wrong in ring buffer");
        } else {
            /* Increase the valid-bytes count by the number of bytes written
             * without overwriting old bytes */
            rbc->cur_valid_bytes += cur_copy_len - push_in_rd_ptr;
        }
        total_push_in_rd_ptr += push_in_rd_ptr;
        push_in_rd_ptr = 0;
        rb_unlock(&rbc->rb_rw_lock);
        bytes_written += cur_copy_len;
    }

    rb_lock(&rbc->rb_rw_lock);
    rbc->total_bytes_written += bytes_written - total_push_in_rd_ptr;
    rbc->total_bytes_overwritten += total_push_in_rd_ptr;

    /* Check whether the valid byte count has crossed the threshold */
    if ((rbc->threshold_reached == RB_FALSE) &&
        (rbc->cur_valid_bytes >= rbc->num_min_bytes) &&
        ((length == record_length) || !record_length) &&
        rbc->threshold_cb) {
        /* Release the lock before calling threshold_cb to avoid deadlock:
         * the callback may call rb_read from this same context.
         */
        rbc->threshold_reached = RB_TRUE;
        rb_unlock(&rbc->rb_rw_lock);
        rbc->threshold_cb(rbc->cb_ctx);
    } else {
        rb_unlock(&rbc->rb_rw_lock);
    }
    return RB_SUCCESS;
}
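
/*
 * Note on the latch above: threshold_cb fires at most once per upward
 * crossing. threshold_reached stays RB_TRUE until rb_read() or
 * rb_get_read_buf() drains cur_valid_bytes back below num_min_bytes, which
 * re-arms the callback. A hedged sketch of a zero-copy consumer callback
 * (process() is hypothetical):
 *
 *     static void on_threshold(void *ctx)
 *     {
 *         size_t len;
 *         u8 *chunk = rb_get_read_buf(ctx, &len);
 *         if (chunk) {
 *             process(chunk, len);
 *             free(chunk);  // caller owns the returned buffer
 *         }
 *     }
 */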

size_t rb_read(void *ctx, u8 *buf, size_t max_length)
{
    rbc_t *rbc = (rbc_t *)ctx;
    unsigned int bytes_read = 0;
    unsigned int no_more_bytes_available = 0;

    rb_lock(&rbc->rb_rw_lock);
    while (bytes_read < max_length) {
        unsigned int cur_cpy_len;

        if (rbc->bufs[rbc->rd_buf_no].data == NULL) {
            break;
        }

        /* If read and write are on the same buffer, work with the rd and wr
         * indices */
        if (rbc->rd_buf_no == rbc->wr_buf_no) {
            if (rbc->cur_rd_buf_idx < rbc->cur_wr_buf_idx) {
                /* Check whether all the required bytes are available; if not,
                 * read only the bytes available in the current buffer and
                 * break out after reading the current buffer.
                 */
                if ((rbc->cur_wr_buf_idx - rbc->cur_rd_buf_idx) <
                        (max_length - bytes_read)) {
                    cur_cpy_len = rbc->cur_wr_buf_idx - rbc->cur_rd_buf_idx;
                    no_more_bytes_available = 1;
                } else {
                    cur_cpy_len = max_length - bytes_read;
                }
            } else {
                /* When there are no bytes available to read, cur_rd_buf_idx
                 * will be equal to cur_wr_buf_idx. Handle this scenario using
                 * cur_valid_bytes. */
                if (rbc->cur_valid_bytes <= bytes_read) {
                    /* Suppress a possible static-analyzer warning */
                    cur_cpy_len = 0;
                    break;
                }
                cur_cpy_len = RB_MIN((rbc->each_buf_size - rbc->cur_rd_buf_idx),
                                     (max_length - bytes_read));
            }
        } else {
            /* Check whether all remaining_length bytes can be read from this
             * buffer; if not, read only the bytes available in the current
             * buffer and go to the next buffer via the while loop.
             */
            cur_cpy_len = RB_MIN((rbc->each_buf_size - rbc->cur_rd_buf_idx),
                                 (max_length - bytes_read));
        }

        memcpy((buf + bytes_read),
               (rbc->bufs[rbc->rd_buf_no].data + rbc->cur_rd_buf_idx),
               cur_cpy_len);

        /* Update the read index */
        rbc->cur_rd_buf_idx += cur_cpy_len;
        if (rbc->cur_rd_buf_idx == rbc->each_buf_size) {
            /* Increment rd_buf_no as the current buffer is completely read */
            if (rbc->rd_buf_no != rbc->wr_buf_no) {
                free(rbc->bufs[rbc->rd_buf_no].data);
                rbc->bufs[rbc->rd_buf_no].data = NULL;
            }
            rbc->rd_buf_no++;
            if (rbc->rd_buf_no == rbc->max_num_bufs) {
                ALOGV("Read rolling over to the start of ring buffer");
                rbc->rd_buf_no = 0;
            }
            /* Reset the read index as this is a new buffer */
            rbc->cur_rd_buf_idx = 0;
        }

        bytes_read += cur_cpy_len;
        if (no_more_bytes_available) {
            break;
        }
    }

    rbc->total_bytes_read += bytes_read;
    if (rbc->cur_valid_bytes < bytes_read) {
        /* Precautionary check; this should never trigger */
        ALOGE("Something going wrong in ring buffer");
    } else {
        rbc->cur_valid_bytes -= bytes_read;
    }

    /* Check whether the valid byte count has dropped below the threshold */
    if (rbc->threshold_reached == RB_TRUE) {
        if (rbc->cur_valid_bytes < rbc->num_min_bytes) {
            rbc->threshold_reached = RB_FALSE;
        }
    }
    rb_unlock(&rbc->rb_rw_lock);
    return bytes_read;
}

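/*
 * Hands out up to one buffer's worth of readable data, without copying when
 * possible: if a completely filled buffer is ready, its data pointer is
 * detached from the ring and returned as-is; otherwise the available bytes
 * are copied into a freshly malloc'd buffer. In both cases the caller owns
 * the returned pointer and must free() it. *length is set to the number of
 * valid bytes, or 0 whenever NULL is returned.
 */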
u8 *rb_get_read_buf(void *ctx, size_t *length)
{
    rbc_t *rbc = (rbc_t *)ctx;
    unsigned int cur_read_len = 0;
    u8 *buf;

    /* If no buffer is available for reading */
    if (!rbc || rbc->bufs[rbc->rd_buf_no].data == NULL) {
        *length = 0;
        return NULL;
    }

    rb_lock(&rbc->rb_rw_lock);
    if ((rbc->bufs[rbc->rd_buf_no].full == 1) &&
        (rbc->cur_rd_buf_idx == rbc->bufs[rbc->rd_buf_no].last_wr_index)) {
        if (rbc->wr_buf_no != rbc->rd_buf_no) {
            free(rbc->bufs[rbc->rd_buf_no].data);
            rbc->bufs[rbc->rd_buf_no].data = NULL;
        }
        rbc->bufs[rbc->rd_buf_no].full = 0;
        rbc->rd_buf_no++;
        if (rbc->rd_buf_no == rbc->max_num_bufs) {
            rbc->rd_buf_no = 0;
        }
        rbc->cur_rd_buf_idx = 0;
    }

    if (rbc->wr_buf_no == rbc->rd_buf_no) {
        /* Read and write are currently happening on the same buffer; use
         * the rd and wr indices within the buffer */
        if ((rbc->cur_rd_buf_idx == rbc->cur_wr_buf_idx) &&
            (rbc->cur_valid_bytes == 0)) {
            /* No bytes available for reading */
            *length = 0;
            rb_unlock(&rbc->rb_rw_lock);
            return NULL;
        } else if (rbc->cur_rd_buf_idx < rbc->cur_wr_buf_idx) {
            /* Write is just ahead of read in this buffer */
            cur_read_len = rbc->cur_wr_buf_idx - rbc->cur_rd_buf_idx;
        } else {
            /* Write has rolled over and is just behind the read */
            if (rbc->bufs[rbc->rd_buf_no].last_wr_index >= rbc->cur_rd_buf_idx) {
                cur_read_len = rbc->bufs[rbc->rd_buf_no].last_wr_index - rbc->cur_rd_buf_idx;
            } else {
                ALOGE("Alert: cur_read_len=%u invalid, rd_buf[no=%u rd_idx=%u wr_index=%u]",
                      cur_read_len, rbc->rd_buf_no, rbc->cur_rd_buf_idx,
                      rbc->bufs[rbc->rd_buf_no].last_wr_index);
                /* Release the lock before bailing out; returning while still
                 * holding rb_rw_lock would deadlock subsequent calls. */
                *length = 0;
                rb_unlock(&rbc->rb_rw_lock);
                return NULL;
            }
        }
    } else {
        if (rbc->cur_rd_buf_idx == 0) {
            /* The complete buffer can be read out */
            cur_read_len = rbc->bufs[rbc->rd_buf_no].last_wr_index;
        } else {
            /* Read the remaining bytes in this buffer */
            cur_read_len = rbc->bufs[rbc->rd_buf_no].last_wr_index - rbc->cur_rd_buf_idx;
        }
    }

    if ((rbc->bufs[rbc->rd_buf_no].full == 1) &&
         (rbc->cur_rd_buf_idx == 0)) {
        /* Pluck out the complete buffer and hand it out */
        buf = rbc->bufs[rbc->rd_buf_no].data;
        rbc->bufs[rbc->rd_buf_no].data = NULL;

        /* Move to the next buffer */
        rbc->bufs[rbc->rd_buf_no].full = 0;
        rbc->rd_buf_no++;
        if (rbc->rd_buf_no == rbc->max_num_bufs) {
            ALOGV("Read rolling over to the start of ring buffer");
            rbc->rd_buf_no = 0;
        }
    } else {
        /* The complete buffer cannot be handed out, so allocate new memory
         * and copy the data into it.
         */
        buf = (u8 *)malloc(cur_read_len);
        if (buf == NULL) {
            ALOGE("Failed to alloc buffer for partial buf read");
            *length = 0;
            rb_unlock(&rbc->rb_rw_lock);
            return NULL;
        }
        memcpy(buf,
               (rbc->bufs[rbc->rd_buf_no].data + rbc->cur_rd_buf_idx),
               cur_read_len);

        /* Update the read index */
        if (rbc->bufs[rbc->rd_buf_no].full == 1) {
            if (rbc->wr_buf_no != rbc->rd_buf_no) {
                free(rbc->bufs[rbc->rd_buf_no].data);
                rbc->bufs[rbc->rd_buf_no].data = NULL;
            }
            rbc->bufs[rbc->rd_buf_no].full = 0;
            rbc->rd_buf_no++;
            if (rbc->rd_buf_no == rbc->max_num_bufs) {
                rbc->rd_buf_no = 0;
            }
            rbc->cur_rd_buf_idx = 0;
        } else {
            rbc->cur_rd_buf_idx += cur_read_len;
        }
    }

    rbc->total_bytes_read += cur_read_len;
    if (rbc->cur_valid_bytes < cur_read_len) {
        /* Precautionary check; this should never trigger */
        ALOGE("Something going wrong in ring buffer");
    } else {
        rbc->cur_valid_bytes -= cur_read_len;
    }

    /* Check whether the valid byte count has dropped below the threshold */
    if (rbc->threshold_reached == RB_TRUE) {
        if (rbc->cur_valid_bytes < rbc->num_min_bytes) {
            rbc->threshold_reached = RB_FALSE;
        }
    }
    rb_unlock(&rbc->rb_rw_lock);

    *length = cur_read_len;
    return buf;
}

void rb_config_threshold(void *ctx,
                         unsigned int num_min_bytes,
                         threshold_call_back callback,
                         void *cb_ctx)
{
    rbc_t *rbc = (rbc_t *)ctx;

    rbc->num_min_bytes = num_min_bytes;
    rbc->threshold_cb = callback;
    rbc->cb_ctx = cb_ctx;
}

void rb_get_stats(void *ctx, struct rb_stats *rbs)
{
    rbc_t *rbc = (rbc_t *)ctx;

    rbs->total_bytes_written = rbc->total_bytes_written;
    rbs->total_bytes_read = rbc->total_bytes_read;
    rbs->cur_valid_bytes = rbc->cur_valid_bytes;
    rbs->each_buf_size = rbc->each_buf_size;
    rbs->max_num_bufs = rbc->max_num_bufs;
}