/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials provided
 *       with the distribution.
 *     * Neither the name of The Linux Foundation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>

#define LOG_TAG  "WifiHAL"

#include <utils/Log.h>

typedef unsigned char u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

#include "ring_buffer.h"
enum rb_bool {
    RB_TRUE = 0,
    RB_FALSE = 1
};

typedef struct rb_entry_s {
    u8 *data;
    unsigned int last_wr_index;
    u8 full;
} rb_entry_t;

typedef struct ring_buf_cb {
    unsigned int rd_buf_no; // Current buffer number to be read from
    unsigned int wr_buf_no; // Current buffer number to be written into
    unsigned int cur_rd_buf_idx; // Read index within the current read buffer
    unsigned int cur_wr_buf_idx; // Write index within the current write buffer
    rb_entry_t *bufs; // Array of buffer entries

    unsigned int max_num_bufs; // Maximum number of buffers that should be used
    size_t each_buf_size; // Size of each buffer in bytes

    pthread_mutex_t rb_rw_lock;

    /* Threshold vars */
    unsigned int num_min_bytes;
    void (*threshold_cb)(void *);
    void *cb_ctx;

    u32 total_bytes_written;
    u32 total_bytes_read;
    u32 total_bytes_overwritten;
    u32 cur_valid_bytes;
    enum rb_bool threshold_reached;
} rbc_t;


#define RB_MIN(x, y) ((x) < (y)?(x):(y))
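
/* Thin wrappers around pthread mutex lock/unlock; failures are only logged
 * (see the TODOs below) and callers proceed regardless.
 */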
inline void rb_lock(pthread_mutex_t *lock)
{
    int error = pthread_mutex_lock(lock);

    if (error)
        ALOGE("Failed to acquire lock with err %d", error);
    // TODO Handle the lock failure
}

inline void rb_unlock(pthread_mutex_t *lock)
{
    int error = pthread_mutex_unlock(lock);

    if (error)
        ALOGE("Failed to release lock with err %d", error);
    // TODO Handle the unlock failure
}

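/* Allocate and zero the ring buffer control block and its array of buffer
 * entries. The per-buffer data areas are not allocated here; rb_write()
 * allocates them on demand. Returns an opaque handle to be passed to the
 * other rb_* calls, or NULL on allocation failure.
 */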
void * ring_buffer_init(size_t size_of_buf, int num_bufs)
{
    struct ring_buf_cb *rbc;
    int status;

    rbc = (struct ring_buf_cb *)malloc(sizeof(struct ring_buf_cb));
    if (rbc == NULL) {
        ALOGE("Failed to alloc rbc");
        return NULL;
    }
    memset(rbc, 0, sizeof(struct ring_buf_cb));

    rbc->bufs = (rb_entry_t *)malloc(num_bufs * sizeof(rb_entry_t));
    if (rbc->bufs == NULL) {
        free(rbc);
        ALOGE("Failed to alloc rbc->bufs");
        return NULL;
    }
    memset(rbc->bufs, 0, (num_bufs * sizeof(rb_entry_t)));

    rbc->each_buf_size = size_of_buf;
    rbc->max_num_bufs = num_bufs;

    status = pthread_mutex_init(&rbc->rb_rw_lock, NULL);
    if (status != 0) {
        ALOGE("Failed to initialize rb_rw_lock");
        // TODO handle lock initialization failure
    }
    rbc->threshold_reached = RB_FALSE;
    return rbc;
}

void ring_buffer_deinit(void *ctx)
{
    rbc_t *rbc = (rbc_t *)ctx;
    int status;
    unsigned int buf_no;

    status = pthread_mutex_destroy(&rbc->rb_rw_lock);
    if (status != 0) {
        ALOGE("Failed to destroy rb_rw_lock");
        // TODO handle the lock destroy failure
    }
    for (buf_no = 0; buf_no < rbc->max_num_bufs; buf_no++) {
        free(rbc->bufs[buf_no].data);
    }
    free(rbc->bufs);
    free(rbc);
}
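
/*
 * Illustrative lifecycle sketch (comment only, not compiled): the buffer
 * count and sizes below are arbitrary example values, not values required
 * by this module.
 *
 *     void *rb = ring_buffer_init(4096, 8);      // 8 buffers of 4 KB each
 *     u8 rec[64];
 *     ... fill rec ...
 *     if (rb_write(rb, rec, sizeof(rec), 0, sizeof(rec)) == RB_SUCCESS) {
 *         u8 out[64];
 *         size_t n = rb_read(rb, out, sizeof(out));   // n <= sizeof(out)
 *     }
 *     ring_buffer_deinit(rb);
 */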

/*
 * record_length : 0  - byte boundary (the data may be split across buffers)
 *               : >0 - ensures that record_length bytes are written to the same buffer
 */
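/*
 * overwrite : 0  - fail with RB_FULL rather than disturb unread data
 *           : >0 - overwrite the oldest unread data, pushing the read pointer forward
 *
 * Returns RB_FAILURE if record_length exceeds the per-buffer size, RB_RETRY if
 * a data buffer cannot be allocated, and RB_SUCCESS otherwise. When
 * cur_valid_bytes reaches num_min_bytes and the write was a complete record
 * (length == record_length, or record_length is 0), the configured threshold
 * callback is invoked from this context.
 */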
enum rb_status rb_write (void *ctx, u8 *buf, size_t length, int overwrite,
                         size_t record_length)
{
    rbc_t *rbc = (rbc_t *)ctx;
    unsigned int bytes_written = 0; // bytes written into rb so far
    unsigned int push_in_rd_ptr = 0; // push required in read pointer because of
                                     // write in current buffer
    unsigned int total_push_in_rd_ptr = 0; // Total amount of push in read pointer in this write

    if (record_length > rbc->each_buf_size) {
        return RB_FAILURE;
    }

    if (overwrite == 0) {
        /* Check if the complete RB is full. If the current wr_buf is also
         * full, it indicates that the complete RB is full
         */
        if (rbc->bufs[rbc->wr_buf_no].full == 1)
            return RB_FULL;
        /* Check whether record fits in current buffer */
        if (rbc->wr_buf_no == rbc->rd_buf_no) {
            if ((rbc->cur_wr_buf_idx == rbc->cur_rd_buf_idx) &&
                rbc->cur_valid_bytes) {
                return RB_FULL;
            } else if (rbc->cur_wr_buf_idx < rbc->cur_rd_buf_idx) {
                if (record_length >
                    (rbc->cur_rd_buf_idx - rbc->cur_wr_buf_idx)) {
                    return RB_FULL;
                }
            } else {
                if (record_length > (rbc->each_buf_size - rbc->cur_wr_buf_idx)) {
                    /* Make sure the next buffer is not full before rolling
                     * this record over into it
                     */
                    unsigned int next_buf_no = rbc->wr_buf_no + 1;

                    if (next_buf_no >= rbc->max_num_bufs) {
                        next_buf_no = 0;
                    }
                    if (rbc->bufs[next_buf_no].full == 1) {
                        return RB_FULL;
                    }
                }
            }
        } else if (record_length > (rbc->each_buf_size - rbc->cur_wr_buf_idx)) {
            /* Make sure the next buffer is not full before rolling this
             * record over into it
             */
            unsigned int next_buf_no = rbc->wr_buf_no + 1;

            if (next_buf_no >= rbc->max_num_bufs) {
                next_buf_no = 0;
            }
            if (rbc->bufs[next_buf_no].full == 1) {
                return RB_FULL;
            }
        }
    }

    /* Move to the next buffer if the current one does not have enough room
     * for the complete record
     */
    if (record_length > (rbc->each_buf_size - rbc->cur_wr_buf_idx)) {
        rbc->bufs[rbc->wr_buf_no].full = 1;
        rbc->bufs[rbc->wr_buf_no].last_wr_index = rbc->cur_wr_buf_idx;
        rbc->wr_buf_no++;
        if (rbc->wr_buf_no == rbc->max_num_bufs) {
            rbc->wr_buf_no = 0;
        }
        rbc->cur_wr_buf_idx = 0;
    }


    /* In each iteration of the loop below, as much of the input buf as fits
     * into the buffer at wr_buf_no is copied */
    while (bytes_written < length) {
        unsigned int cur_copy_len;

        /* Allocate a buffer if no buf available @ wr_buf_no */
        if (rbc->bufs[rbc->wr_buf_no].data == NULL) {
            rbc->bufs[rbc->wr_buf_no].data = (u8 *)malloc(rbc->each_buf_size);
            if (rbc->bufs[rbc->wr_buf_no].data == NULL) {
                ALOGE("Failed to alloc write buffer");
                return RB_RETRY;
            }
        }

        /* Take the minimum of the remaining length that needs to be written
         * from buf and the maximum length that can be written into current
         * buffer in ring buffer
         */
        cur_copy_len = RB_MIN((rbc->each_buf_size - rbc->cur_wr_buf_idx),
                              (length - bytes_written));

        rb_lock(&rbc->rb_rw_lock);

        /* Push the read pointer in case of overrun */
        if (rbc->rd_buf_no == rbc->wr_buf_no) {
            if ((rbc->cur_rd_buf_idx > rbc->cur_wr_buf_idx) ||
                ((rbc->cur_rd_buf_idx == rbc->cur_wr_buf_idx) &&
                 rbc->cur_valid_bytes)) {
                /* If the read pointer is ahead of the write pointer and the
                 * gap is not enough to fit cur_copy_len bytes, push the read
                 * pointer so that it points to the start of the oldest valid
                 * bytes after this write
                 */
                if ((rbc->cur_rd_buf_idx - rbc->cur_wr_buf_idx) <
                    cur_copy_len) {
                    push_in_rd_ptr += cur_copy_len -
                                    (rbc->cur_rd_buf_idx - rbc->cur_wr_buf_idx);
                    rbc->cur_rd_buf_idx = rbc->cur_wr_buf_idx + cur_copy_len;
                    if (rbc->cur_rd_buf_idx >=
                        rbc->bufs[rbc->rd_buf_no].last_wr_index) {
                        rbc->cur_rd_buf_idx = 0;
                        rbc->rd_buf_no++;
                        if (rbc->rd_buf_no == rbc->max_num_bufs) {
                            rbc->rd_buf_no = 0;
                            ALOGV("Pushing read to the start of ring buffer");
                        }
                        /* the previous buffer might have a little more empty
                         * room after overwriting the remaining bytes
                         */
                        rbc->bufs[rbc->wr_buf_no].full = 0;
                    }
                }
            }
        }
        rb_unlock(&rbc->rb_rw_lock);

        /* Don't hold the lock during the memcpy, so that the read context is
         * not blocked for too long. Writing the memory without the lock is
         * harmless as long as the pointers are updated under the lock */
        memcpy((rbc->bufs[rbc->wr_buf_no].data + rbc->cur_wr_buf_idx),
               (buf + bytes_written),
               cur_copy_len);

        rb_lock(&rbc->rb_rw_lock);
        /* Update the write idx by the amount of write done in this iteration */
        rbc->cur_wr_buf_idx += cur_copy_len;
        if (rbc->cur_wr_buf_idx == rbc->each_buf_size) {
            /* Increment the wr_buf_no as the current buffer is full */
            rbc->bufs[rbc->wr_buf_no].full = 1;
            rbc->bufs[rbc->wr_buf_no].last_wr_index = rbc->cur_wr_buf_idx;
            rbc->wr_buf_no++;
            if (rbc->wr_buf_no == rbc->max_num_bufs) {
                ALOGV("Write rolling over to the start of ring buffer");
                rbc->wr_buf_no = 0;
            }
            /* Reset the write index to zero as this is a new buffer */
            rbc->cur_wr_buf_idx = 0;
        }

        if ((rbc->cur_valid_bytes + (cur_copy_len - push_in_rd_ptr)) >
            (rbc->max_num_bufs * rbc->each_buf_size)) {
            /* This is only a precautionary log and should never be hit */
            ALOGE("Something went wrong in ring buffer");
        } else {
            /* Increase the valid bytes count by number of bytes written without
             * overwriting the old bytes */
            rbc->cur_valid_bytes += cur_copy_len - push_in_rd_ptr;
        }
        total_push_in_rd_ptr += push_in_rd_ptr;
        push_in_rd_ptr = 0;
        rb_unlock(&rbc->rb_rw_lock);
        bytes_written += cur_copy_len;
    }

    rb_lock(&rbc->rb_rw_lock);
    rbc->total_bytes_written += bytes_written - total_push_in_rd_ptr;
    rbc->total_bytes_overwritten += total_push_in_rd_ptr;

    /* check if valid bytes has crossed the threshold */
    if ((rbc->threshold_reached == RB_FALSE) &&
        (rbc->cur_valid_bytes >= rbc->num_min_bytes) &&
        ((length == record_length) || !record_length) &&
        rbc->threshold_cb) {
        /* Release the lock before calling threshold_cb to avoid a deadlock,
         * as the callback might call rb_read in this same context
         */
        rbc->threshold_reached = RB_TRUE;
        rb_unlock(&rbc->rb_rw_lock);
        rbc->threshold_cb(rbc->cb_ctx);
    } else {
        rb_unlock(&rbc->rb_rw_lock);
    }
    return RB_SUCCESS;
}

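/* Copy up to max_length bytes of valid data into buf, starting at the current
 * read position. Fully consumed buffers (other than the one currently being
 * written) are freed as the read pointer moves past them. Returns the number
 * of bytes actually copied, which may be less than max_length.
 */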
size_t rb_read (void *ctx, u8 *buf, size_t max_length)
{
    rbc_t *rbc = (rbc_t *)ctx;
    unsigned int bytes_read = 0;
    unsigned int no_more_bytes_available = 0;

    rb_lock(&rbc->rb_rw_lock);
    while (bytes_read < max_length) {
        unsigned int cur_cpy_len;

        if (rbc->bufs[rbc->rd_buf_no].data == NULL) {
            break;
        }

        /* if read and write are on same buffer, work with rd, wr indices */
        if (rbc->rd_buf_no == rbc->wr_buf_no) {
            if (rbc->cur_rd_buf_idx < rbc->cur_wr_buf_idx) {
                /* Check if all the required bytes are available, if not
                 * read only the available bytes in the current buffer and
                 * break out after reading current buffer
                 */
                if ((rbc->cur_wr_buf_idx - rbc->cur_rd_buf_idx) <
                        (max_length - bytes_read)) {
                    cur_cpy_len = rbc->cur_wr_buf_idx - rbc->cur_rd_buf_idx;
                    no_more_bytes_available = 1;
                } else {
                    cur_cpy_len = max_length - bytes_read;
                }
            } else {
                /* When there are no bytes available to read, cur_rd_buf_idx
                 * will be equal to cur_wr_buf_idx. Handle this scenario using
                 * cur_valid_bytes */
                if (rbc->cur_valid_bytes <= bytes_read) {
                    /* Suppress possible static analyzer's warning */
                    cur_cpy_len = 0;
                    break;
                }
                cur_cpy_len = RB_MIN((rbc->each_buf_size - rbc->cur_rd_buf_idx),
                                     (max_length - bytes_read));
            }
        } else {
            /* Check whether all of the remaining (max_length - bytes_read)
             * bytes can be read from this buffer; if not, read only the
             * available bytes in the current buffer and move to the next
             * buffer on the next pass of the while loop.
             */
            cur_cpy_len = RB_MIN((rbc->each_buf_size - rbc->cur_rd_buf_idx),
                                 (max_length - bytes_read));
        }

        memcpy((buf + bytes_read),
               (rbc->bufs[rbc->rd_buf_no].data + rbc->cur_rd_buf_idx),
               cur_cpy_len);

        /* Update the read index */
        rbc->cur_rd_buf_idx += cur_cpy_len;
        if (rbc->cur_rd_buf_idx == rbc->each_buf_size) {
            /* Increment rd_buf_no as the current buffer is completely read */
            if (rbc->rd_buf_no != rbc->wr_buf_no) {
                free(rbc->bufs[rbc->rd_buf_no].data);
                rbc->bufs[rbc->rd_buf_no].data = NULL;
            }
            rbc->rd_buf_no++;
            if (rbc->rd_buf_no == rbc->max_num_bufs) {
                ALOGV("Read rolling over to the start of ring buffer");
                rbc->rd_buf_no = 0;
            }
            /* Reset the read index as this is a new buffer */
            rbc->cur_rd_buf_idx = 0;
        }

        bytes_read += cur_cpy_len;
        if (no_more_bytes_available) {
            break;
        }
    }

    rbc->total_bytes_read += bytes_read;
    if (rbc->cur_valid_bytes < bytes_read) {
        /* This is only a precautionary log and should never be hit */
        ALOGE("Something went wrong in ring buffer");
    } else {
        rbc->cur_valid_bytes -= bytes_read;
    }

    /* check if valid bytes has dropped below the threshold */
    if (rbc->threshold_reached == RB_TRUE) {
        if (rbc->cur_valid_bytes < rbc->num_min_bytes) {
            rbc->threshold_reached = RB_FALSE;
        }
    }
    rb_unlock(&rbc->rb_rw_lock);
    return bytes_read;
}

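/* Hand out one readable chunk without copying when possible. If the current
 * read buffer is full and unread from its start, ownership of that buffer is
 * transferred to the caller; otherwise a freshly allocated copy of the
 * readable bytes is returned. The caller is responsible for freeing the
 * returned pointer. *length is set to the number of valid bytes, and NULL is
 * returned when nothing is available.
 */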
u8 *rb_get_read_buf(void *ctx, size_t *length)
{
    rbc_t *rbc = (rbc_t *)ctx;
    unsigned int cur_read_len = 0;
    u8 *buf;

    /* If no buffer is available for reading */
    if (!rbc || rbc->bufs[rbc->rd_buf_no].data == NULL) {
        *length = 0;
        return NULL;
    }

    rb_lock(&rbc->rb_rw_lock);
    if ((rbc->bufs[rbc->rd_buf_no].full == 1) &&
        (rbc->cur_rd_buf_idx == rbc->bufs[rbc->rd_buf_no].last_wr_index)) {
        if (rbc->wr_buf_no != rbc->rd_buf_no) {
            free(rbc->bufs[rbc->rd_buf_no].data);
            rbc->bufs[rbc->rd_buf_no].data = NULL;
        }
        rbc->bufs[rbc->rd_buf_no].full = 0;
        rbc->rd_buf_no++;
        if (rbc->rd_buf_no == rbc->max_num_bufs) {
            rbc->rd_buf_no = 0;
        }
        rbc->cur_rd_buf_idx = 0;
    }

    if (rbc->wr_buf_no == rbc->rd_buf_no) {
        /* If read and write are happening on the same buffer currently, use
         * rd and wr indices within the buffer */
        if ((rbc->cur_rd_buf_idx == rbc->cur_wr_buf_idx) &&
            (rbc->cur_valid_bytes == 0)) {
            /* No bytes available for reading */
            *length = 0;
            rb_unlock(&rbc->rb_rw_lock);
            return NULL;
        } else if (rbc->cur_rd_buf_idx < rbc->cur_wr_buf_idx) {
            /* write is just ahead of read in this buffer */
            cur_read_len = rbc->cur_wr_buf_idx - rbc->cur_rd_buf_idx;
        } else {
            /* write is rolled over and just behind the read */
            cur_read_len = rbc->bufs[rbc->rd_buf_no].last_wr_index - rbc->cur_rd_buf_idx;
        }
    } else {
        if (rbc->cur_rd_buf_idx == 0) {
            /* The complete buffer can be read out */
            cur_read_len = rbc->bufs[rbc->rd_buf_no].last_wr_index;
        } else {
            /* Read the remaining bytes in this buffer */
            cur_read_len = rbc->bufs[rbc->rd_buf_no].last_wr_index - rbc->cur_rd_buf_idx;
        }
    }

    if ((rbc->bufs[rbc->rd_buf_no].full == 1) &&
         (rbc->cur_rd_buf_idx == 0)) {
        /* Pluck out the complete buffer and send it out */
        buf = rbc->bufs[rbc->rd_buf_no].data;
        rbc->bufs[rbc->rd_buf_no].data = NULL;

        /* Move to the next buffer */
        rbc->bufs[rbc->rd_buf_no].full = 0;
        rbc->rd_buf_no++;
        if (rbc->rd_buf_no == rbc->max_num_bufs) {
            ALOGV("Read rolling over to the start of ring buffer");
            rbc->rd_buf_no = 0;
        }
    } else {
        /* We cannot give out the complete buffer, so allocate new memory and
         * copy the data into it.
         */
        buf = (u8 *)malloc(cur_read_len);
        if (buf == NULL) {
            ALOGE("Failed to alloc buffer for partial buf read");
            *length = 0;
            rb_unlock(&rbc->rb_rw_lock);
            return NULL;
        }
        memcpy(buf,
               (rbc->bufs[rbc->rd_buf_no].data + rbc->cur_rd_buf_idx),
               cur_read_len);

        /* Update the read index */
        if (rbc->bufs[rbc->rd_buf_no].full == 1) {
            if (rbc->wr_buf_no != rbc->rd_buf_no) {
                free(rbc->bufs[rbc->rd_buf_no].data);
                rbc->bufs[rbc->rd_buf_no].data = NULL;
            }
            rbc->bufs[rbc->rd_buf_no].full = 0;
            rbc->rd_buf_no++;
            if (rbc->rd_buf_no == rbc->max_num_bufs) {
                rbc->rd_buf_no = 0;
            }
            rbc->cur_rd_buf_idx = 0;
        } else {
            rbc->cur_rd_buf_idx += cur_read_len;
        }
    }

    rbc->total_bytes_read += cur_read_len;
    if (rbc->cur_valid_bytes < cur_read_len) {
        /* This is only a precautionary log and should never be hit */
        ALOGE("Something went wrong in ring buffer");
    } else {
        rbc->cur_valid_bytes -= cur_read_len;
    }

    /* check if valid bytes has dropped below the threshold */
    if (rbc->threshold_reached == RB_TRUE) {
        if (rbc->cur_valid_bytes < rbc->num_min_bytes) {
            rbc->threshold_reached = RB_FALSE;
        }
    }
    rb_unlock(&rbc->rb_rw_lock);

    *length = cur_read_len;
    return buf;
}

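/* Register a watermark callback: once rb_write() has accumulated at least
 * num_min_bytes of unread data, callback(cb_ctx) is invoked from the writer's
 * context, and it is re-armed after rb_read()/rb_get_read_buf() drain the
 * buffer back below the watermark.
 *
 * A minimal wiring sketch (the callback name here is illustrative, not part
 * of this API):
 *
 *     static void on_rb_threshold(void *ctx) { ... drain with rb_read() ... }
 *     rb_config_threshold(rb, 1024, on_rb_threshold, rb);
 */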
void rb_config_threshold(void *ctx,
                         unsigned int num_min_bytes,
                         threshold_call_back callback,
                         void *cb_ctx)
{
    rbc_t *rbc = (rbc_t *)ctx;

    rbc->num_min_bytes = num_min_bytes;
    rbc->threshold_cb = callback;
    rbc->cb_ctx = cb_ctx;
}

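/* Snapshot the current counters into rbs. Note that the counters are read
 * without taking rb_rw_lock, so the values are only approximate while
 * writers or readers are active.
 */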
void rb_get_stats(void *ctx, struct rb_stats *rbs)
{
    rbc_t *rbc = (rbc_t *)ctx;

    rbs->total_bytes_written = rbc->total_bytes_written;
    rbs->total_bytes_read = rbc->total_bytes_read;
    rbs->cur_valid_bytes = rbc->cur_valid_bytes;
    rbs->each_buf_size = rbc->each_buf_size;
    rbs->max_num_bufs = rbc->max_num_bufs;
}