// Copyright 2018 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "android/base/ring_buffer.h"

#include <errno.h>
#include <string.h>
#ifdef _MSC_VER
#include "msvc-posix.h"
#else
#include <sys/time.h>
#endif

#if (defined(__i386__) || defined(__x86_64__))
#define RING_BUFFER_X86 1
#else
#define RING_BUFFER_X86 0
#endif

#if RING_BUFFER_X86
#include <emmintrin.h>
#endif

#ifdef _WIN32
#include <windows.h>
#else
#include <sched.h>
#include <unistd.h>
#endif

#define RING_BUFFER_MASK (RING_BUFFER_SIZE - 1)

#define RING_BUFFER_VERSION 1

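// Hints to the CPU that we are in a spin-wait loop, reducing power use and
// memory-order speculation. Currently a no-op on non-x86 targets.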
static inline void ring_buffer_pause() {
#if RING_BUFFER_X86
    _mm_pause();
#else
    // TODO(lfy) analog of pause on ARM
#endif
}

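// Initializes a ring buffer to the empty state. Not thread safe; call before
// any producer or consumer touches |r|.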
void ring_buffer_init(struct ring_buffer* r) {
    r->guest_version = 1;
    r->write_pos = 0;
    r->read_pos = 0;

    r->read_live_count = 0;
    r->read_yield_count = 0;
    r->read_sleep_us_count = 0;

    r->state = 0;
}

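// Maps a monotonically increasing position counter onto a buffer offset.
// Relies on RING_BUFFER_SIZE being a power of two.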
static uint32_t get_ring_pos(uint32_t index) {
    return index & RING_BUFFER_MASK;
}

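// Returns true if |bytes| bytes can be written without overwriting unread
// data. The "- 1" keeps one byte permanently free so that a full buffer is
// distinguishable from an empty one.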
bool ring_buffer_can_write(const struct ring_buffer* r, uint32_t bytes) {
    uint32_t read_view;
    __atomic_load(&r->read_pos, &read_view, __ATOMIC_SEQ_CST);
    return get_ring_pos(read_view - r->write_pos - 1) >= bytes;
}

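// Returns true if |bytes| bytes are available to read.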
bool ring_buffer_can_read(const struct ring_buffer* r, uint32_t bytes) {
    uint32_t write_view;
    __atomic_load(&r->write_pos, &write_view, __ATOMIC_SEQ_CST);
    return get_ring_pos(write_view - r->read_pos) >= bytes;
}

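// Writes up to |steps| items of |step_size| bytes each. Returns the number of
// whole steps written; on a partial write, errno is set to EAGAIN. A step
// that would cross the end of the buffer is split into two memcpy calls.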
long ring_buffer_write(
    struct ring_buffer* r, const void* data, uint32_t step_size, uint32_t steps) {
    const uint8_t* data_bytes = (const uint8_t*)data;
    uint32_t i;

    for (i = 0; i < steps; ++i) {
        if (!ring_buffer_can_write(r, step_size)) {
            errno = EAGAIN;
            return (long)i;
        }

        // Needs to be split up into 2 writes for the edge case.
        uint32_t available_at_end =
            RING_BUFFER_SIZE - get_ring_pos(r->write_pos);

        if (step_size > available_at_end) {
            uint32_t remaining = step_size - available_at_end;
            memcpy(
                &r->buf[get_ring_pos(r->write_pos)],
                data_bytes + i * step_size,
                available_at_end);
            memcpy(
                &r->buf[get_ring_pos(r->write_pos + available_at_end)],
                data_bytes + i * step_size + available_at_end,
                remaining);
        } else {
            memcpy(
                &r->buf[get_ring_pos(r->write_pos)],
                data_bytes + i * step_size,
                step_size);
        }

        __atomic_add_fetch(&r->write_pos, step_size, __ATOMIC_SEQ_CST);
    }

    errno = 0;
    return (long)steps;
}

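// Reads up to |steps| items of |step_size| bytes each, mirroring
// ring_buffer_write: returns the number of whole steps read and sets errno to
// EAGAIN on a partial read.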
long ring_buffer_read(
    struct ring_buffer* r, void* data, uint32_t step_size, uint32_t steps) {
    uint8_t* data_bytes = (uint8_t*)data;
    uint32_t i;

    for (i = 0; i < steps; ++i) {
        if (!ring_buffer_can_read(r, step_size)) {
            errno = EAGAIN;
            return (long)i;
        }

        // Needs to be split up into 2 reads for the edge case.
        uint32_t available_at_end =
            RING_BUFFER_SIZE - get_ring_pos(r->read_pos);

        if (step_size > available_at_end) {
            uint32_t remaining = step_size - available_at_end;
            memcpy(
                data_bytes + i * step_size,
                &r->buf[get_ring_pos(r->read_pos)],
                available_at_end);
            memcpy(
                data_bytes + i * step_size + available_at_end,
                &r->buf[get_ring_pos(r->read_pos + available_at_end)],
                remaining);
        } else {
            memcpy(
                data_bytes + i * step_size,
                &r->buf[get_ring_pos(r->read_pos)],
                step_size);
        }

        __atomic_add_fetch(&r->read_pos, step_size, __ATOMIC_SEQ_CST);
    }

    errno = 0;
    return (long)steps;
}

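// ring_buffer_advance_write/ring_buffer_advance_read move the positions
// without copying any payload, for callers that fill or drain the underlying
// storage themselves. Same return and errno conventions as read/write above.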
long ring_buffer_advance_write(
    struct ring_buffer* r, uint32_t step_size, uint32_t steps) {
    uint32_t i;

    for (i = 0; i < steps; ++i) {
        if (!ring_buffer_can_write(r, step_size)) {
            errno = EAGAIN;
            return (long)i;
        }

        __atomic_add_fetch(&r->write_pos, step_size, __ATOMIC_SEQ_CST);
    }

    errno = 0;
    return (long)steps;
}

long ring_buffer_advance_read(
    struct ring_buffer* r, uint32_t step_size, uint32_t steps) {
    uint32_t i;

    for (i = 0; i < steps; ++i) {
        if (!ring_buffer_can_read(r, step_size)) {
            errno = EAGAIN;
            return (long)i;
        }

        __atomic_add_fetch(&r->read_pos, step_size, __ATOMIC_SEQ_CST);
    }

    errno = 0;
    return (long)steps;
}

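// Computes floor(log2(size)): the largest shift such that (1 << shift) fits
// in |size|. Views use this to round a caller-supplied buffer size down to a
// power of two so that index masking works.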
uint32_t ring_buffer_calc_shift(uint32_t size) {
    uint32_t shift = 0;
    while ((1 << shift) < size) {
        ++shift;
    }

    // If size is not a power of 2, round down to the previous power of 2.
    if ((1 << shift) > size) {
        --shift;
    }
    return shift;
}

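// Initializes |r| together with a view |v| that wraps a caller-provided
// buffer. The usable size is |size| rounded down to a power of two.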
void ring_buffer_view_init(
    struct ring_buffer* r,
    struct ring_buffer_view* v,
    uint8_t* buf,
    uint32_t size) {

    uint32_t shift = ring_buffer_calc_shift(size);

    ring_buffer_init(r);

    v->buf = buf;
    v->size = (1 << shift);
    v->mask = (1 << shift) - 1;
}

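// Like ring_buffer_view_init, but leaves the ring_buffer control structure
// untouched (for the case where it is shared and initialized elsewhere).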
void ring_buffer_init_view_only(
    struct ring_buffer_view* v,
    uint8_t* buf,
    uint32_t size) {

    uint32_t shift = ring_buffer_calc_shift(size);

    v->buf = buf;
    v->size = (1 << shift);
    v->mask = (1 << shift) - 1;
}

uint32_t ring_buffer_view_get_ring_pos(
    const struct ring_buffer_view* v,
    uint32_t index) {
    return index & v->mask;
}

bool ring_buffer_view_can_write(
    const struct ring_buffer* r,
    const struct ring_buffer_view* v,
    uint32_t bytes) {
    uint32_t read_view;
    __atomic_load(&r->read_pos, &read_view, __ATOMIC_SEQ_CST);
    return ring_buffer_view_get_ring_pos(
        v, read_view - r->write_pos - 1) >= bytes;
}

bool ring_buffer_view_can_read(
    const struct ring_buffer* r,
    const struct ring_buffer_view* v,
    uint32_t bytes) {
    uint32_t write_view;
    __atomic_load(&r->write_pos, &write_view, __ATOMIC_SEQ_CST);
    return ring_buffer_view_get_ring_pos(
        v, write_view - r->read_pos) >= bytes;
}

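// Returns the number of bytes currently available to read. |v| may be NULL,
// in which case the ring buffer's built-in storage is assumed.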
uint32_t ring_buffer_available_read(
    const struct ring_buffer* r,
    const struct ring_buffer_view* v) {
    uint32_t write_view;
    __atomic_load(&r->write_pos, &write_view, __ATOMIC_SEQ_CST);
    if (v) {
        return ring_buffer_view_get_ring_pos(
            v, write_view - r->read_pos);
    } else {
        return get_ring_pos(write_view - r->read_pos);
    }
}

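// Copies |wanted_bytes| of readable data into |res| without consuming it
// (read_pos is left unchanged). Returns 0 on success, -1 if fewer than
// |wanted_bytes| are available.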
int ring_buffer_copy_contents(
    const struct ring_buffer* r,
    const struct ring_buffer_view* v,
    uint32_t wanted_bytes,
    uint8_t* res) {

    uint32_t total_available =
        ring_buffer_available_read(r, v);
    uint32_t available_at_end = 0;

    if (v) {
        available_at_end =
            v->size - ring_buffer_view_get_ring_pos(v, r->read_pos);
    } else {
        // Contiguous bytes at the end are measured from the read position,
        // matching the view branch above.
        available_at_end =
            RING_BUFFER_SIZE - get_ring_pos(r->read_pos);
    }

    if (total_available < wanted_bytes) {
        return -1;
    }

    if (v) {
        if (wanted_bytes > available_at_end) {
            uint32_t remaining = wanted_bytes - available_at_end;
            memcpy(res,
                   &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos)],
                   available_at_end);
            memcpy(res + available_at_end,
                   &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos + available_at_end)],
                   remaining);
        } else {
            memcpy(res,
                   &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos)],
                   wanted_bytes);
        }
    } else {
        if (wanted_bytes > available_at_end) {
            uint32_t remaining = wanted_bytes - available_at_end;
            memcpy(res,
                   &r->buf[get_ring_pos(r->read_pos)],
                   available_at_end);
            memcpy(res + available_at_end,
                   &r->buf[get_ring_pos(r->read_pos + available_at_end)],
                   remaining);
        } else {
            memcpy(res,
                   &r->buf[get_ring_pos(r->read_pos)],
                   wanted_bytes);
        }
    }
    return 0;
}

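// ring_buffer_write against a view's external buffer; same step semantics and
// errno convention as ring_buffer_write.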
long ring_buffer_view_write(
    struct ring_buffer* r,
    struct ring_buffer_view* v,
    const void* data, uint32_t step_size, uint32_t steps) {

    const uint8_t* data_bytes = (const uint8_t*)data;
    uint32_t i;

    for (i = 0; i < steps; ++i) {
        if (!ring_buffer_view_can_write(r, v, step_size)) {
            errno = EAGAIN;
            return (long)i;
        }

        // Needs to be split up into 2 writes for the edge case.
        uint32_t available_at_end =
            v->size - ring_buffer_view_get_ring_pos(v, r->write_pos);

        if (step_size > available_at_end) {
            uint32_t remaining = step_size - available_at_end;
            memcpy(
                &v->buf[ring_buffer_view_get_ring_pos(v, r->write_pos)],
                data_bytes + i * step_size,
                available_at_end);
            memcpy(
                &v->buf[ring_buffer_view_get_ring_pos(v, r->write_pos + available_at_end)],
                data_bytes + i * step_size + available_at_end,
                remaining);
        } else {
            memcpy(
                &v->buf[ring_buffer_view_get_ring_pos(v, r->write_pos)],
                data_bytes + i * step_size,
                step_size);
        }

        __atomic_add_fetch(&r->write_pos, step_size, __ATOMIC_SEQ_CST);
    }

    errno = 0;
    return (long)steps;
}

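// ring_buffer_read against a view's external buffer.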
long ring_buffer_view_read(
    struct ring_buffer* r,
    struct ring_buffer_view* v,
    void* data, uint32_t step_size, uint32_t steps) {
    uint8_t* data_bytes = (uint8_t*)data;
    uint32_t i;

    for (i = 0; i < steps; ++i) {
        if (!ring_buffer_view_can_read(r, v, step_size)) {
            errno = EAGAIN;
            return (long)i;
        }

        // Needs to be split up into 2 reads for the edge case.
        uint32_t available_at_end =
            v->size - ring_buffer_view_get_ring_pos(v, r->read_pos);

        if (step_size > available_at_end) {
            uint32_t remaining = step_size - available_at_end;
            memcpy(
                data_bytes + i * step_size,
                &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos)],
                available_at_end);
            memcpy(
                data_bytes + i * step_size + available_at_end,
                &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos + available_at_end)],
                remaining);
        } else {
            memcpy(data_bytes + i * step_size,
                   &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos)],
                   step_size);
        }
        __atomic_add_fetch(&r->read_pos, step_size, __ATOMIC_SEQ_CST);
    }

    errno = 0;
    return (long)steps;
}

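// Placeholder for a scheduler yield; currently a no-op, so the wait functions
// below effectively busy-spin.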
void ring_buffer_yield() { }

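// Spins (calling ring_buffer_yield between checks) until |bytes| bytes can be
// written. Always returns true.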
bool ring_buffer_wait_write(
    const struct ring_buffer* r,
    const struct ring_buffer_view* v,
    uint32_t bytes) {

    bool can_write =
        v ? ring_buffer_view_can_write(r, v, bytes) :
            ring_buffer_can_write(r, bytes);

    while (!can_write) {
        ring_buffer_yield();
        can_write =
            v ? ring_buffer_view_can_write(r, v, bytes) :
                ring_buffer_can_write(r, bytes);
    }

    return true;
}

bool ring_buffer_wait_read(
    const struct ring_buffer* r,
    const struct ring_buffer_view* v,
    uint32_t bytes) {

    bool can_read =
        v ? ring_buffer_view_can_read(r, v, bytes) :
            ring_buffer_can_read(r, bytes);

    while (!can_read) {
        ring_buffer_yield();
        can_read =
            v ? ring_buffer_view_can_read(r, v, bytes) :
                ring_buffer_can_read(r, bytes);
    }

    // Statistics only; cast away const to bump the counter.
    ((struct ring_buffer*)r)->read_live_count++;
    return true;
}

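// Chooses a chunk size for the _fully transfer loops: at most half the ring
// capacity, so a transfer larger than the ring can proceed piecewise while
// the other side drains or fills it.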
static uint32_t get_step_size(
    struct ring_buffer_view* v,
    uint32_t bytes) {

    uint32_t available = v ? (v->size >> 1) : (RING_BUFFER_SIZE >> 1);
    uint32_t res = available < bytes ? available : bytes;

    return res;
}

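// Blocking transfers of exactly |bytes| bytes; convenience wrappers around
// the *_with_abort variants with aborting disabled.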
void ring_buffer_write_fully(
    struct ring_buffer* r,
    struct ring_buffer_view* v,
    const void* data,
    uint32_t bytes) {
    ring_buffer_write_fully_with_abort(r, v, data, bytes, 0, 0);
}

void ring_buffer_read_fully(
    struct ring_buffer* r,
    struct ring_buffer_view* v,
    void* data,
    uint32_t bytes) {
    ring_buffer_read_fully_with_abort(r, v, data, bytes, 0, 0);
}

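// Writes |bytes| bytes, spinning as needed. If |abort_ptr| is non-NULL and
// ever equals |abort_value|, the transfer stops early; the return value is
// the number of bytes actually written.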
uint32_t ring_buffer_write_fully_with_abort(
    struct ring_buffer* r,
    struct ring_buffer_view* v,
    const void* data,
    uint32_t bytes,
    uint32_t abort_value,
    const volatile uint32_t* abort_ptr) {

    uint32_t candidate_step = get_step_size(v, bytes);
    uint32_t processed = 0;

    const uint8_t* src = (const uint8_t*)data;

    while (processed < bytes) {
        if (bytes - processed < candidate_step) {
            candidate_step = bytes - processed;
        }

        long processed_here = 0;
        ring_buffer_wait_write(r, v, candidate_step);

        if (v) {
            processed_here = ring_buffer_view_write(r, v, src + processed, candidate_step, 1);
        } else {
            processed_here = ring_buffer_write(r, src + processed, candidate_step, 1);
        }

        processed += processed_here ? candidate_step : 0;

        if (abort_ptr && (abort_value == *abort_ptr)) {
            return processed;
        }
    }

    return processed;
}

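// Reads |bytes| bytes, spinning as needed, with the same abort contract as
// ring_buffer_write_fully_with_abort.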
uint32_t ring_buffer_read_fully_with_abort(
    struct ring_buffer* r,
    struct ring_buffer_view* v,
    void* data,
    uint32_t bytes,
    uint32_t abort_value,
    const volatile uint32_t* abort_ptr) {

    uint32_t candidate_step = get_step_size(v, bytes);
    uint32_t processed = 0;

    uint8_t* dst = (uint8_t*)data;

    while (processed < bytes) {
        ring_buffer_pause();
        if (bytes - processed < candidate_step) {
            candidate_step = bytes - processed;
        }

        long processed_here = 0;
        ring_buffer_wait_read(r, v, candidate_step);

        if (v) {
            processed_here = ring_buffer_view_read(r, v, dst + processed, candidate_step, 1);
        } else {
            processed_here = ring_buffer_read(r, dst + processed, candidate_step, 1);
        }

        processed += processed_here ? candidate_step : 0;

        if (abort_ptr && (abort_value == *abort_ptr)) {
            return processed;
        }
    }

    return processed;
}

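// Producer/consumer handshake over r->state. The producer moves between
// RING_BUFFER_SYNC_PRODUCER_IDLE and RING_BUFFER_SYNC_PRODUCER_ACTIVE; the
// consumer hangs up via RING_BUFFER_SYNC_CONSUMER_HANGING_UP and then
// RING_BUFFER_SYNC_CONSUMER_HUNG_UP, after which the producer may reacquire
// the ring. All transitions use SEQ_CST atomics; the compare-exchange based
// ones return false if another transition won the race.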
void ring_buffer_sync_init(struct ring_buffer* r) {
    __atomic_store_n(&r->state, RING_BUFFER_SYNC_PRODUCER_IDLE, __ATOMIC_SEQ_CST);
}

bool ring_buffer_producer_acquire(struct ring_buffer* r) {
    uint32_t expected_idle = RING_BUFFER_SYNC_PRODUCER_IDLE;
    bool success = __atomic_compare_exchange_n(
        &r->state,
        &expected_idle,
        RING_BUFFER_SYNC_PRODUCER_ACTIVE,
        false /* strong */,
        __ATOMIC_SEQ_CST,
        __ATOMIC_SEQ_CST);
    return success;
}

bool ring_buffer_producer_acquire_from_hangup(struct ring_buffer* r) {
    uint32_t expected_hangup = RING_BUFFER_SYNC_CONSUMER_HUNG_UP;
    bool success = __atomic_compare_exchange_n(
        &r->state,
        &expected_hangup,
        RING_BUFFER_SYNC_PRODUCER_ACTIVE,
        false /* strong */,
        __ATOMIC_SEQ_CST,
        __ATOMIC_SEQ_CST);
    return success;
}

void ring_buffer_producer_wait_hangup(struct ring_buffer* r) {
    while (__atomic_load_n(&r->state, __ATOMIC_SEQ_CST) !=
           RING_BUFFER_SYNC_CONSUMER_HUNG_UP) {
        ring_buffer_yield();
    }
}

void ring_buffer_producer_idle(struct ring_buffer* r) {
    __atomic_store_n(&r->state, RING_BUFFER_SYNC_PRODUCER_IDLE, __ATOMIC_SEQ_CST);
}

bool ring_buffer_consumer_hangup(struct ring_buffer* r) {
    uint32_t expected_idle = RING_BUFFER_SYNC_PRODUCER_IDLE;
    bool success = __atomic_compare_exchange_n(
        &r->state,
        &expected_idle,
        RING_BUFFER_SYNC_CONSUMER_HANGING_UP,
        false /* strong */,
        __ATOMIC_SEQ_CST,
        __ATOMIC_SEQ_CST);
    return success;
}

void ring_buffer_consumer_wait_producer_idle(struct ring_buffer* r) {
    while (__atomic_load_n(&r->state, __ATOMIC_SEQ_CST) !=
           RING_BUFFER_SYNC_PRODUCER_IDLE) {
        ring_buffer_yield();
    }
}

void ring_buffer_consumer_hung_up(struct ring_buffer* r) {
    __atomic_store_n(&r->state, RING_BUFFER_SYNC_CONSUMER_HUNG_UP, __ATOMIC_SEQ_CST);
}