1 /*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16 #include "AddressSpaceStream.h"
17
18 #if PLATFORM_SDK_VERSION < 26
19 #include <cutils/log.h>
20 #else
21 #include <log/log.h>
22 #endif
23 #include <errno.h>
24 #include <stdio.h>
25 #include <stdlib.h>
26 #include <unistd.h>
27 #include <string.h>
28
// Size of the guest-side scratch buffer used to stage reads from the host
// (allocated lazily in allocBuffer(), consumed by readFully()).
static const size_t kReadSize = 512 * 1024;
// NOTE(review): kWriteOffset is not referenced anywhere in this file —
// presumably a leftover from an earlier buffer-layout scheme; confirm
// before removing.
static const size_t kWriteOffset = kReadSize;
31
createAddressSpaceStream(size_t ignored_bufSize)32 AddressSpaceStream* createAddressSpaceStream(size_t ignored_bufSize) {
33 // Ignore incoming ignored_bufSize
34 (void)ignored_bufSize;
35
36 auto handle = goldfish_address_space_open();
37 address_space_handle_t child_device_handle;
38
39 if (!goldfish_address_space_set_subdevice_type(handle, GoldfishAddressSpaceSubdeviceType::Graphics, &child_device_handle)) {
40 ALOGE("AddressSpaceStream::create failed (initial device create)\n");
41 goldfish_address_space_close(handle);
42 return nullptr;
43 }
44
45 struct address_space_ping request;
46 request.metadata = ASG_GET_RING;
47 if (!goldfish_address_space_ping(child_device_handle, &request)) {
48 ALOGE("AddressSpaceStream::create failed (get ring)\n");
49 goldfish_address_space_close(child_device_handle);
50 return nullptr;
51 }
52
53 uint64_t ringOffset = request.metadata;
54
55 request.metadata = ASG_GET_BUFFER;
56 if (!goldfish_address_space_ping(child_device_handle, &request)) {
57 ALOGE("AddressSpaceStream::create failed (get buffer)\n");
58 goldfish_address_space_close(child_device_handle);
59 return nullptr;
60 }
61
62 uint64_t bufferOffset = request.metadata;
63 uint64_t bufferSize = request.size;
64
65 if (!goldfish_address_space_claim_shared(
66 child_device_handle, ringOffset, sizeof(asg_ring_storage))) {
67 ALOGE("AddressSpaceStream::create failed (claim ring storage)\n");
68 goldfish_address_space_close(child_device_handle);
69 return nullptr;
70 }
71
72 if (!goldfish_address_space_claim_shared(
73 child_device_handle, bufferOffset, bufferSize)) {
74 ALOGE("AddressSpaceStream::create failed (claim buffer storage)\n");
75 goldfish_address_space_unclaim_shared(child_device_handle, ringOffset);
76 goldfish_address_space_close(child_device_handle);
77 return nullptr;
78 }
79
80 char* ringPtr = (char*)goldfish_address_space_map(
81 child_device_handle, ringOffset, sizeof(struct asg_ring_storage));
82
83 if (!ringPtr) {
84 ALOGE("AddressSpaceStream::create failed (map ring storage)\n");
85 goldfish_address_space_unclaim_shared(child_device_handle, bufferOffset);
86 goldfish_address_space_unclaim_shared(child_device_handle, ringOffset);
87 goldfish_address_space_close(child_device_handle);
88 return nullptr;
89 }
90
91 char* bufferPtr = (char*)goldfish_address_space_map(
92 child_device_handle, bufferOffset, bufferSize);
93
94 if (!bufferPtr) {
95 ALOGE("AddressSpaceStream::create failed (map buffer storage)\n");
96 goldfish_address_space_unmap(ringPtr, sizeof(struct asg_ring_storage));
97 goldfish_address_space_unclaim_shared(child_device_handle, bufferOffset);
98 goldfish_address_space_unclaim_shared(child_device_handle, ringOffset);
99 goldfish_address_space_close(child_device_handle);
100 return nullptr;
101 }
102
103 struct asg_context context =
104 asg_context_create(
105 ringPtr, bufferPtr, bufferSize);
106
107 request.metadata = ASG_SET_VERSION;
108 request.size = 1; // version 1
109
110 if (!goldfish_address_space_ping(child_device_handle, &request)) {
111 ALOGE("AddressSpaceStream::create failed (get buffer)\n");
112 goldfish_address_space_unmap(bufferPtr, bufferSize);
113 goldfish_address_space_unmap(ringPtr, sizeof(struct asg_ring_storage));
114 goldfish_address_space_unclaim_shared(child_device_handle, bufferOffset);
115 goldfish_address_space_unclaim_shared(child_device_handle, ringOffset);
116 goldfish_address_space_close(child_device_handle);
117 return nullptr;
118 }
119
120 uint32_t version = request.size;
121
122 context.ring_config->transfer_mode = 1;
123 context.ring_config->host_consumed_pos = 0;
124 context.ring_config->guest_write_pos = 0;
125
126 struct address_space_ops ops = {
127 .open = goldfish_address_space_open,
128 .close = goldfish_address_space_close,
129 .claim_shared = goldfish_address_space_claim_shared,
130 .unclaim_shared = goldfish_address_space_unclaim_shared,
131 .map = goldfish_address_space_map,
132 .unmap = goldfish_address_space_unmap,
133 .set_subdevice_type = goldfish_address_space_set_subdevice_type,
134 .ping = goldfish_address_space_ping,
135 };
136
137 AddressSpaceStream* res =
138 new AddressSpaceStream(
139 child_device_handle, version, context,
140 ringOffset, bufferOffset, false /* not virtio */, ops);
141
142 return res;
143 }
144
145 #if defined(HOST_BUILD) || defined(__Fuchsia__)
createVirtioGpuAddressSpaceStream(size_t ignored_bufSize)146 AddressSpaceStream* createVirtioGpuAddressSpaceStream(size_t ignored_bufSize) {
147 // Ignore incoming ignored_bufSize
148 (void)ignored_bufSize;
149 return nullptr;
150 }
151 #else
createVirtioGpuAddressSpaceStream(size_t ignored_bufSize)152 AddressSpaceStream* createVirtioGpuAddressSpaceStream(size_t ignored_bufSize) {
153 // Ignore incoming ignored_bufSize
154 (void)ignored_bufSize;
155
156 auto handle = virtgpu_address_space_open();
157
158 if (handle == reinterpret_cast<address_space_handle_t>(-1)) {
159 ALOGE("AddressSpaceStream::create failed (open device)\n");
160 return nullptr;
161 }
162
163 struct address_space_virtgpu_info virtgpu_info;
164
165 ALOGD("%s: create subdevice and get resp\n", __func__);
166 if (!virtgpu_address_space_create_context_with_subdevice(
167 handle, GoldfishAddressSpaceSubdeviceType::VirtioGpuGraphics,
168 &virtgpu_info)) {
169 ALOGE("AddressSpaceStream::create failed (create subdevice)\n");
170 virtgpu_address_space_close(handle);
171 return nullptr;
172 }
173 ALOGD("%s: create subdevice and get resp (done)\n", __func__);
174
175 struct address_space_ping request;
176 uint32_t ringSize = 0;
177 uint32_t bufferSize = 0;
178
179 request.metadata = ASG_GET_RING;
180 if (!virtgpu_address_space_ping_with_response(
181 &virtgpu_info, &request)) {
182 ALOGE("AddressSpaceStream::create failed (get ring version)\n");
183 virtgpu_address_space_close(handle);
184 return nullptr;
185 }
186 ringSize = request.size;
187
188 request.metadata = ASG_GET_BUFFER;
189 if (!virtgpu_address_space_ping_with_response(
190 &virtgpu_info, &request)) {
191 ALOGE("AddressSpaceStream::create failed (get ring version)\n");
192 virtgpu_address_space_close(handle);
193 return nullptr;
194 }
195 bufferSize = request.size;
196
197 request.metadata = ASG_SET_VERSION;
198 request.size = 1; // version 1
199
200 if (!virtgpu_address_space_ping_with_response(
201 &virtgpu_info, &request)) {
202 ALOGE("AddressSpaceStream::create failed (set version)\n");
203 virtgpu_address_space_close(handle);
204 return nullptr;
205 }
206
207 ALOGD("%s: ping returned. context ring and buffer sizes %u %u\n", __func__,
208 ringSize, bufferSize);
209
210 uint64_t hostmem_id = request.metadata;
211 uint32_t version = request.size;
212 size_t hostmem_alloc_size =
213 (size_t)(ringSize + bufferSize);
214
215 ALOGD("%s: hostmem size: %zu\n", __func__, hostmem_alloc_size);
216
217 struct address_space_virtgpu_hostmem_info hostmem_info;
218 if (!virtgpu_address_space_allocate_hostmem(
219 handle,
220 hostmem_alloc_size,
221 hostmem_id,
222 &hostmem_info)) {
223 ALOGE("AddressSpaceStream::create failed (alloc hostmem)\n");
224 virtgpu_address_space_close(handle);
225 return nullptr;
226 }
227
228 request.metadata = ASG_GET_CONFIG;
229 if (!virtgpu_address_space_ping_with_response(
230 &virtgpu_info, &request)) {
231 ALOGE("AddressSpaceStream::create failed (get config)\n");
232 virtgpu_address_space_close(handle);
233 return nullptr;
234 }
235
236 char* ringPtr = (char*)hostmem_info.ptr;
237 char* bufferPtr = ((char*)hostmem_info.ptr) + sizeof(struct asg_ring_storage);
238
239 struct asg_context context =
240 asg_context_create(
241 (char*)ringPtr, (char*)bufferPtr, bufferSize);
242
243 context.ring_config->transfer_mode = 1;
244 context.ring_config->host_consumed_pos = 0;
245 context.ring_config->guest_write_pos = 0;
246
247 struct address_space_ops ops = {
248 .open = virtgpu_address_space_open,
249 .close = virtgpu_address_space_close,
250 .ping = virtgpu_address_space_ping,
251 .allocate_hostmem = virtgpu_address_space_allocate_hostmem,
252 .ping_with_response = virtgpu_address_space_ping_with_response,
253 };
254
255 AddressSpaceStream* res =
256 new AddressSpaceStream(
257 handle, version, context,
258 0, 0, true /* is virtio */, ops);
259
260 return res;
261 }
262 #endif // HOST_BUILD || __Fuchsia__
263
264
// Wraps an established address-space-graphics (ASG) connection in an
// IOStream. |context| must already reference mapped ring storage and a
// mapped transfer buffer. |ringOffset| and |writeBufferOffset| are the
// shared-region offsets, used for unclaiming at destruction when not in
// virtio mode. |ops| supplies the device backend (goldfish or virtgpu).
AddressSpaceStream::AddressSpaceStream(
    address_space_handle_t handle,
    uint32_t version,
    struct asg_context context,
    uint64_t ringOffset,
    uint64_t writeBufferOffset,
    bool virtioMode,
    struct address_space_ops ops) :
    // The IOStream commit granularity is the ring's flush interval.
    IOStream(context.ring_config->flush_interval),
    m_virtioMode(virtioMode),
    m_ops(ops),
    m_tmpBuf(0),
    m_tmpBufSize(0),
    m_tmpBufXferSize(0),
    m_usingTmpBuf(0),
    m_readBuf(0),
    m_read(0),
    m_readLeft(0),
    m_handle(handle),
    m_version(version),
    m_context(context),
    m_ringOffset(ringOffset),
    m_writeBufferOffset(writeBufferOffset),
    m_writeBufferSize(context.ring_config->buffer_size),
    // Mask used as a cheap modulo in getRelativeBufferPos(); assumes
    // buffer_size is a power of two — TODO confirm (not checked here).
    m_writeBufferMask(m_writeBufferSize - 1),
    m_buf((unsigned char*)context.buffer),
    m_writeStart(m_buf),
    m_writeStep(context.ring_config->flush_interval),
    m_notifs(0),
    m_written(0) {
    // We'll use this in the future, but at the moment,
    // it's a potential compile Werror.
    (void)m_version;
}
299
~AddressSpaceStream()300 AddressSpaceStream::~AddressSpaceStream() {
301 if (!m_virtioMode) {
302 m_ops.unmap(m_context.to_host, sizeof(struct asg_ring_storage));
303 m_ops.unmap(m_context.buffer, m_writeBufferSize);
304 m_ops.unclaim_shared(m_handle, m_ringOffset);
305 m_ops.unclaim_shared(m_handle, m_writeBufferOffset);
306 }
307 m_ops.close(m_handle);
308 if (m_readBuf) free(m_readBuf);
309 if (m_tmpBuf) free(m_tmpBuf);
310 }
311
idealAllocSize(size_t len)312 size_t AddressSpaceStream::idealAllocSize(size_t len) {
313 if (len > m_writeStep) return len;
314 return m_writeStep;
315 }
316
allocBuffer(size_t minSize)317 void *AddressSpaceStream::allocBuffer(size_t minSize) {
318 if (!m_readBuf) {
319 m_readBuf = (unsigned char*)malloc(kReadSize);
320 }
321
322 size_t allocSize =
323 (m_writeStep < minSize ? minSize : m_writeStep);
324
325 if (m_writeStep < allocSize) {
326 if (!m_tmpBuf) {
327 m_tmpBufSize = allocSize * 2;
328 m_tmpBuf = (unsigned char*)malloc(m_tmpBufSize);
329 }
330
331 if (m_tmpBufSize < allocSize) {
332 m_tmpBufSize = allocSize * 2;
333 m_tmpBuf = (unsigned char*)realloc(m_tmpBuf, m_tmpBufSize);
334 }
335
336 if (!m_usingTmpBuf) {
337 flush();
338 }
339
340 m_usingTmpBuf = true;
341 m_tmpBufXferSize = allocSize;
342 return m_tmpBuf;
343 } else {
344 if (m_usingTmpBuf) {
345 writeFully(m_tmpBuf, m_tmpBufXferSize);
346 m_usingTmpBuf = false;
347 m_tmpBufXferSize = 0;
348 }
349
350 return m_writeStart;
351 }
352 };
353
commitBuffer(size_t size)354 int AddressSpaceStream::commitBuffer(size_t size)
355 {
356 if (size == 0) return 0;
357
358 if (m_usingTmpBuf) {
359 writeFully(m_tmpBuf, size);
360 m_tmpBufXferSize = 0;
361 m_usingTmpBuf = false;
362 return 0;
363 } else {
364 int res = type1Write(m_writeStart - m_buf, size);
365 advanceWrite();
366 return res;
367 }
368 }
369
readFully(void * ptr,size_t totalReadSize)370 const unsigned char *AddressSpaceStream::readFully(void *ptr, size_t totalReadSize)
371 {
372
373 unsigned char* userReadBuf = static_cast<unsigned char*>(ptr);
374
375 if (!userReadBuf) {
376 if (totalReadSize > 0) {
377 ALOGE("AddressSpaceStream::commitBufferAndReadFully failed, userReadBuf=NULL, totalReadSize %zu, lethal"
378 " error, exiting.", totalReadSize);
379 abort();
380 }
381 return nullptr;
382 }
383
384 // Advance buffered read if not yet consumed.
385 size_t remaining = totalReadSize;
386 size_t bufferedReadSize =
387 m_readLeft < remaining ? m_readLeft : remaining;
388
389 if (bufferedReadSize) {
390 memcpy(userReadBuf,
391 m_readBuf + (m_read - m_readLeft),
392 bufferedReadSize);
393 remaining -= bufferedReadSize;
394 m_readLeft -= bufferedReadSize;
395 }
396
397 if (!remaining) return userReadBuf;
398
399 // Read up to kReadSize bytes if all buffered read has been consumed.
400 size_t maxRead = m_readLeft ? 0 : kReadSize;
401 ssize_t actual = 0;
402
403 if (maxRead) {
404 actual = speculativeRead(m_readBuf, maxRead);
405
406 // Updated buffered read size.
407 if (actual > 0) {
408 m_read = m_readLeft = actual;
409 }
410
411 if (actual == 0) {
412 ALOGD("%s: end of pipe", __FUNCTION__);
413 return NULL;
414 }
415 }
416
417 // Consume buffered read and read more if necessary.
418 while (remaining) {
419 bufferedReadSize = m_readLeft < remaining ? m_readLeft : remaining;
420 if (bufferedReadSize) {
421 memcpy(userReadBuf + (totalReadSize - remaining),
422 m_readBuf + (m_read - m_readLeft),
423 bufferedReadSize);
424 remaining -= bufferedReadSize;
425 m_readLeft -= bufferedReadSize;
426 continue;
427 }
428
429 actual = speculativeRead(m_readBuf, kReadSize);
430
431 if (actual == 0) {
432 ALOGD("%s: Failed reading from pipe: %d", __FUNCTION__, errno);
433 return NULL;
434 }
435
436 if (actual > 0) {
437 m_read = m_readLeft = actual;
438 continue;
439 }
440 }
441
442 return userReadBuf;
443 }
444
read(void * buf,size_t * inout_len)445 const unsigned char *AddressSpaceStream::read(void *buf, size_t *inout_len) {
446 unsigned char* dst = (unsigned char*)buf;
447 size_t wanted = *inout_len;
448 ssize_t actual = speculativeRead(dst, wanted);
449
450 if (actual >= 0) {
451 *inout_len = actual;
452 } else {
453 return nullptr;
454 }
455
456 return (const unsigned char*)dst;
457 }
458
// Writes |size| bytes from |buf| to the host as a single "type 3" (large)
// transfer, temporarily switching the ring out of the default type 1 mode.
// Blocks until the whole payload is placed in the large-xfer ring and the
// host has drained it. Returns 0 on success, -1 on ring error.
int AddressSpaceStream::writeFully(const void *buf, size_t size)
{
    // Drain all outstanding traffic before switching transfer modes.
    ensureConsumerFinishing();
    ensureType3Finished();
    ensureType1Finished();

    // Announce the large transfer: total byte count, then mode 3.
    m_context.ring_config->transfer_size = size;
    m_context.ring_config->transfer_mode = 3;

    size_t sent = 0;
    // Cap each chunk at a quarter of the write buffer so the host can
    // start consuming while we are still producing.
    size_t quarterRingSize = m_writeBufferSize / 4;
    size_t chunkSize = size < quarterRingSize ? size : quarterRingSize;
    const uint8_t* bufferBytes = (const uint8_t*)buf;

    while (sent < size) {
        size_t remaining = size - sent;
        size_t sendThisTime = remaining < chunkSize ? remaining : chunkSize;

        long sentChunks =
            ring_buffer_view_write(
                m_context.to_host_large_xfer.ring,
                &m_context.to_host_large_xfer.view,
                bufferBytes + sent, sendThisTime, 1);

        // Wake the host if it is not actively consuming.
        if (*(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME) {
            notifyAvailable();
        }

        if (sentChunks == 0) {
            ring_buffer_yield();
        }

        // One step of |sendThisTime| bytes was requested, so sentChunks is
        // presumably 0 or 1 here: we advance by 0 or sendThisTime bytes.
        sent += sentChunks * sendThisTime;

        if (isInError()) {
            return -1;
        }
    }

    // Wait for the host to drain the large transfer, then restore the
    // default (type 1) transfer mode.
    ensureType3Finished();
    m_context.ring_config->transfer_mode = 1;
    m_written += size;
    return 0;
}
503
commitBufferAndReadFully(size_t writeSize,void * userReadBufPtr,size_t totalReadSize)504 const unsigned char *AddressSpaceStream::commitBufferAndReadFully(
505 size_t writeSize, void *userReadBufPtr, size_t totalReadSize) {
506
507 if (m_usingTmpBuf) {
508 writeFully(m_tmpBuf, writeSize);
509 m_usingTmpBuf = false;
510 m_tmpBufXferSize = 0;
511 return readFully(userReadBufPtr, totalReadSize);
512 } else {
513 commitBuffer(writeSize);
514 return readFully(userReadBufPtr, totalReadSize);
515 }
516 }
517
isInError() const518 bool AddressSpaceStream::isInError() const {
519 return 1 == m_context.ring_config->in_error;
520 }
521
// Reads up to |trySize| bytes from the host's large-transfer ring into
// |readBuffer|, spinning (yielding, then sleeping after many iterations)
// until at least one byte arrives. As written, the loop only exits with
// data in hand or on error, so the return value is either positive (bytes
// read) or -1 (ring in error state) — never 0.
ssize_t AddressSpaceStream::speculativeRead(unsigned char* readBuffer, size_t trySize) {
    // Drain any in-flight guest->host traffic before reading.
    ensureConsumerFinishing();
    ensureType3Finished();
    ensureType1Finished();

    size_t actuallyRead = 0;
    size_t readIters = 0;
    size_t backedOffIters = 0;
    // After this many spin iterations, sleep between polls to avoid
    // burning CPU on a slow host.
    const size_t kSpeculativeReadBackoffIters = 10000000ULL;
    while (!actuallyRead) {
        ++readIters;

        uint32_t readAvail =
            ring_buffer_available_read(
                m_context.from_host_large_xfer.ring,
                &m_context.from_host_large_xfer.view);

        if (!readAvail) {
            ring_buffer_yield();
            continue;
        }

        // (readAvail is always nonzero here because of the continue above,
        // so the first operand of this && is redundant.)
        if (readAvail && readIters > kSpeculativeReadBackoffIters) {
            usleep(10);
            ++backedOffIters;
        }

        // Clamp to what the host produced and what the caller asked for.
        uint32_t toRead = readAvail > trySize ? trySize : readAvail;

        long stepsRead = ring_buffer_view_read(
            m_context.from_host_large_xfer.ring,
            &m_context.from_host_large_xfer.view,
            readBuffer, toRead, 1);

        // One step of |toRead| bytes was requested, so stepsRead is
        // presumably 0 or 1: we gain either 0 or |toRead| bytes.
        actuallyRead += stepsRead * toRead;

        if (isInError()) {
            return -1;
        }
    }

    if (backedOffIters > 0) {
        ALOGW("%s: backed off %zu times due to host slowness.\n",
              __func__,
              backedOffIters);
    }

    return actuallyRead;
}
571
notifyAvailable()572 void AddressSpaceStream::notifyAvailable() {
573 struct address_space_ping request;
574 request.metadata = ASG_NOTIFY_AVAILABLE;
575 m_ops.ping(m_handle, &request);
576 ++m_notifs;
577 }
578
// Maps an absolute position to an offset within the shared write buffer.
// Relies on m_writeBufferMask == buffer size - 1, i.e. the buffer size
// being a power of two — NOTE(review): not checked anywhere in this file.
uint32_t AddressSpaceStream::getRelativeBufferPos(uint32_t pos) {
    return pos & m_writeBufferMask;
}
582
advanceWrite()583 void AddressSpaceStream::advanceWrite() {
584 m_writeStart += m_context.ring_config->flush_interval;
585
586 if (m_writeStart == m_buf + m_context.ring_config->buffer_size) {
587 m_writeStart = m_buf;
588 }
589 }
590
// Nudges the host consumer when it appears stalled. Spins while there is
// unread guest->host data, exiting as soon as the read-available count
// changes (the host made progress), or after pinging the host once when it
// is not in the CAN_CONSUME state.
void AddressSpaceStream::ensureConsumerFinishing() {
    uint32_t currAvailRead = ring_buffer_available_read(m_context.to_host, 0);

    while (currAvailRead) {
        ring_buffer_yield();
        uint32_t nextAvailRead = ring_buffer_available_read(m_context.to_host, 0);

        // Any change in the available count counts as progress; stop.
        if (nextAvailRead != currAvailRead) {
            break;
        }

        // Host is idle: wake it and stop spinning.
        if (*(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME) {
            notifyAvailable();
            break;
        }
    }
}
608
ensureType1Finished()609 void AddressSpaceStream::ensureType1Finished() {
610 ensureConsumerFinishing();
611
612 uint32_t currAvailRead =
613 ring_buffer_available_read(m_context.to_host, 0);
614
615 while (currAvailRead) {
616 ring_buffer_yield();
617 currAvailRead = ring_buffer_available_read(m_context.to_host, 0);
618 if (isInError()) {
619 return;
620 }
621 }
622 }
623
// Blocks until the host has fully drained the large-transfer (type 3)
// guest->host ring, waking the host as needed. Bails out early if the ring
// enters the error state.
void AddressSpaceStream::ensureType3Finished() {
    uint32_t availReadLarge =
        ring_buffer_available_read(
            m_context.to_host_large_xfer.ring,
            &m_context.to_host_large_xfer.view);
    while (availReadLarge) {
        ring_buffer_yield();
        availReadLarge =
            ring_buffer_available_read(
                m_context.to_host_large_xfer.ring,
                &m_context.to_host_large_xfer.view);
        // The host may have gone idle; ping it so it resumes consuming.
        if (*(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME) {
            notifyAvailable();
        }
        if (isInError()) {
            return;
        }
    }
}
643
// Publishes a "type 1" transfer descriptor (offset + size into the shared
// write buffer) on the guest->host ring. Applies backpressure by waiting
// until fewer than half of the possible descriptor slots are outstanding.
// Returns 0 on success, -1 if the ring entered the error state.
int AddressSpaceStream::type1Write(uint32_t bufferOffset, size_t size) {
    size_t sent = 0;
    size_t sizeForRing = sizeof(struct asg_type1_xfer);

    struct asg_type1_xfer xfer = {
        bufferOffset,
        (uint32_t)size,
    };

    uint8_t* writeBufferBytes = (uint8_t*)(&xfer);

    // Allow at most half of the flush-interval slots in the buffer to be
    // outstanding at once (minimum of 1).
    uint32_t maxOutstanding = 1;
    uint32_t maxSteps = m_context.ring_config->buffer_size /
            m_context.ring_config->flush_interval;

    if (maxSteps > 1) maxOutstanding = maxSteps >> 1;

    uint32_t ringAvailReadNow = ring_buffer_available_read(m_context.to_host, 0);

    // Backpressure: wait for the host to drain enough descriptors.
    while (ringAvailReadNow >= maxOutstanding) {
        ensureConsumerFinishing();
        ring_buffer_yield();
        ringAvailReadNow = ring_buffer_available_read(m_context.to_host, 0);
    }

    // Push the descriptor itself, retrying while the ring is full.
    while (sent < sizeForRing) {

        long sentChunks = ring_buffer_write(
            m_context.to_host,
            writeBufferBytes + sent,
            sizeForRing - sent, 1);

        // Wake the host if it is not actively consuming.
        if (*(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME) {
            notifyAvailable();
        }

        if (sentChunks == 0) {
            ring_buffer_yield();
        }

        // One step of the remaining bytes was requested, so sentChunks is
        // presumably 0 or 1: we advance by 0 or all remaining bytes.
        sent += sentChunks * (sizeForRing - sent);

        if (isInError()) {
            return -1;
        }
    }

    ensureConsumerFinishing();
    m_written += size;

    // Periodic throughput stats: log and reset counters every ~100 MB.
    float mb = (float)m_written / 1048576.0f;
    if (mb > 100.0f) {
        ALOGD("%s: %f mb in %d notifs. %f mb/notif\n", __func__,
              mb, m_notifs, m_notifs ? mb / (float)m_notifs : 0.0f);
        m_notifs = 0;
        m_written = 0;
    }

    return 0;
}
704