/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17 #include <log/log.h>
18 #include "ring_buffer.h"
19
20 namespace android {
21 namespace hardware {
22 namespace audio {
23 namespace V6_0 {
24 namespace implementation {
25
RingBuffer(size_t capacity)26 RingBuffer::RingBuffer(size_t capacity)
27 : mBuffer(new uint8_t[capacity])
28 , mCapacity(capacity) {}
29
availableToProduce() const30 size_t RingBuffer::availableToProduce() const {
31 std::lock_guard<std::mutex> guard(mMutex);
32 return mCapacity - mAvailableToConsume;
33 }
34
availableToConsume() const35 size_t RingBuffer::availableToConsume() const {
36 std::unique_lock<std::mutex> lock(mMutex);
37 return mAvailableToConsume;
38 }
39
makeRoomForProduce(size_t atLeast)40 size_t RingBuffer::makeRoomForProduce(size_t atLeast) {
41 std::unique_lock<std::mutex> lock(mMutex);
42 LOG_ALWAYS_FATAL_IF(atLeast >= mCapacity);
43
44 const size_t toProduce = mCapacity - mAvailableToConsume;
45 const size_t toDrop = (atLeast <= toProduce)
46 ? 0 : atLeast - toProduce;
47
48 mConsumePos = (mConsumePos + toDrop) % mCapacity;
49 mAvailableToConsume -= toDrop;
50
51 return toDrop;
52 }
53
waitForProduceAvailable(Timepoint blockUntil) const54 bool RingBuffer::waitForProduceAvailable(Timepoint blockUntil) const {
55 std::unique_lock<std::mutex> lock(mMutex);
56 while (true) {
57 if (mAvailableToConsume < mCapacity) {
58 return true;
59 } else if (mProduceAvailable.wait_until(lock, blockUntil) == std::cv_status::timeout) {
60 return false;
61 }
62 }
63 }
64
getProduceChunk() const65 RingBuffer::ContiniousChunk RingBuffer::getProduceChunk() const {
66 std::unique_lock<std::mutex> lock(mMutex);
67 const int availableToProduce = mCapacity - mAvailableToConsume;
68
69 ContiniousChunk chunk;
70
71 chunk.data = &mBuffer[mProducePos];
72 chunk.size = (mProducePos >= mConsumePos)
73 ? std::min(mCapacity - mProducePos, availableToProduce)
74 : std::min(mConsumePos - mProducePos, availableToProduce);
75
76 return chunk;
77 }
78
produce(size_t size)79 size_t RingBuffer::produce(size_t size) {
80 std::unique_lock<std::mutex> lock(mMutex);
81 const int availableToProduce = mCapacity - mAvailableToConsume;
82 size = std::min(size, size_t(availableToProduce));
83
84 mProducePos = (mProducePos + size) % mCapacity;
85 mAvailableToConsume += size;
86
87 mConsumeAvailable.notify_one();
88 return size;
89 }
90
produce(const void * srcRaw,size_t size)91 size_t RingBuffer::produce(const void *srcRaw, size_t size) {
92 std::unique_lock<std::mutex> lock(mMutex);
93 int produceSize = std::min(mCapacity - mAvailableToConsume, int(size));
94 size = produceSize;
95 const uint8_t *src = static_cast<const uint8_t *>(srcRaw);
96
97 while (produceSize > 0) {
98 const int availableToProduce = mCapacity - mAvailableToConsume;
99 const int chunkSz = (mProducePos >= mConsumePos)
100 ? std::min(mCapacity - mProducePos, availableToProduce)
101 : std::min(mConsumePos - mProducePos, availableToProduce);
102 void *dst = &mBuffer[mProducePos];
103
104 memcpy(dst, src, chunkSz);
105 src += chunkSz;
106 mProducePos = (mProducePos + chunkSz) % mCapacity;
107 mAvailableToConsume += chunkSz;
108 produceSize -= chunkSz;
109 }
110
111 mConsumeAvailable.notify_one();
112 return size;
113 }
114
waitForConsumeAvailable(Timepoint blockUntil) const115 bool RingBuffer::waitForConsumeAvailable(Timepoint blockUntil) const {
116 std::unique_lock<std::mutex> lock(mMutex);
117 while (true) {
118 if (mAvailableToConsume > 0) {
119 return true;
120 } else if (mConsumeAvailable.wait_until(lock, blockUntil) == std::cv_status::timeout) {
121 return false;
122 }
123 }
124 }
125
getConsumeChunk() const126 RingBuffer::ContiniousLockedChunk RingBuffer::getConsumeChunk() const {
127 std::unique_lock<std::mutex> lock(mMutex);
128
129 ContiniousLockedChunk chunk;
130
131 chunk.data = &mBuffer[mConsumePos];
132 chunk.size = (mConsumePos >= mProducePos)
133 ? std::min(mCapacity - mConsumePos, mAvailableToConsume)
134 : std::min(mProducePos - mConsumePos, mAvailableToConsume);
135 chunk.lock = std::move(lock);
136
137 return chunk;
138 }
139
consume(const ContiniousLockedChunk & lock,size_t size)140 size_t RingBuffer::consume(const ContiniousLockedChunk &lock, size_t size) {
141 (void)lock; // the lock is provided by getConsumeChunk
142 size = std::min(size, size_t(mAvailableToConsume));
143
144 mConsumePos = (mConsumePos + size) % mCapacity;
145 mAvailableToConsume -= size;
146
147 mProduceAvailable.notify_one();
148 return size;
149 }
150
151 } // namespace implementation
152 } // namespace V6_0
153 } // namespace audio
154 } // namespace hardware
155 } // namespace android
156