/*
 * Copyright (C) 2005 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Vector"

#include <utils/VectorImpl.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <log/log.h>

#include "SharedBuffer.h"

/*****************************************************************************/


namespace android {

// ----------------------------------------------------------------------------

const size_t kMinVectorCapacity = 4;

static inline size_t max(size_t a, size_t b) {
    return a>b ? a : b;
}

// ----------------------------------------------------------------------------

VectorImpl::VectorImpl(size_t itemSize, uint32_t flags)
    : mStorage(nullptr), mCount(0), mFlags(flags), mItemSize(itemSize)
{
}

VectorImpl::VectorImpl(const VectorImpl& rhs)
    : mStorage(rhs.mStorage), mCount(rhs.mCount),
      mFlags(rhs.mFlags), mItemSize(rhs.mItemSize)
{
    if (mStorage) {
        SharedBuffer::bufferFromData(mStorage)->acquire();
    }
}

VectorImpl::~VectorImpl()
{
    ALOGW_IF(mCount,
        "[%p] subclasses of VectorImpl must call finish_vector()"
        " in their destructor. Leaking %d bytes.",
        this, (int)(mCount*mItemSize));
    // We can't call _do_destroy() here because the vtable is already gone.
}

VectorImpl& VectorImpl::operator = (const VectorImpl& rhs)
{
    LOG_ALWAYS_FATAL_IF(mItemSize != rhs.mItemSize,
        "Vector<> have different types (this=%p, rhs=%p)", this, &rhs);
    if (this != &rhs) {
        release_storage();
        if (rhs.mCount) {
            mStorage = rhs.mStorage;
            mCount = rhs.mCount;
            SharedBuffer::bufferFromData(mStorage)->acquire();
        } else {
            mStorage = nullptr;
            mCount = 0;
        }
    }
    return *this;
}

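// Returns a pointer to storage that this vector is guaranteed to own
// exclusively, performing a copy-on-write when the underlying SharedBuffer
// is shared with another vector.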
void* VectorImpl::editArrayImpl()
{
    if (mStorage) {
        const SharedBuffer* sb = SharedBuffer::bufferFromData(mStorage);
        SharedBuffer* editable = sb->attemptEdit();
        if (editable == nullptr) {
            // If we're here, we're not the only owner of the buffer.
            // We must make a copy of it.
            editable = SharedBuffer::alloc(sb->size());
            // Fail instead of returning a pointer to storage that's not
            // editable. Otherwise we'd be editing the contents of a buffer
            // for which we're not the only owner, which is undefined behaviour.
            LOG_ALWAYS_FATAL_IF(editable == nullptr);
            _do_copy(editable->data(), mStorage, mCount);
            release_storage();
            mStorage = editable->data();
        }
    }
    return mStorage;
}

size_t VectorImpl::capacity() const
{
    if (mStorage) {
        return SharedBuffer::bufferFromData(mStorage)->size() / mItemSize;
    }
    return 0;
}

ssize_t VectorImpl::insertVectorAt(const VectorImpl& vector, size_t index)
{
    return insertArrayAt(vector.arrayImpl(), index, vector.size());
}

ssize_t VectorImpl::appendVector(const VectorImpl& vector)
{
    return insertVectorAt(vector, size());
}

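// Inserts 'length' items copied from 'array' at position 'index', shifting
// any existing items at or after 'index' towards the end.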
ssize_t VectorImpl::insertArrayAt(const void* array, size_t index, size_t length)
{
    if (index > size())
        return BAD_INDEX;
    void* where = _grow(index, length);
    if (where) {
        _do_copy(where, array, length);
    }
    return where ? index : (ssize_t)NO_MEMORY;
}

ssize_t VectorImpl::appendArray(const void* array, size_t length)
{
    return insertArrayAt(array, size(), length);
}

ssize_t VectorImpl::insertAt(size_t index, size_t numItems)
{
    return insertAt(nullptr, index, numItems);
}

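// Inserts 'numItems' items at 'index'. If 'item' is non-null each new slot
// is copied from it, otherwise the slots are default-constructed.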
ssize_t VectorImpl::insertAt(const void* item, size_t index, size_t numItems)
{
    if (index > size())
        return BAD_INDEX;
    void* where = _grow(index, numItems);
    if (where) {
        if (item) {
            _do_splat(where, item, numItems);
        } else {
            _do_construct(where, numItems);
        }
    }
    return where ? index : (ssize_t)NO_MEMORY;
}

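// Trampoline that adapts a stateless compar_t comparator to the compar_r_t
// signature expected by the stateful sort() overload below.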
static int sortProxy(const void* lhs, const void* rhs, void* func)
{
    return (*(VectorImpl::compar_t)func)(lhs, rhs);
}

status_t VectorImpl::sort(VectorImpl::compar_t cmp)
{
    return sort(sortProxy, (void*)cmp);
}

status_t VectorImpl::sort(VectorImpl::compar_r_t cmp, void* state)
{
    // The sort must be stable. We use insertion sort, which is well suited
    // for small and already-sorted arrays; for big arrays, mergesort would
    // be a better choice.
    const ssize_t count = size();
    if (count > 1) {
        void* array = const_cast<void*>(arrayImpl());
        void* temp = nullptr;
        ssize_t i = 1;
        while (i < count) {
            void* item = reinterpret_cast<char*>(array) + mItemSize*(i);
            void* curr = reinterpret_cast<char*>(array) + mItemSize*(i-1);
            if (cmp(curr, item, state) > 0) {

                if (!temp) {
                    // we're going to have to modify the array...
                    array = editArrayImpl();
                    if (!array) return NO_MEMORY;
                    temp = malloc(mItemSize);
                    if (!temp) return NO_MEMORY;
                    item = reinterpret_cast<char*>(array) + mItemSize*(i);
                    curr = reinterpret_cast<char*>(array) + mItemSize*(i-1);
                } else {
                    _do_destroy(temp, 1);
                }

                _do_copy(temp, item, 1);

                ssize_t j = i-1;
                void* next = reinterpret_cast<char*>(array) + mItemSize*(i);
                do {
                    _do_destroy(next, 1);
                    _do_copy(next, curr, 1);
                    next = curr;
                    --j;
                    curr = nullptr;
                    if (j >= 0) {
                        curr = reinterpret_cast<char*>(array) + mItemSize*(j);
                    }
                } while (j>=0 && (cmp(curr, temp, state) > 0));

                _do_destroy(next, 1);
                _do_copy(next, temp, 1);
            }
            i++;
        }

        if (temp) {
            _do_destroy(temp, 1);
            free(temp);
        }
    }
    return OK;
}

void VectorImpl::pop()
{
    if (size())
        removeItemsAt(size()-1, 1);
}

void VectorImpl::push()
{
    push(nullptr);
}

void VectorImpl::push(const void* item)
{
    insertAt(item, size());
}

ssize_t VectorImpl::add()
{
    return add(nullptr);
}

ssize_t VectorImpl::add(const void* item)
{
    return insertAt(item, size());
}

ssize_t VectorImpl::replaceAt(size_t index)
{
    return replaceAt(nullptr, index);
}

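// Destroys the item at 'index' and rebuilds it from 'prototype', or
// default-constructs it when 'prototype' is null.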
ssize_t VectorImpl::replaceAt(const void* prototype, size_t index)
{
    ALOG_ASSERT(index<size(),
        "[%p] replace: index=%d, size=%d", this, (int)index, (int)size());

    if (index >= size()) {
        return BAD_INDEX;
    }

    void* item = editItemLocation(index);
    if (item != prototype) {
        if (item == nullptr)
            return NO_MEMORY;
        _do_destroy(item, 1);
        if (prototype == nullptr) {
            _do_construct(item, 1);
        } else {
            _do_copy(item, prototype, 1);
        }
    }
    return ssize_t(index);
}

ssize_t VectorImpl::removeItemsAt(size_t index, size_t count)
{
    ALOG_ASSERT((index+count)<=size(),
        "[%p] remove: index=%d, count=%d, size=%d",
        this, (int)index, (int)count, (int)size());

    if ((index+count) > size())
        return BAD_VALUE;
    _shrink(index, count);
    return index;
}

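// Called by subclass destructors: releases the storage while the subclass
// vtable (and therefore do_destroy()) is still available.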
void VectorImpl::finish_vector()
{
    release_storage();
    mStorage = nullptr;
    mCount = 0;
}

void VectorImpl::clear()
{
    _shrink(0, mCount);
}

void* VectorImpl::editItemLocation(size_t index)
{
    ALOG_ASSERT(index<capacity(),
        "[%p] editItemLocation: index=%d, capacity=%d, count=%d",
        this, (int)index, (int)capacity(), (int)mCount);

    if (index < capacity()) {
        void* buffer = editArrayImpl();
        if (buffer) {
            return reinterpret_cast<char*>(buffer) + index*mItemSize;
        }
    }
    return nullptr;
}

const void* VectorImpl::itemLocation(size_t index) const
{
    ALOG_ASSERT(index<capacity(),
        "[%p] itemLocation: index=%d, capacity=%d, count=%d",
        this, (int)index, (int)capacity(), (int)mCount);

    if (index < capacity()) {
        const void* buffer = arrayImpl();
        if (buffer) {
            return reinterpret_cast<const char*>(buffer) + index*mItemSize;
        }
    }
    return nullptr;
}

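// Reallocates the storage to hold exactly 'new_capacity' items; requests that
// do not exceed the current size are ignored and the current capacity is
// returned unchanged.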
ssize_t VectorImpl::setCapacity(size_t new_capacity)
{
    // The capacity must always be greater than or equal to the size
    // of this vector.
    if (new_capacity <= size()) {
        return capacity();
    }

    size_t new_allocation_size = 0;
    LOG_ALWAYS_FATAL_IF(__builtin_mul_overflow(new_capacity, mItemSize, &new_allocation_size));
    SharedBuffer* sb = SharedBuffer::alloc(new_allocation_size);
    if (sb) {
        void* array = sb->data();
        _do_copy(array, mStorage, size());
        release_storage();
        mStorage = const_cast<void*>(array);
    } else {
        return NO_MEMORY;
    }
    return new_capacity;
}

ssize_t VectorImpl::resize(size_t size) {
    ssize_t result = OK;
    if (size > mCount) {
        result = insertAt(mCount, size - mCount);
    } else if (size < mCount) {
        result = removeItemsAt(size, mCount - size);
    }
    return result < 0 ? result : size;
}

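// Drops this vector's reference to the SharedBuffer. If this was the last
// reference, the items are destroyed and the buffer is deallocated;
// eKeepStorage defers the deallocation so the items can be destroyed first.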
void VectorImpl::release_storage()
{
    if (mStorage) {
        const SharedBuffer* sb = SharedBuffer::bufferFromData(mStorage);
        if (sb->release(SharedBuffer::eKeepStorage) == 1) {
            _do_destroy(mStorage, mCount);
            SharedBuffer::dealloc(sb);
        }
    }
}

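// Makes room for 'amount' new items at position 'where' and returns a pointer
// to the first uninitialized slot, growing the capacity to roughly 1.5x the
// new size when needed. Returns nullptr on allocation failure.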
void* VectorImpl::_grow(size_t where, size_t amount)
{
//    ALOGV("_grow(this=%p, where=%d, amount=%d) count=%d, capacity=%d",
//        this, (int)where, (int)amount, (int)mCount, (int)capacity());

    ALOG_ASSERT(where <= mCount,
            "[%p] _grow: where=%d, amount=%d, count=%d",
            this, (int)where, (int)amount, (int)mCount); // caller already checked

    size_t new_size;
    LOG_ALWAYS_FATAL_IF(__builtin_add_overflow(mCount, amount, &new_size), "new_size overflow");

    if (capacity() < new_size) {
        // NOTE: This implementation used to resize vectors as per ((3*x + 1) / 2)
        // (sigh..). Also note, the " + 1" was necessary to handle the special case
        // where x == 1, where the resized_capacity will be equal to the old
        // capacity without the +1. The old calculation wouldn't work properly
        // if x was zero.
        //
        // This approximates the old calculation, using (x + (x/2) + 1) instead.
        size_t new_capacity = 0;
        LOG_ALWAYS_FATAL_IF(__builtin_add_overflow(new_size, (new_size / 2), &new_capacity),
                            "new_capacity overflow");
        LOG_ALWAYS_FATAL_IF(
                __builtin_add_overflow(new_capacity, static_cast<size_t>(1u), &new_capacity),
                "new_capacity overflow");
        new_capacity = max(kMinVectorCapacity, new_capacity);

        size_t new_alloc_size = 0;
        LOG_ALWAYS_FATAL_IF(__builtin_mul_overflow(new_capacity, mItemSize, &new_alloc_size),
                            "new_alloc_size overflow");

//        ALOGV("grow vector %p, new_capacity=%d", this, (int)new_capacity);
        if ((mStorage) &&
            (mCount==where) &&
            (mFlags & HAS_TRIVIAL_COPY) &&
            (mFlags & HAS_TRIVIAL_DTOR))
        {
            const SharedBuffer* cur_sb = SharedBuffer::bufferFromData(mStorage);
            SharedBuffer* sb = cur_sb->editResize(new_alloc_size);
            if (sb) {
                mStorage = sb->data();
            } else {
                return nullptr;
            }
        } else {
            SharedBuffer* sb = SharedBuffer::alloc(new_alloc_size);
            if (sb) {
                void* array = sb->data();
                if (where != 0) {
                    _do_copy(array, mStorage, where);
                }
                if (where != mCount) {
                    const void* from = reinterpret_cast<const uint8_t *>(mStorage) + where*mItemSize;
                    void* dest = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
                    _do_copy(dest, from, mCount-where);
                }
                release_storage();
                mStorage = const_cast<void*>(array);
            } else {
                return nullptr;
            }
        }
    } else {
        void* array = editArrayImpl();
        if (where != mCount) {
            const void* from = reinterpret_cast<const uint8_t *>(array) + where*mItemSize;
            void* to = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
            _do_move_forward(to, from, mCount - where);
        }
    }
    mCount = new_size;
    void* free_space = const_cast<void*>(itemLocation(where));
    return free_space;
}

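// Destroys 'amount' items starting at 'where' and closes the gap, shrinking
// the underlying buffer once the vector falls below half of its capacity.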
void VectorImpl::_shrink(size_t where, size_t amount)
{
    if (!mStorage)
        return;

//    ALOGV("_shrink(this=%p, where=%d, amount=%d) count=%d, capacity=%d",
//        this, (int)where, (int)amount, (int)mCount, (int)capacity());

    ALOG_ASSERT(where + amount <= mCount,
            "[%p] _shrink: where=%d, amount=%d, count=%d",
            this, (int)where, (int)amount, (int)mCount); // caller already checked

    size_t new_size;
    LOG_ALWAYS_FATAL_IF(__builtin_sub_overflow(mCount, amount, &new_size));

    if (new_size < (capacity() / 2)) {
        // NOTE: (new_size * 2) is safe because capacity didn't overflow and
        // new_size < (capacity / 2)).
        const size_t new_capacity = max(kMinVectorCapacity, new_size * 2);

        // NOTE: (new_capacity * mItemSize), (where * mItemSize) and
        // ((where + amount) * mItemSize) beyond this point are safe because
        // we are always reducing the capacity of the underlying SharedBuffer.
        // In other words, (old_capacity * mItemSize) did not overflow, and
        // where < (where + amount) < new_capacity < old_capacity.
        if ((where == new_size) &&
            (mFlags & HAS_TRIVIAL_COPY) &&
            (mFlags & HAS_TRIVIAL_DTOR))
        {
            const SharedBuffer* cur_sb = SharedBuffer::bufferFromData(mStorage);
            SharedBuffer* sb = cur_sb->editResize(new_capacity * mItemSize);
            if (sb) {
                mStorage = sb->data();
            } else {
                return;
            }
        } else {
            SharedBuffer* sb = SharedBuffer::alloc(new_capacity * mItemSize);
            if (sb) {
                void* array = sb->data();
                if (where != 0) {
                    _do_copy(array, mStorage, where);
                }
                if (where != new_size) {
                    const void* from = reinterpret_cast<const uint8_t *>(mStorage) + (where+amount)*mItemSize;
                    void* dest = reinterpret_cast<uint8_t *>(array) + where*mItemSize;
                    _do_copy(dest, from, new_size - where);
                }
                release_storage();
                mStorage = const_cast<void*>(array);
            } else {
                return;
            }
        }
    } else {
        void* array = editArrayImpl();
        void* to = reinterpret_cast<uint8_t *>(array) + where*mItemSize;
        _do_destroy(to, amount);
        if (where != new_size) {
            const void* from = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
            _do_move_backward(to, from, new_size - where);
        }
    }
    mCount = new_size;
}

size_t VectorImpl::itemSize() const {
    return mItemSize;
}

void VectorImpl::_do_construct(void* storage, size_t num) const
{
    if (!(mFlags & HAS_TRIVIAL_CTOR)) {
        do_construct(storage, num);
    }
}

void VectorImpl::_do_destroy(void* storage, size_t num) const
{
    if (!(mFlags & HAS_TRIVIAL_DTOR)) {
        do_destroy(storage, num);
    }
}

void VectorImpl::_do_copy(void* dest, const void* from, size_t num) const
{
    if (!(mFlags & HAS_TRIVIAL_COPY)) {
        do_copy(dest, from, num);
    } else {
        memcpy(dest, from, num*itemSize());
    }
}

void VectorImpl::_do_splat(void* dest, const void* item, size_t num) const {
    do_splat(dest, item, num);
}

void VectorImpl::_do_move_forward(void* dest, const void* from, size_t num) const {
    do_move_forward(dest, from, num);
}

void VectorImpl::_do_move_backward(void* dest, const void* from, size_t num) const {
    do_move_backward(dest, from, num);
}

/*****************************************************************************/

SortedVectorImpl::SortedVectorImpl(size_t itemSize, uint32_t flags)
    : VectorImpl(itemSize, flags)
{
}

SortedVectorImpl::SortedVectorImpl(const VectorImpl& rhs)
    : VectorImpl(rhs)
{
}

SortedVectorImpl::~SortedVectorImpl()
{
}

SortedVectorImpl& SortedVectorImpl::operator = (const SortedVectorImpl& rhs)
{
    return static_cast<SortedVectorImpl&>( VectorImpl::operator = (static_cast<const VectorImpl&>(rhs)) );
}

ssize_t SortedVectorImpl::indexOf(const void* item) const
{
    return _indexOrderOf(item);
}

size_t SortedVectorImpl::orderOf(const void* item) const
{
    size_t o;
    _indexOrderOf(item, &o);
    return o;
}

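// Binary search: returns the index of 'item' or NAME_NOT_FOUND, and writes
// into '*order' the insertion position that keeps the vector sorted.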
ssize_t SortedVectorImpl::_indexOrderOf(const void* item, size_t* order) const
{
    if (order) *order = 0;
    if (isEmpty()) {
        return NAME_NOT_FOUND;
    }
    // binary search
    ssize_t err = NAME_NOT_FOUND;
    ssize_t l = 0;
    ssize_t h = size()-1;
    ssize_t mid;
    const void* a = arrayImpl();
    const size_t s = itemSize();
    while (l <= h) {
        mid = l + (h - l)/2;
        const void* const curr = reinterpret_cast<const char *>(a) + (mid*s);
        const int c = do_compare(curr, item);
        if (c == 0) {
            err = l = mid;
            break;
        } else if (c < 0) {
            l = mid + 1;
        } else {
            h = mid - 1;
        }
    }
    if (order) *order = l;
    return err;
}

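// Inserts 'item' at its sorted position, or replaces the existing item that
// compares equal to it.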
ssize_t SortedVectorImpl::add(const void* item)
{
    size_t order;
    ssize_t index = _indexOrderOf(item, &order);
    if (index < 0) {
        index = VectorImpl::insertAt(item, order, 1);
    } else {
        index = VectorImpl::replaceAt(item, index);
    }
    return index;
}

ssize_t SortedVectorImpl::merge(const VectorImpl& vector)
{
    // naive merge...
    if (!vector.isEmpty()) {
        const void* buffer = vector.arrayImpl();
        const size_t is = itemSize();
        size_t s = vector.size();
        for (size_t i=0 ; i<s ; i++) {
            ssize_t err = add( reinterpret_cast<const char*>(buffer) + i*is );
            if (err<0) {
                return err;
            }
        }
    }
    return OK;
}

ssize_t SortedVectorImpl::merge(const SortedVectorImpl& vector)
{
    // we're merging a sorted vector... nice!
    ssize_t err = OK;
    if (!vector.isEmpty()) {
        // first take care of the case where the vectors are sorted together
        if (do_compare(vector.itemLocation(vector.size()-1), arrayImpl()) <= 0) {
            err = VectorImpl::insertVectorAt(static_cast<const VectorImpl&>(vector), 0);
        } else if (do_compare(vector.arrayImpl(), itemLocation(size()-1)) >= 0) {
            err = VectorImpl::appendVector(static_cast<const VectorImpl&>(vector));
        } else {
            // this could be made a little better
            err = merge(static_cast<const VectorImpl&>(vector));
        }
    }
    return err;
}

ssize_t SortedVectorImpl::remove(const void* item)
{
    ssize_t i = indexOf(item);
    if (i>=0) {
        VectorImpl::removeItemsAt(i, 1);
    }
    return i;
}

/*****************************************************************************/

}; // namespace android