2 * Copyright (C) 2005 The Android Open Source Project
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #define LOG_TAG "Vector"
23 #include <cutils/log.h>
25 #include <utils/Errors.h>
26 #include <utils/SharedBuffer.h>
27 #include <utils/VectorImpl.h>
29 /*****************************************************************************/
34 // ----------------------------------------------------------------------------
// Smallest capacity (in items) any allocated vector buffer will have.
36 const size_t kMinVectorCapacity = 4;

// Helper: larger of two sizes; used when computing grow/shrink capacities.
38 static inline size_t max(size_t a, size_t b) {
42 // ----------------------------------------------------------------------------
// Constructs an empty vector; mStorage stays null until the first insertion.
44 VectorImpl::VectorImpl(size_t itemSize, uint32_t flags)
45 : mStorage(0), mCount(0), mFlags(flags), mItemSize(itemSize)
// Copy constructor: shares rhs's buffer copy-on-write by taking a
// reference on the underlying SharedBuffer instead of copying the items.
49 VectorImpl::VectorImpl(const VectorImpl& rhs)
50 : mStorage(rhs.mStorage), mCount(rhs.mCount),
51 mFlags(rhs.mFlags), mItemSize(rhs.mItemSize)
54 SharedBuffer::bufferFromData(mStorage)->acquire();
// Destructor: by this point the subclass vtable is gone, so item
// destructors cannot run anymore. Subclasses must have called
// finish_vector() in their own destructor; otherwise we can only warn
// about the leak.
58 VectorImpl::~VectorImpl()
61 "[%p] subclasses of VectorImpl must call finish_vector()"
62 " in their destructor. Leaking %d bytes.",
63 this, (int)(mCount*mItemSize));
64 // We can't call _do_destroy() here because the vtable is already gone.
// Assignment: aborts if the two vectors hold different item sizes, then
// shares rhs's storage (copy-on-write) by acquiring a reference on its
// SharedBuffer.
67 VectorImpl& VectorImpl::operator = (const VectorImpl& rhs)
69 LOG_ALWAYS_FATAL_IF(mItemSize != rhs.mItemSize,
70 "Vector<> have different types (this=%p, rhs=%p)", this, &rhs);
74 mStorage = rhs.mStorage;
76 SharedBuffer::bufferFromData(mStorage)->acquire();
// Returns a writable pointer to the item array. If the SharedBuffer is
// shared (attemptEdit() refuses), a private copy of the whole buffer is
// allocated and the items copied over — the copy-on-write step.
85 void* VectorImpl::editArrayImpl()
88 SharedBuffer* sb = SharedBuffer::bufferFromData(mStorage)->attemptEdit();
90 sb = SharedBuffer::alloc(capacity() * mItemSize);
92 _do_copy(sb->data(), mStorage, mCount);
94 mStorage = sb->data();
// Capacity in items, derived from the SharedBuffer's size in bytes.
101 size_t VectorImpl::capacity() const
104 return SharedBuffer::bufferFromData(mStorage)->size() / mItemSize;
// Inserts the entire contents of another vector at 'index'.
109 ssize_t VectorImpl::insertVectorAt(const VectorImpl& vector, size_t index)
111 return insertArrayAt(vector.arrayImpl(), index, vector.size());
// Appends the entire contents of another vector at the end.
114 ssize_t VectorImpl::appendVector(const VectorImpl& vector)
116 return insertVectorAt(vector, size());
// Inserts 'length' items copied from a raw array at 'index'. Returns the
// insertion index on success, or NO_MEMORY when the buffer could not be
// grown.
119 ssize_t VectorImpl::insertArrayAt(const void* array, size_t index, size_t length)
123 void* where = _grow(index, length);
125 _do_copy(where, array, length);
127 return where ? index : (ssize_t)NO_MEMORY;
// Appends 'length' items copied from a raw array at the end.
130 ssize_t VectorImpl::appendArray(const void* array, size_t length)
132 return insertArrayAt(array, size(), length);
// Inserts 'numItems' default-constructed items at 'index'.
135 ssize_t VectorImpl::insertAt(size_t index, size_t numItems)
137 return insertAt(0, index, numItems);
// Inserts 'numItems' items at 'index': copies of *item when item is
// non-null (_do_splat), otherwise default-constructed (_do_construct).
// Returns the index on success or NO_MEMORY on allocation failure.
140 ssize_t VectorImpl::insertAt(const void* item, size_t index, size_t numItems)
144 void* where = _grow(index, numItems);
147 _do_splat(where, item, numItems);
149 _do_construct(where, numItems);
152 return where ? index : (ssize_t)NO_MEMORY;
// Adapter letting a stateless compar_t comparator drive the stateful
// sort overload: the function pointer is smuggled through 'func'.
155 static int sortProxy(const void* lhs, const void* rhs, void* func)
157 return (*(VectorImpl::compar_t)func)(lhs, rhs);
160 status_t VectorImpl::sort(VectorImpl::compar_t cmp)
162 return sort(sortProxy, (void*)cmp);
// Stable in-place insertion sort driven by the caller's comparator.
// The array is only made writable (editArrayImpl) and the one-item
// scratch buffer allocated lazily, on the first out-of-order element,
// so an already-sorted vector is never copied.
165 status_t VectorImpl::sort(VectorImpl::compar_r_t cmp, void* state)
167 // the sort must be stable. we're using insertion sort which
168 // is well suited for small and already sorted arrays
169 // for big arrays, it could be better to use mergesort
170 const ssize_t count = size();
172 void* array = const_cast<void*>(arrayImpl());
176 void* item = reinterpret_cast<char*>(array) + mItemSize*(i);
177 void* curr = reinterpret_cast<char*>(array) + mItemSize*(i-1);
178 if (cmp(curr, item, state) > 0) {
181 // we're going to have to modify the array...
182 array = editArrayImpl();
183 if (!array) return NO_MEMORY;
184 temp = malloc(mItemSize);
185 if (!temp) return NO_MEMORY;
// recompute item/curr: editArrayImpl() may have relocated the storage
186 item = reinterpret_cast<char*>(array) + mItemSize*(i);
187 curr = reinterpret_cast<char*>(array) + mItemSize*(i-1);
189 _do_destroy(temp, 1);
192 _do_copy(temp, item, 1);
// shift larger elements one slot right until temp's position is found
195 void* next = reinterpret_cast<char*>(array) + mItemSize*(i);
197 _do_destroy(next, 1);
198 _do_copy(next, curr, 1);
201 curr = reinterpret_cast<char*>(array) + mItemSize*(j);
202 } while (j>=0 && (cmp(curr, temp, state) > 0));
// drop the saved item into its final slot
204 _do_destroy(next, 1);
205 _do_copy(next, temp, 1);
211 _do_destroy(temp, 1);
// Removes the last item.
218 void VectorImpl::pop()
221 removeItemsAt(size()-1, 1);
// Appends one default-constructed item.
224 void VectorImpl::push()
// Appends a copy of *item.
229 void VectorImpl::push(const void* item)
231 insertAt(item, size());
// Appends one default-constructed item; returns its index or an error.
234 ssize_t VectorImpl::add()
// Appends a copy of *item; returns its index or an error.
239 ssize_t VectorImpl::add(const void* item)
241 return insertAt(item, size());
// Replaces the item at 'index' with a default-constructed one.
244 ssize_t VectorImpl::replaceAt(size_t index)
246 return replaceAt(0, index);
// Replaces the item at 'index': the old item is destroyed, then either
// copy-constructed from 'prototype' or default-constructed when
// prototype is null. Range-checked in both debug (assert) and release
// (early bail) builds. Returns the index on success.
249 ssize_t VectorImpl::replaceAt(const void* prototype, size_t index)
251 ALOG_ASSERT(index<size(),
252 "[%p] replace: index=%d, size=%d", this, (int)index, (int)size());
254 if (index >= size()) {
258 void* item = editItemLocation(index);
// replacing an item with itself is a no-op; destroying first would
// leave 'prototype' pointing at a destroyed object
259 if (item != prototype) {
262 _do_destroy(item, 1);
263 if (prototype == 0) {
264 _do_construct(item, 1);
266 _do_copy(item, prototype, 1);
269 return ssize_t(index);
// Removes 'count' items starting at 'index'. The range is checked by an
// assert in debug builds and by an early return in release builds.
272 ssize_t VectorImpl::removeItemsAt(size_t index, size_t count)
274 ALOG_ASSERT((index+count)<=size(),
275 "[%p] remove: index=%d, count=%d, size=%d",
276 this, (int)index, (int)count, (int)size());
278 if ((index+count) > size())
280 _shrink(index, count);
// Releases the storage. Must be called from subclass destructors while
// the subclass vtable (and thus do_destroy() etc.) is still intact —
// see the warning in ~VectorImpl().
284 void VectorImpl::finish_vector()
// Removes every item from the vector.
291 void VectorImpl::clear()
// Returns a writable pointer to the item slot at 'index', triggering
// copy-on-write via editArrayImpl(). NOTE(review): the bound used here
// is capacity(), not size(), so slots beyond mCount are reachable.
296 void* VectorImpl::editItemLocation(size_t index)
298 ALOG_ASSERT(index<capacity(),
299 "[%p] editItemLocation: index=%d, capacity=%d, count=%d",
300 this, (int)index, (int)capacity(), (int)mCount);
302 if (index < capacity()) {
303 void* buffer = editArrayImpl();
305 return reinterpret_cast<char*>(buffer) + index*mItemSize;
// Read-only counterpart of editItemLocation(): returns a const pointer
// to the slot at 'index' without forcing a private copy of the buffer.
// NOTE(review): bound is capacity(), not size(), as above.
311 const void* VectorImpl::itemLocation(size_t index) const
313 ALOG_ASSERT(index<capacity(),
314 "[%p] itemLocation: index=%d, capacity=%d, count=%d",
315 this, (int)index, (int)capacity(), (int)mCount);
317 if (index < capacity()) {
318 const void* buffer = arrayImpl();
320 return reinterpret_cast<const char*>(buffer) + index*mItemSize;
// Grows the buffer to hold 'new_capacity' items by allocating a fresh
// SharedBuffer and copying the existing items into it. Capacity is
// never reduced: a request below the current size returns the current
// capacity unchanged.
326 ssize_t VectorImpl::setCapacity(size_t new_capacity)
328 size_t current_capacity = capacity();
329 ssize_t amount = new_capacity - size();
331 // we can't reduce the capacity
332 return current_capacity;
334 SharedBuffer* sb = SharedBuffer::alloc(new_capacity * mItemSize);
336 void* array = sb->data();
337 _do_copy(array, mStorage, size());
339 mStorage = const_cast<void*>(array);
// Drops this vector's reference on the SharedBuffer. When that was the
// last reference (release() returns 1), the items are destroyed first —
// eKeepStorage defers freeing the raw memory — and only then is the
// storage deallocated.
346 void VectorImpl::release_storage()
349 const SharedBuffer* sb = SharedBuffer::bufferFromData(mStorage);
350 if (sb->release(SharedBuffer::eKeepStorage) == 1) {
351 _do_destroy(mStorage, mCount);
352 SharedBuffer::dealloc(sb);
// Opens a gap of 'amount' uninitialized item slots at position 'where'
// and returns a pointer to it (the caller constructs/copies into the
// gap). When capacity is insufficient, grows by roughly 1.5x (with a
// kMinVectorCapacity floor).
357 void* VectorImpl::_grow(size_t where, size_t amount)
359 // ALOGV("_grow(this=%p, where=%d, amount=%d) count=%d, capacity=%d",
360 // this, (int)where, (int)amount, (int)mCount, (int)capacity());
362 ALOG_ASSERT(where <= mCount,
363 "[%p] _grow: where=%d, amount=%d, count=%d",
364 this, (int)where, (int)amount, (int)mCount); // caller already checked
366 const size_t new_size = mCount + amount;
367 if (capacity() < new_size) {
368 const size_t new_capacity = max(kMinVectorCapacity, ((new_size*3)+1)/2);
369 // ALOGV("grow vector %p, new_capacity=%d", this, (int)new_capacity);
// fast path: appending items of a trivially copyable/destructible type
// lets the SharedBuffer be resized in place
372 (mFlags & HAS_TRIVIAL_COPY) &&
373 (mFlags & HAS_TRIVIAL_DTOR))
375 const SharedBuffer* cur_sb = SharedBuffer::bufferFromData(mStorage);
376 SharedBuffer* sb = cur_sb->editResize(new_capacity * mItemSize);
377 mStorage = sb->data();
// slow path: allocate a new buffer and copy the items on either side
// of the gap into it
379 SharedBuffer* sb = SharedBuffer::alloc(new_capacity * mItemSize);
381 void* array = sb->data();
383 _do_copy(array, mStorage, where);
385 if (where != mCount) {
386 const void* from = reinterpret_cast<const uint8_t *>(mStorage) + where*mItemSize;
387 void* dest = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
388 _do_copy(dest, from, mCount-where);
391 mStorage = const_cast<void*>(array);
// capacity already sufficient: just shift the tail forward in place
395 void* array = editArrayImpl();
396 if (where != mCount) {
397 const void* from = reinterpret_cast<const uint8_t *>(array) + where*mItemSize;
398 void* to = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
399 _do_move_forward(to, from, mCount - where);
403 void* free_space = const_cast<void*>(itemLocation(where));
// Removes 'amount' item slots starting at 'where'. When the vector
// drops below one third of its capacity, the buffer is reallocated at
// twice the new size (floored at kMinVectorCapacity); otherwise the
// removed items are destroyed and the tail shifted back in place.
407 void VectorImpl::_shrink(size_t where, size_t amount)
412 // ALOGV("_shrink(this=%p, where=%d, amount=%d) count=%d, capacity=%d",
413 // this, (int)where, (int)amount, (int)mCount, (int)capacity());
415 ALOG_ASSERT(where + amount <= mCount,
416 "[%p] _shrink: where=%d, amount=%d, count=%d",
417 this, (int)where, (int)amount, (int)mCount); // caller already checked
419 const size_t new_size = mCount - amount;
420 if (new_size*3 < capacity()) {
421 const size_t new_capacity = max(kMinVectorCapacity, new_size*2);
422 // ALOGV("shrink vector %p, new_capacity=%d", this, (int)new_capacity);
// fast path: truncating items of a trivially copyable/destructible
// type lets the SharedBuffer be resized in place
423 if ((where == new_size) &&
424 (mFlags & HAS_TRIVIAL_COPY) &&
425 (mFlags & HAS_TRIVIAL_DTOR))
427 const SharedBuffer* cur_sb = SharedBuffer::bufferFromData(mStorage);
428 SharedBuffer* sb = cur_sb->editResize(new_capacity * mItemSize);
429 mStorage = sb->data();
// slow path: allocate a smaller buffer and copy the surviving items
// on either side of the removed range
431 SharedBuffer* sb = SharedBuffer::alloc(new_capacity * mItemSize);
433 void* array = sb->data();
435 _do_copy(array, mStorage, where);
437 if (where != new_size) {
438 const void* from = reinterpret_cast<const uint8_t *>(mStorage) + (where+amount)*mItemSize;
439 void* dest = reinterpret_cast<uint8_t *>(array) + where*mItemSize;
440 _do_copy(dest, from, new_size - where);
443 mStorage = const_cast<void*>(array);
// in-place removal: destroy the doomed items, then shift the tail back
447 void* array = editArrayImpl();
448 void* to = reinterpret_cast<uint8_t *>(array) + where*mItemSize;
449 _do_destroy(to, amount);
450 if (where != new_size) {
451 const void* from = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
452 _do_move_backward(to, from, new_size - where);
// Size in bytes of one item.
458 size_t VectorImpl::itemSize() const {
// The _do_* helpers below skip the virtual subclass call entirely when
// the element type is trivial for the corresponding operation (per the
// HAS_TRIVIAL_* bits in mFlags).
462 void VectorImpl::_do_construct(void* storage, size_t num) const
464 if (!(mFlags & HAS_TRIVIAL_CTOR)) {
465 do_construct(storage, num);
469 void VectorImpl::_do_destroy(void* storage, size_t num) const
471 if (!(mFlags & HAS_TRIVIAL_DTOR)) {
472 do_destroy(storage, num);
476 void VectorImpl::_do_copy(void* dest, const void* from, size_t num) const
478 if (!(mFlags & HAS_TRIVIAL_COPY)) {
479 do_copy(dest, from, num);
// trivial copy: a plain memcpy of 'num' items suffices
481 memcpy(dest, from, num*itemSize());
// These three always delegate to the subclass implementation — no
// trivial-type shortcut is applied at this level.
485 void VectorImpl::_do_splat(void* dest, const void* item, size_t num) const {
486 do_splat(dest, item, num);
489 void VectorImpl::_do_move_forward(void* dest, const void* from, size_t num) const {
490 do_move_forward(dest, from, num);
493 void VectorImpl::_do_move_backward(void* dest, const void* from, size_t num) const {
494 do_move_backward(dest, from, num);
// Reserved virtual slots so methods can be added later without changing
// the vtable layout (binary compatibility).
497 void VectorImpl::reservedVectorImpl1() { }
498 void VectorImpl::reservedVectorImpl2() { }
499 void VectorImpl::reservedVectorImpl3() { }
500 void VectorImpl::reservedVectorImpl4() { }
501 void VectorImpl::reservedVectorImpl5() { }
502 void VectorImpl::reservedVectorImpl6() { }
503 void VectorImpl::reservedVectorImpl7() { }
504 void VectorImpl::reservedVectorImpl8() { }
506 /*****************************************************************************/
// SortedVectorImpl keeps its items ordered according to the subclass's
// do_compare().
508 SortedVectorImpl::SortedVectorImpl(size_t itemSize, uint32_t flags)
509 : VectorImpl(itemSize, flags)
// Copy-construction from a plain VectorImpl. NOTE(review): presumably
// rhs is already sorted — the ordering is not re-checked here.
513 SortedVectorImpl::SortedVectorImpl(const VectorImpl& rhs)
518 SortedVectorImpl::~SortedVectorImpl()
// Assignment simply delegates to the base class; the shared storage
// keeps its ordering.
522 SortedVectorImpl& SortedVectorImpl::operator = (const SortedVectorImpl& rhs)
524 return static_cast<SortedVectorImpl&>( VectorImpl::operator = (static_cast<const VectorImpl&>(rhs)) );
// Binary-search lookup: index of the item, or NAME_NOT_FOUND.
527 ssize_t SortedVectorImpl::indexOf(const void* item) const
529 return _indexOrderOf(item);
// Returns the position where 'item' would be inserted to keep the
// vector sorted.
532 size_t SortedVectorImpl::orderOf(const void* item) const
535 _indexOrderOf(item, &o);
// Binary search over the sorted items using do_compare(). Returns the
// matching index or NAME_NOT_FOUND; when 'order' is non-null it
// receives the insertion position that would keep the vector sorted.
539 ssize_t SortedVectorImpl::_indexOrderOf(const void* item, size_t* order) const
542 ssize_t err = NAME_NOT_FOUND;
544 ssize_t h = size()-1;
546 const void* a = arrayImpl();
547 const size_t s = itemSize();
550 const void* const curr = reinterpret_cast<const char *>(a) + (mid*s);
551 const int c = do_compare(curr, item);
561 if (order) *order = l;
// Inserts *item at its sorted position; when an equal item is already
// present it is replaced instead of duplicated. Returns the item's
// index or an error code.
565 ssize_t SortedVectorImpl::add(const void* item)
568 ssize_t index = _indexOrderOf(item, &order);
570 index = VectorImpl::insertAt(item, order, 1);
572 index = VectorImpl::replaceAt(item, index);
// Merges an unsorted vector by add()'ing each element individually —
// every element pays a binary search plus an insertion.
577 ssize_t SortedVectorImpl::merge(const VectorImpl& vector)
580 if (!vector.isEmpty()) {
581 const void* buffer = vector.arrayImpl();
582 const size_t is = itemSize();
583 size_t s = vector.size();
584 for (size_t i=0 ; i<s ; i++) {
585 ssize_t err = add( reinterpret_cast<const char*>(buffer) + i*is );
// Merges another *sorted* vector. Fast paths: when the incoming vector
// sorts entirely before this one it is spliced in with a single
// insertVectorAt(0), entirely after with a single appendVector();
// otherwise it falls back to the element-wise merge above.
594 ssize_t SortedVectorImpl::merge(const SortedVectorImpl& vector)
596 // we've merging a sorted vector... nice!
597 ssize_t err = NO_ERROR;
598 if (!vector.isEmpty()) {
599 // first take care of the case where the vectors are sorted together
600 if (do_compare(vector.itemLocation(vector.size()-1), arrayImpl()) <= 0) {
601 err = VectorImpl::insertVectorAt(static_cast<const VectorImpl&>(vector), 0);
602 } else if (do_compare(vector.arrayImpl(), itemLocation(size()-1)) >= 0) {
603 err = VectorImpl::appendVector(static_cast<const VectorImpl&>(vector));
605 // this could be made a little better
606 err = merge(static_cast<const VectorImpl&>(vector));
// Removes the item comparing equal to *item, if present. Returns its
// former index, or NAME_NOT_FOUND when it was not in the vector.
612 ssize_t SortedVectorImpl::remove(const void* item)
614 ssize_t i = indexOf(item);
616 VectorImpl::removeItemsAt(i, 1);
// Reserved virtual slots so methods can be added later without changing
// the vtable layout (binary compatibility).
621 void SortedVectorImpl::reservedSortedVectorImpl1() { };
622 void SortedVectorImpl::reservedSortedVectorImpl2() { };
623 void SortedVectorImpl::reservedSortedVectorImpl3() { };
624 void SortedVectorImpl::reservedSortedVectorImpl4() { };
625 void SortedVectorImpl::reservedSortedVectorImpl5() { };
626 void SortedVectorImpl::reservedSortedVectorImpl6() { };
627 void SortedVectorImpl::reservedSortedVectorImpl7() { };
628 void SortedVectorImpl::reservedSortedVectorImpl8() { };
631 /*****************************************************************************/
633 }; // namespace android