// Fragment of the AbstractArray<T> class declaration: base class and trait
// typedefs. NOTE(review): the embedded numbers (169, 173, ...) are
// original-file line numbers left over from extraction; many intermediate
// lines of the class are not visible in this chunk.
169:
public AbstractArrayBase
// Alias supplied by ArrayTraits<T>; used below to receive element values
// (presumably "T" or "const T&" depending on the trait — confirm in ArrayTraits).
173 typedef typename ArrayTraits<T>::ConstReferenceType ConstReferenceType;
// Tag type (TrueType/FalseType) used for tag dispatch between the POD and
// non-POD construction/destruction paths (_createRange/_destroyRange below).
174 typedef typename ArrayTraits<T>::IsPODType IsPODType;
// Standard reverse-iterator adapters over the array's forward iterators.
199 typedef std::reverse_iterator<iterator> reverse_iterator;
200 typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
229 static constexpr Int64 typeSize() {
return static_cast<Int64
>(
sizeof(T)); }
230 AllocatedMemoryInfo _currentMemoryInfo()
const
232 return AllocatedMemoryInfo(m_ptr, m_md->size * typeSize(), m_md->capacity * typeSize());
// Fragments of several constructors/initializers (original lines 247-278);
// the surrounding bodies were dropped by the extraction.
// Copy-constructs elements [0, asize) from the source view's buffer.
247 _createRange(0, asize, view.
data());
// Optional caller-supplied storage; nullptr presumably means "allocate internally"
// (see the pre_allocated_buffer branch around original line 535).
262 void* pre_allocated_buffer =
nullptr)
// Releases the current allocation (and the meta-data when owned) before re-setup.
274 _internalDeallocate();
// Branch taken only when the requested allocator differs from the current one.
278 if (options.allocator() != m_md->_allocator())
289 operator Span<const T>()
const
291 return Span<const T>(m_ptr, m_md->
size);
293 operator SmallSpan<const T>()
const
295 return SmallSpan<const T>(m_ptr, ARCCORE_CAST_SMALL_SIZE(
size()));
301 Integer size()
const {
return ARCCORE_CAST_SMALL_SIZE(m_md->size); }
313 bool empty()
const {
return m_md->size == 0; }
// Fragments of several accessors / memory-location methods (original lines 317-374).
// Element-wise traversal over the whole array (enclosing method body not visible).
317 const T* ptr = m_ptr;
318 for (
Int64 i = 0, n = m_md->size; i < n; ++i) {
// Debug-mode bounds checks guarding indexed element access.
330 ARCCORE_CHECK_AT(i, m_md->size);
336 ARCCORE_CHECK_AT(i, m_md->size);
// Forwards a memory-usage hint for the current buffer to the meta-data layer.
345 m_md->_setMemoryLocationHint(new_hint, m_ptr,
sizeof(T));
// Setter/getter pair for the host/device memory location flag.
356 m_md->m_host_device_memory_location = location;
362 return m_md->m_host_device_memory_location;
// Tail of an operator!= defined as the negation of operator== (signature not visible).
374 return !(rhs == lhs);
377 friend bool operator==(
const AbstractArray<T>& rhs,
const Span<const T>& lhs)
379 return operator==(Span<const T>(rhs), lhs);
382 friend bool operator!=(
const AbstractArray<T>& rhs,
const Span<const T>& lhs)
384 return !(rhs == lhs);
387 friend bool operator==(
const Span<const T>& rhs,
const AbstractArray<T>& lhs)
389 return operator==(rhs, Span<const T>(lhs));
392 friend bool operator!=(
const Span<const T>& rhs,
const AbstractArray<T>& lhs)
394 return !(rhs == lhs);
// Stream output: prints the array through its Span<const T> representation.
// NOTE(review): the trailing 'return o;' of this operator (original line ~400)
// is outside the extracted view.
397 friend std::ostream& operator<<(std::ostream& o,
const AbstractArray<T>& val)
399 o << Span<const T>(val);
// Fragments of the capacity-reservation / growth logic (original lines 405-460).
405 using AbstractArrayBase::m_meta_data;
// Fast exit: nothing to do when the requested capacity already fits.
419 if (new_capacity <= m_md->
capacity) {
// Collective allocators are handled on a separate path (details not visible here).
422 if (m_meta_data.is_collective_allocator) {
// First allocation: the shared-null state has no buffer of its own.
// A zero capacity allocates nothing unless the allocator is collective.
438 if (_isSharedNull()) {
439 if (new_capacity != 0 || m_meta_data.is_collective_allocator)
440 _internalAllocate(new_capacity, queue);
// Geometric growth policy: start from the current capacity and grow by
// roughly 1.5x (minimum 4) until the request fits.
444 Int64 acapacity = new_capacity;
445 if (compute_capacity) {
446 acapacity = m_md->capacity;
448 while (new_capacity > acapacity)
449 acapacity = (acapacity == 0) ? 4 : (acapacity + 1 + acapacity / 2);
// Collective allocators reallocate with the meta-data's own capacity
// (presumably decided collectively elsewhere — confirm); others use the
// capacity computed above.
455 if (m_meta_data.is_collective_allocator) {
456 _internalReallocate(m_md->capacity, queue);
460 _internalReallocate(acapacity, queue);
// Reallocates storage to \a new_capacity elements (fragmentary view,
// original lines 463-486; several statements between the visible ones
// were dropped by the extraction).
463 void _internalReallocate(
Int64 new_capacity,
RunQueue* queue)
// Trivially-copyable elements: raw reallocation; the allocator may move the
// bytes itself, so only bookkeeping follows.
465 if constexpr (std::is_trivially_copyable_v<T>) {
468 _directReAllocate(new_capacity, queue);
// 'update' flags whether meta-data must be refreshed: the buffer moved, or
// the capacity shrank. (Its use is outside this view.)
469 bool update = (new_capacity < old_capacity) || (m_ptr != old_ptr);
// Non-trivial elements: allocate a fresh buffer, copy-construct each element
// into it, then release the old allocation.
476 ArrayMetaData* old_md = m_md;
477 AllocatedMemoryInfo old_mem_info = _currentMemoryInfo();
478 Int64 old_size = m_md->size;
479 _directAllocate(new_capacity, queue);
480 if (m_ptr != old_ptr) {
481 for (
Int64 i = 0; i < old_size; ++i) {
482 new (m_ptr + i) T(old_ptr[i]);
// Preserve the reference count across the meta-data change, then free the
// old memory. NOTE(review): destruction of the old elements is not visible
// in this extract — verify it happens in the dropped lines.
485 m_md->nb_ref = old_md->nb_ref;
486 m_md->_deallocate(old_mem_info, queue);
// Releases the element buffer (unless this is the shared-null instance) and
// the meta-data block when it was heap-allocated. Lines 494-496 of the
// original are not visible here.
493 void _internalDeallocate(RunQueue* queue =
nullptr)
497 if (!_isSharedNull())
498 m_md->_deallocate(_currentMemoryInfo(), queue);
// is_not_null presumably marks a meta-data block owned by this array
// (as opposed to the shared-null meta-data) — confirm in ArrayMetaData.
499 if (m_md->is_not_null)
500 _deallocateMetaData(m_md);
// First allocation for \a new_capacity elements; delegates to _directAllocate.
// The rest of the body (original lines 503, 505+) is not visible here.
502 void _internalAllocate(
Int64 new_capacity, RunQueue* queue)
504 _directAllocate(new_capacity, queue);
509 void _copyFromMemory(
const T* source)
511 m_md->_copyFromMemory(m_ptr, source,
sizeof(T),
_nullRunQueue());
// Fragments of the first-allocation helpers (original lines 525-549).
// Optional caller-supplied storage; nullptr means "allocate internally".
525 void* pre_allocated_buffer =
nullptr)
// Fall back to the default allocator when none was requested, and record
// the effective options in the meta-data.
528 if (!wanted_allocator) {
529 wanted_allocator = ArrayMetaData::_defaultAllocator();
530 options.setAllocator(wanted_allocator);
533 m_md->allocation_options = options;
// Allocate only for a positive capacity; a pre-allocated buffer is adopted
// instead of allocating.
534 if (new_capacity > 0) {
535 if (!pre_allocated_buffer)
536 _allocateMP(new_capacity,
nullptr);
538 _setMPCast(pre_allocated_buffer);
// Queue-aware allocation entry (enclosing method not visible).
549 _allocateMP(new_capacity, queue);
552 void _allocateMP(Int64 new_capacity, RunQueue* queue)
554 _setMPCast(m_md->_allocate(new_capacity, typeSize(), queue));
557 void _directReAllocate(
Int64 new_capacity, RunQueue* queue)
559 _setMPCast(m_md->_reallocate(_currentMemoryInfo(), new_capacity, typeSize(), queue));
564 void changeAllocator(
const MemoryAllocationOptions& options, RunQueue* queue)
566 _setMPCast(m_md->_changeAllocator(options, _currentMemoryInfo(), typeSize(), queue));
570 void changeAllocator(
const MemoryAllocationOptions& options)
572 _setMPCast(m_md->_changeAllocator(options, _currentMemoryInfo(), typeSize(),
_nullRunQueue()));
578 void printInfos(std::ostream& o)
580 o <<
" Infos: size=" << m_md->size <<
" capacity=" << m_md->capacity <<
'\n';
// Fragments of two append helpers (original lines 597-613): add n copies of a
// value, and add n elements from a view.
// Grow first when the n new elements would exceed the current capacity
// (the grow call on original line 599 is not visible).
597 Int64 s = m_md->size;
598 if ((s + n) > m_md->capacity)
// Placement-construct n copies of \a val past the current end.
600 for (
Int64 i = 0; i < n; ++i)
601 new (m_ptr + s + i) T(val);
// Same pattern for a source view: grow if needed, then copy-construct the
// new tail from the view's buffer.
609 const T* ptr = val.
data();
610 Int64 s = m_md->size;
611 if ((s + n) > m_md->capacity)
613 _createRange(s, s + n, ptr);
// Fragments of the reference-release path (original lines 620-627): when the
// last reference goes away, destroy all elements (POD/non-POD dispatched).
620 if (m_md->nb_ref == 0) {
627 _destroyRange(0, m_md->
size, IsPODType());
// Tag-dispatch overload for POD element types; its body (original lines
// 630-631) is not visible here — presumably empty, since PODs need no
// destructor call.
629 void _destroyRange(Int64, Int64, TrueType)
// Destroys elements [abegin, aend) for non-POD types; the loop body
// (original line 638, presumably an explicit destructor call) is not
// visible in this extract.
633 void _destroyRange(
Int64 abegin,
Int64 aend, FalseType)
637 for (
Int64 i = abegin; i < aend; ++i)
// Tag-dispatch overload for POD element types; its body (original lines
// 641-642) is not visible here — presumably empty, since PODs are left
// default-uninitialized.
640 void _createRangeDefault(
Int64,
Int64, TrueType)
// Default-constructs elements [abegin, aend) for non-POD types; the loop
// body (original line 648, presumably a placement new) is not visible in
// this extract.
643 void _createRangeDefault(
Int64 abegin,
Int64 aend, FalseType)
647 for (
Int64 i = abegin; i < aend; ++i)
// Fills elements [abegin, aend) with \a value for POD types; the loop body
// (original line 655, presumably a plain assignment) is not visible in this
// extract.
650 void _createRange(
Int64 abegin,
Int64 aend, ConstReferenceType value, TrueType)
654 for (
Int64 i = abegin; i < aend; ++i)
657 void _createRange(
Int64 abegin,
Int64 aend, ConstReferenceType value, FalseType)
661 for (
Int64 i = abegin; i < aend; ++i)
662 new (m_ptr + i) T(value);
// Copy-constructs elements [abegin, aend) from a source buffer.
// NOTE(review): as shown, each iteration copies *values; the advance of the
// source pointer (original line 670, presumably '++values;') is not visible
// in this extract — verify before assuming element-wise copy.
664 void _createRange(
Int64 abegin,
Int64 aend,
const T* values)
668 for (
Int64 i = abegin; i < aend; ++i) {
669 new (m_ptr + i) T(*values);
// Assigns \a value to every element; the loop body (original line 676,
// presumably 'm_ptr[i] = value;') is not visible in this extract.
673 void _fill(ConstReferenceType value)
675 for (
Int64 i = 0, n =
size(); i < n; ++i)
// Deep-copies \a orig_array into this instance: replicates size and the 2D
// shape fields, then copy-constructs all elements from the source buffer.
// NOTE(review): original line 681 (presumably the allocation for that_size
// elements) is not visible in this extract.
678 void _clone(
const ThatClassType& orig_array)
680 Int64 that_size = orig_array.size();
682 m_md->size = that_size;
683 m_md->dim1_size = orig_array.m_md->dim1_size;
684 m_md->dim2_size = orig_array.m_md->dim2_size;
685 _createRange(0, that_size, orig_array.m_ptr);
// Resizes to \a s elements, default-constructing the new tail when growing
// and destroying the surplus when shrinking (POD-ness dispatched through
// \a pod_type). Fragmentary view: original lines 687-698.
687 template <
typename PodType>
688 void _resizeHelper(
Int64 s, PodType pod_type, RunQueue* queue)
// Growing: construct the new elements [old size, s).
692 if (s > m_md->size) {
694 this->_createRangeDefault(m_md->size, s, pod_type);
// Shrinking: destroy the trailing elements [s, old size).
697 this->_destroyRange(s, m_md->size, pod_type);
// Collective allocators take an extra path here (details not visible).
698 if (m_meta_data.is_collective_allocator) {
// Fragments of the resize entry points (original lines 704-729).
// Plain resize: delegates to _resizeHelper with the POD tag.
704 void _resize(
Int64 s)
711 _resizeHelper(s,
TrueType{}, queue);
// Clears the whole array by destroying every element (enclosing method not visible).
715 this->_destroyRange(0, m_md->
size, IsPODType());
// Value-initializing resize: construct the new tail from \a value when
// growing, destroy the surplus when shrinking.
723 if (s > m_md->size) {
725 this->_createRange(m_md->size, s, value, IsPODType());
728 this->_destroyRange(s, m_md->size, IsPODType());
// Collective allocators take an extra path here (details not visible).
729 if (m_meta_data.is_collective_allocator) {
735 void _copy(
const T* rhs_begin,
TrueType)
737 _copyFromMemory(rhs_begin);
739 void _copy(
const T* rhs_begin, FalseType)
741 for (Int64 i = 0, n = m_md->
size; i < n; ++i)
742 m_ptr[i] = rhs_begin[i];
744 void _copy(
const T* rhs_begin)
746 _copy(rhs_begin, IsPODType());
// Fragments of resize-and-copy-from-view (original lines 759-777).
759 const T* rhs_begin = rhs.
data();
761 const Int64 current_size = m_md->size;
// Guard against self-overlap: copying from a range that aliases our own
// buffer is an error.
764 if (abegin >= rhs_begin && abegin < (rhs_begin + rhs_size))
765 ArrayMetaData::overlapError(abegin, m_md->size, rhs_begin, rhs_size);
// Growing: copy-construct the new tail from the source, then publish the size.
767 if (rhs_size > current_size) {
770 this->_createRange(m_md->size, rhs_size, rhs_begin + current_size);
773 m_md->size = rhs_size;
// Shrinking: destroy the surplus elements, then publish the size.
776 this->_destroyRange(rhs_size, current_size, IsPODType{});
777 m_md->size = rhs_size;
// Move and swap primitives (original lines 789-815); both bodies are almost
// entirely outside this view. _swap exchanges at least the data pointers.
789 void _move(ThatClassType& rhs) ARCCORE_NOEXCEPT
813 void _swap(ThatClassType& rhs) ARCCORE_NOEXCEPT
815 std::swap(m_ptr, rhs.m_ptr);
// Fragments of _shrink and of an assignment/copy path (original lines 825-862).
825 void _shrink(Int64 new_capacity)
// Shrinking never grows: a request above the current capacity is ignored
// (or rejected — the branch body is not visible).
830 if (new_capacity > this->
capacity())
// Minimum capacity clamp, matching the growth policy's minimum of 4.
832 if (new_capacity < 4)
// Narrowing clamp before a 64-bit -> Integer conversion.
850 if (asize > max_size)
852 asize =
static_cast<Integer>(max_size);
// Assignment path: same-allocator sources take a fast branch (details not visible).
861 Span<const T> rhs_span(rhs);
862 if (rhs.allocator() == this->allocator()) {
// Fragments of the pointer/meta-data setters and the shared-null reset
// (original lines 876-901); bodies are mostly outside this view.
876 void _setMP(TrueImpl* new_mp)
881 void _setMP2(TrueImpl* new_mp, ArrayMetaData* new_md)
// Guard: only meta-data blocks allocated with 'new' are deleted elsewhere.
887 if (!m_md->is_allocated_by_new)
// Tail of a shared-null test: the shared-null state has a null data pointer.
893 return m_ptr ==
nullptr;
// Resets this instance to the shared-null state (default meta-data).
898 void _setToSharedNull()
901 m_meta_data = ArrayMetaData();
904 void _setMPCast(
void* p)
906 _setMP(
reinterpret_cast<TrueImpl*
>(p));