Arcane  v4.1.0.0
Documentation développeur
Chargement...
Recherche...
Aucune correspondance
HashTableMap2.h
1// -*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
2//-----------------------------------------------------------------------------
3// Copyright 2000-2025 CEA (www.cea.fr) IFPEN (www.ifpenergiesnouvelles.com)
4// See the top-level COPYRIGHT file for details.
5// SPDX-License-Identifier: Apache-2.0
6//-----------------------------------------------------------------------------
7#ifndef ARCANE_UTILS_HASHTABLEMAP2_H
8#define ARCANE_UTILS_HASHTABLEMAP2_H
9/*---------------------------------------------------------------------------*/
10/*---------------------------------------------------------------------------*/
11
13#include "arccore/common/AllocatedMemoryInfo.h"
14#include "arccore/common/IMemoryAllocator.h"
15
16// Initial version taken from commit bdebddbdce1b473bbc189178fd523ef4a876ea01 (August 27, 2024)
17// emhash8::HashMap for C++14/17
18// version 1.6.5
19// https://github.com/ktprime/emhash/blob/master/hash_table8.hpp
20//
21// Licensed under the MIT License <http://opensource.org/licenses/MIT>.
22// SPDX-License-Identifier: MIT
23// Copyright (c) 2021-2024 Huang Yuanbing & bailuzhou AT 163.com
24//
25// Permission is hereby granted, free of charge, to any person obtaining a copy
26// of this software and associated documentation files (the "Software"), to deal
27// in the Software without restriction, including without limitation the rights
28// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
29// copies of the Software, and to permit persons to whom the Software is
30// furnished to do so, subject to the following conditions:
31//
32// The above copyright notice and this permission notice shall be included in all
33// copies or substantial portions of the Software.
34//
35// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
36// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
37// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
38// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
39// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
40// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
41// SOFTWARE
42
43#include <cstring>
44#include <string>
45#include <cstdlib>
46#include <type_traits>
47#include <cassert>
48#include <utility>
49#include <cstdint>
50#include <functional>
51#include <iterator>
52#include <algorithm>
53#include <memory>
54
55#undef EMH_NEW
56#undef EMH_EMPTY
57
58// likely/unlikely
59#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
60#define EMH_LIKELY(condition) __builtin_expect(condition, 1)
61#define EMH_UNLIKELY(condition) __builtin_expect(condition, 0)
62#else
63#define EMH_LIKELY(condition) condition
64#define EMH_UNLIKELY(condition) condition
65#endif
66
67#define EMH_EMPTY(n) (0 > (int)(m_index[n].next))
68#define EMH_EQHASH(n, key_hash) (((size_type)(key_hash) & ~m_mask) == (m_index[n].slot & ~m_mask))
69#define EMH_NEW(key, val, bucket, key_hash) \
70 new (m_pairs + m_num_filled) value_type(key, val); \
71 m_etail = bucket; \
72 m_index[bucket] = { bucket, m_num_filled++ | ((size_type)(key_hash) & ~m_mask) }
73
74/*---------------------------------------------------------------------------*/
75/*---------------------------------------------------------------------------*/
76
77namespace Arcane::impl
78{
//! Non-template base class shared by all HashTableMap2 instantiations.
//! Owns the bucket index array and the scalar bookkeeping fields.
class ARCANE_UTILS_EXPORT HashTableMap2Base
{
 public:

  using size_type = uint32_t;

  //! One bucket of the index: collision-chain link plus packed slot/hash.
  struct Index
  {
    size_type next; //!< next bucket in the collision chain ((int)next < 0 means empty)
    size_type slot; //!< low bits (mask): slot in the pair array; high bits: cached hash fragment
  };

 protected:

  //! Number of extra sentinel entries allocated past the end of the index array.
  constexpr static size_type EAD = 2;

 protected:

  Index* m_index = nullptr; //!< bucket index array (m_num_buckets + EAD entries)
  uint32_t m_mlf = 0; //!< max load factor encoded as (1 << 27) / load_factor
  size_type m_mask = 0; //!< m_num_buckets - 1 (bucket count is a power of two)
  size_type m_num_buckets = 0; //!< current number of buckets
  size_type m_num_filled = 0; //!< current number of stored elements
  size_type m_last = 0; //!< probe start hint when searching for a free bucket
  size_type m_etail = 0; //!< bucket of the most recent insertion (INACTIVE when unknown)
  IMemoryAllocator* m_memory_allocator = _defaultAllocator();

 private:

  Int64 m_index_allocated_size = 0; //!< byte size of the current m_index allocation

 protected:

  //! Allocates the index array for \a num_buckets buckets (+ EAD sentinels).
  void _allocIndex(size_type num_buckets)
  {
    // 64-bit intermediate avoids overflow of (EAD + num_buckets) * sizeof(Index).
    m_index_allocated_size = (uint64_t)(EAD + num_buckets) * sizeof(Index);
    AllocatedMemoryInfo mem_info = m_memory_allocator->allocate({}, m_index_allocated_size);
    m_index = reinterpret_cast<Index*>(mem_info.baseAddress());
  }
  //! Releases the index array previously obtained via _allocIndex().
  void _freeIndex()
  {
    m_memory_allocator->deallocate({}, { m_index, m_index_allocated_size });
    m_index = nullptr;
    m_index_allocated_size = 0;
  }

  //! Swaps every base-class field with \a rhs (used by the derived swap()).
  void _doSwap(HashTableMap2Base& rhs)
  {
    std::swap(m_index, rhs.m_index);
    std::swap(m_num_buckets, rhs.m_num_buckets);
    std::swap(m_num_filled, rhs.m_num_filled);
    std::swap(m_mask, rhs.m_mask);
    std::swap(m_mlf, rhs.m_mlf);
    std::swap(m_last, rhs.m_last);
    std::swap(m_etail, rhs.m_etail);
    std::swap(m_index_allocated_size, rhs.m_index_allocated_size);
    std::swap(m_memory_allocator, rhs.m_memory_allocator);
  }

  //! Copies every base-class scalar field from \a rhs.
  //! Does NOT copy the index array contents (the caller does that).
  void _doClone(const HashTableMap2Base& rhs)
  {
    m_num_buckets = rhs.m_num_buckets;
    m_num_filled = rhs.m_num_filled;
    m_mlf = rhs.m_mlf;
    m_last = rhs.m_last;
    m_mask = rhs.m_mask;
    m_etail = rhs.m_etail;
    m_index_allocated_size = rhs.m_index_allocated_size;
    m_memory_allocator = rhs.m_memory_allocator;
  };

 private:

  static IMemoryAllocator* _defaultAllocator();
};
155
156/*---------------------------------------------------------------------------*/
157/*---------------------------------------------------------------------------*/
163template <typename KeyT, typename ValueT,
164 typename HashT = std::hash<KeyT>,
165 typename EqT = std::equal_to<KeyT>>
166class HashTableMap2
167: public HashTableMap2Base
168{
169 constexpr static float EMH_DEFAULT_LOAD_FACTOR = 0.80f;
170 constexpr static float EMH_MIN_LOAD_FACTOR = 0.25f; //< 0.5
171 constexpr static uint32_t EMH_CACHE_LINE_SIZE = 64; //debug only
172
173 public:
174
175 using htype = HashTableMap2<KeyT, ValueT, HashT, EqT>;
176 using value_type = std::pair<KeyT, ValueT>;
177 using key_type = KeyT;
178 using mapped_type = ValueT;
179 using hasher = HashT;
180 using key_equal = EqT;
181
182 constexpr static size_type INACTIVE = 0xFFFFFFFF;
183 constexpr static size_type END = 0xFFFFFFFF;
184
185 class const_iterator;
186 class iterator
187 {
188 public:
189
190 using iterator_category = std::bidirectional_iterator_tag;
191 using difference_type = std::ptrdiff_t;
192 using value_type = typename htype::value_type;
193 using pointer = value_type*;
194 using const_pointer = const value_type*;
195 using reference = value_type&;
196 using const_reference = const value_type&;
197
198 iterator()
199 : kv_(nullptr)
200 {}
201 iterator(const_iterator& cit)
202 {
203 kv_ = cit.kv_;
204 }
205
206 iterator(const htype* hash_map, size_type bucket)
207 {
208 kv_ = hash_map->m_pairs + (int)bucket;
209 }
210
211 iterator& operator++()
212 {
213 kv_++;
214 return *this;
215 }
216
217 iterator operator++(int)
218 {
219 auto cur = *this;
220 kv_++;
221 return cur;
222 }
223
224 iterator& operator--()
225 {
226 kv_--;
227 return *this;
228 }
229
230 iterator operator--(int)
231 {
232 auto cur = *this;
233 kv_--;
234 return cur;
235 }
236
237 reference operator*() const { return *kv_; }
238 pointer operator->() const { return kv_; }
239
240 bool operator==(const iterator& rhs) const { return kv_ == rhs.kv_; }
241 bool operator!=(const iterator& rhs) const { return kv_ != rhs.kv_; }
242 bool operator==(const const_iterator& rhs) const { return kv_ == rhs.kv_; }
243 bool operator!=(const const_iterator& rhs) const { return kv_ != rhs.kv_; }
244
245 public:
246
247 value_type* kv_;
248 };
249
  //! Read-only bidirectional iterator over the dense pair array.
  class const_iterator
  {
   public:

    using iterator_category = std::bidirectional_iterator_tag;
    using value_type = typename htype::value_type;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type*;
    using const_pointer = const value_type*;
    using reference = value_type&;
    using const_reference = const value_type&;

    //! Implicit conversion from the mutable iterator.
    const_iterator(const iterator& it)
    {
      kv_ = it.kv_;
    }

    const_iterator(const htype* hash_map, size_type bucket)
    {
      kv_ = hash_map->m_pairs + (int)bucket;
    }

    const_iterator& operator++()
    {
      kv_++;
      return *this;
    }

    const_iterator operator++(int)
    {
      auto cur = *this;
      kv_++;
      return cur;
    }

    const_iterator& operator--()
    {
      kv_--;
      return *this;
    }

    const_iterator operator--(int)
    {
      auto cur = *this;
      kv_--;
      return cur;
    }

    const_reference operator*() const { return *kv_; }
    const_pointer operator->() const { return kv_; }

    bool operator==(const iterator& rhs) const { return kv_ == rhs.kv_; }
    bool operator!=(const iterator& rhs) const { return kv_ != rhs.kv_; }
    bool operator==(const const_iterator& rhs) const { return kv_ == rhs.kv_; }
    bool operator!=(const const_iterator& rhs) const { return kv_ != rhs.kv_; }

   public:

    //! Raw pointer into the dense pair array (public: used by the map itself).
    const value_type* kv_;
  };
310
  //! (Re)initializes an empty map sized for \a bucket elements with load factor \a mlf.
  void init(size_type bucket, float mlf = EMH_DEFAULT_LOAD_FACTOR)
  {
    m_pairs = nullptr;
    m_index = nullptr;
    m_mask = m_num_buckets = 0;
    m_num_filled = 0;
    // Start from the default load factor; max_load_factor() below only
    // overwrites it when \a mlf is inside the accepted range.
    m_mlf = (uint32_t)((1 << 27) / EMH_DEFAULT_LOAD_FACTOR);
    max_load_factor(mlf);
    rehash(bucket);
  }

  HashTableMap2(size_type bucket = 2, float mlf = EMH_DEFAULT_LOAD_FACTOR)
  {
    init(bucket, mlf);
  }

  //! Copy constructor. A reasonably filled source is cloned array-for-array;
  //! a sparse one is re-inserted into a compact table instead.
  HashTableMap2(const HashTableMap2& rhs)
  {
    if (rhs.load_factor() > EMH_MIN_LOAD_FACTOR) {
      m_pairs = _allocBucket((size_type)(rhs.m_num_buckets * rhs.max_load_factor()) + 4);
      _allocIndex(rhs.m_num_buckets);
      clone(rhs);
    }
    else {
      init(rhs.m_num_filled + 2, rhs.max_load_factor());
      for (auto it = rhs.begin(); it != rhs.end(); ++it)
        insert_unique(it->first, it->second);
    }
  }

  //! Move constructor: builds a minimal valid table then steals rhs's storage.
  HashTableMap2(HashTableMap2&& rhs) noexcept
  {
    init(0);
    *this = std::move(rhs);
  }

  HashTableMap2(std::initializer_list<value_type> ilist)
  {
    init((size_type)ilist.size());
    for (auto it = ilist.begin(); it != ilist.end(); ++it)
      _doInsert(*it);
  }

  template <class InputIt>
  HashTableMap2(InputIt first, InputIt last, size_type bucket_count = 4)
  {
    init(std::distance(first, last) + bucket_count);
    for (; first != last; ++first)
      emplace(*first);
  }
361
  //! Copy assignment. A sparse source is re-inserted into a compact table;
  //! otherwise existing storage is reused (reallocated only if sizes differ).
  HashTableMap2& operator=(const HashTableMap2& rhs)
  {
    if (this == &rhs)
      return *this;

    if (rhs.load_factor() < EMH_MIN_LOAD_FACTOR) {
      clear();
      _freeBuckets();
      rehash(rhs.m_num_filled + 2);
      for (auto it = rhs.begin(); it != rhs.end(); ++it)
        insert_unique(it->first, it->second);
      return *this;
    }

    // Destroy current elements but keep the arrays when they fit rhs.
    clearkv();

    if (m_num_buckets != rhs.m_num_buckets) {
      _freeIndex();
      _freeBuckets();
      _allocIndex(rhs.m_num_buckets);
      m_pairs = _allocBucket((size_type)(rhs.m_num_buckets * rhs.max_load_factor()) + 4);
    }

    clone(rhs);
    return *this;
  }

  //! Move assignment: swaps storage with rhs, then leaves rhs cleared but usable.
  HashTableMap2& operator=(HashTableMap2&& rhs) noexcept
  {
    if (this != &rhs) {
      swap(rhs);
      rhs.clear();
    }
    return *this;
  }
397
398 template <typename Con>
399 bool operator==(const Con& rhs) const
400 {
401 if (size() != rhs.size())
402 return false;
403
404 for (auto it = begin(), last = end(); it != last; ++it) {
405 auto oi = rhs.find(it->first);
406 if (oi == rhs.end() || it->second != oi->second)
407 return false;
408 }
409 return true;
410 }
411
412 template <typename Con>
413 bool operator!=(const Con& rhs) const
414 {
415 return !(*this == rhs);
416 }
417
  ~HashTableMap2() noexcept
  {
    // Destroy stored pairs before releasing the pair and index storage.
    clearkv();
    _freeBuckets();
    _freeIndex();
  }
424
  //! Makes *this an exact copy of \a rhs. Both the index and pair arrays must
  //! already be allocated with rhs's sizes (see copy ctor / copy assignment).
  void clone(const HashTableMap2& rhs)
  {
    _doClone(rhs);
    m_hasher = rhs.m_hasher;
    m_pairs_allocated_size = rhs.m_pairs_allocated_size;

    auto opairs = rhs.m_pairs;
    // The index array is plain data: always byte-copied.
    memcpy((char*)m_index, (char*)rhs.m_index, (m_num_buckets + EAD) * sizeof(Index));

    if (is_copy_trivially()) {
      memcpy((char*)m_pairs, (char*)opairs, m_num_filled * sizeof(value_type));
    }
    else {
      // Non trivially-copyable pairs: copy-construct each filled slot in place.
      for (size_type slot = 0; slot < m_num_filled; slot++)
        new (m_pairs + slot) value_type(opairs[slot]);
    }
  }
442
  //! Swaps the full contents with \a rhs (storage pointers and allocator included).
  void swap(HashTableMap2& rhs)
  {
    _doSwap(rhs);
    std::swap(m_hasher, rhs.m_hasher);
    std::swap(m_pairs, rhs.m_pairs);
    std::swap(m_pairs_allocated_size, rhs.m_pairs_allocated_size);
  }
450
  // -------------------------------------------------------------
  //! Iterator on the first element (slot 0 of the dense pair array).
  iterator first() const
  {
    return { this, 0 };
  }
  //! Iterator on the last filled element. Requires a non-empty map.
  iterator last() const
  {
    return { this, m_num_filled - 1 };
  }

  //! First stored pair (insertion order). No bounds check.
  value_type& front()
  {
    return m_pairs[0];
  }
  const value_type& front() const
  {
    return m_pairs[0];
  }
  //! Last stored pair. No bounds check.
  value_type& back()
  {
    return m_pairs[m_num_filled - 1];
  }
  const value_type& back() const
  {
    return m_pairs[m_num_filled - 1];
  }

  void pop_front()
  {
    erase(begin());
  } //TODO. only erase first without move last
  void pop_back()
  {
    erase(last());
  }
486
  iterator begin()
  {
    return first();
  }
  const_iterator cbegin() const
  {
    return first();
  }
  const_iterator begin() const
  {
    return first();
  }

  //! Past-the-end iterator (one past the last filled slot).
  iterator end()
  {
    return { this, m_num_filled };
  }
  const_iterator cend() const
  {
    return { this, m_num_filled };
  }
  const_iterator end() const
  {
    return cend();
  }

  //! Direct read-only access to the dense pair array (size() valid entries).
  const value_type* values() const
  {
    return m_pairs;
  }
  //! Direct read-only access to the bucket index array.
  const Index* index() const
  {
    return m_index;
  }
521
  size_type size() const
  {
    return m_num_filled;
  }
  bool empty() const
  {
    return m_num_filled == 0;
  }
  size_type bucket_count() const
  {
    return m_num_buckets;
  }

  //! Current load factor: filled slots over the power-of-two bucket count.
  double load_factor() const
  {
    return static_cast<double>(m_num_filled) / (m_mask + 1);
  }

  // NOTE(review): both accessors return a non-const reference from a const
  // member function; this only compiles if m_hasher/m_eq are declared
  // mutable — confirm against the member declarations below.
  HashT& hash_function() const
  {
    return m_hasher;
  }
  EqT& key_eq() const
  {
    return m_eq;
  }
549
  //! Sets the max load factor and rehashes. Values outside
  //! ]EMH_MIN_LOAD_FACTOR, 0.992[ are silently ignored.
  void max_load_factor(double mlf)
  {
    if (mlf < 0.992 && mlf > EMH_MIN_LOAD_FACTOR) {
      // Stored in fixed point: m_mlf = (1 << 27) / load_factor.
      m_mlf = (uint32_t)((1 << 27) / mlf);
      if (m_num_buckets > 0)
        rehash(m_num_buckets);
    }
  }

  //! Max load factor, decoded from its fixed-point (1 << 27) representation.
  constexpr double max_load_factor() const
  {
    return (1 << 27) / static_cast<double>(m_mlf);
  }
  constexpr size_type max_size() const
  {
    return (1ull << (sizeof(size_type) * 8 - 1));
  }
  constexpr size_type max_bucket_count() const
  {
    return max_size();
  }
571
  // ------------------------------------------------------------
  //! Iterator on \a key, or end() when absent.
  template <typename K = KeyT>
  iterator find(const K& key) noexcept
  {
    return { this, find_filled_slot(key) };
  }

  template <typename K = KeyT>
  const_iterator find(const K& key) const noexcept
  {
    return { this, find_filled_slot(key) };
  }

  //! Value associated with \a key.
  //! WARNING: unlike std::map::at(), no exception is thrown for a missing key
  //! (the '//throw' below is an upstream placeholder): the returned reference
  //! then designates the slot just past the last filled element.
  template <typename K = KeyT>
  ValueT& at(const K& key)
  {
    const auto slot = find_filled_slot(key);
    //throw
    return m_pairs[slot].second;
  }

  template <typename K = KeyT>
  const ValueT& at(const K& key) const
  {
    const auto slot = find_filled_slot(key);
    //throw
    return m_pairs[slot].second;
  }

  //! Value stored at slot \a index (insertion-order position, no bounds check).
  const ValueT& index(const uint32_t index) const
  {
    return m_pairs[index].second;
  }

  ValueT& index(const uint32_t index)
  {
    return m_pairs[index].second;
  }
610
611 template <typename K = KeyT>
612 bool contains(const K& key) const noexcept
613 {
614 return find_filled_slot(key) != m_num_filled;
615 }
616
617 template <typename K = KeyT>
618 size_type count(const K& key) const noexcept
619 {
620 return find_filled_slot(key) == m_num_filled ? 0 : 1;
621 }
622
623 template <typename K = KeyT>
624 std::pair<iterator, iterator> equal_range(const K& key)
625 {
626 const auto found = find(key);
627 if (found.second == m_num_filled)
628 return { found, found };
629 else
630 return { found, std::next(found) };
631 }
632
  //! Moves into *this every element of \a rhs whose key is absent here;
  //! elements with already-present keys stay in \a rhs (std::map::merge style).
  void merge(HashTableMap2& rhs)
  {
    // Empty destination: take rhs's storage wholesale.
    if (empty()) {
      *this = std::move(rhs);
      return;
    }

    for (auto rit = rhs.begin(); rit != rhs.end();) {
      auto fit = find(rit->first);
      if (fit == end()) {
        insert_unique(rit->first, std::move(rit->second));
        // erase() returns an iterator on the slot now holding the next element.
        rit = rhs.erase(rit);
      }
      else {
        ++rit;
      }
    }
  }
651
  //! Arcane-style alias for insert().
  std::pair<iterator, bool> add(const KeyT& key, const ValueT& value)
  {
    return insert(std::make_pair(key, value));
  }

  //! Inserts \a p if its key is absent; returns {iterator, was_inserted}.
  std::pair<iterator, bool> insert(const value_type& p)
  {
    check_expand_need();
    return _doInsert(p);
  }

  std::pair<iterator, bool> insert(value_type&& p)
  {
    check_expand_need();
    return _doInsert(std::move(p));
  }

  void insert(std::initializer_list<value_type> ilist)
  {
    // Reserve once up front instead of growing per element.
    reserve(ilist.size() + m_num_filled, false);
    for (auto it = ilist.begin(); it != ilist.end(); ++it)
      _doInsert(*it);
  }

  template <typename Iter>
  void insert(Iter first, Iter last)
  {
    reserve(std::distance(first, last) + m_num_filled, false);
    for (; first != last; ++first)
      _doInsert(first->first, first->second);
  }

  //! std::unordered_map::emplace() equivalent; arguments feed _doInsert().
  template <class... Args>
  std::pair<iterator, bool> emplace(Args&&... args) noexcept
  {
    check_expand_need();
    return _doInsert(std::forward<Args>(args)...);
  }

  //no any optimize for position
  template <class... Args>
  iterator emplace_hint(const_iterator hint, Args&&... args)
  {
    (void)hint;
    check_expand_need();
    return _doInsert(std::forward<Args>(args)...).first;
  }

  //! Inserts only when \a k is absent (value arguments are then unused).
  template <class... Args>
  std::pair<iterator, bool> try_emplace(const KeyT& k, Args&&... args)
  {
    check_expand_need();
    return _doInsert(k, std::forward<Args>(args)...);
  }

  template <class... Args>
  std::pair<iterator, bool> try_emplace(KeyT&& k, Args&&... args)
  {
    check_expand_need();
    return _doInsert(std::move(k), std::forward<Args>(args)...);
  }

  //! Inserts a key known to be absent; returns the index bucket used.
  template <class... Args>
  size_type emplace_unique(Args&&... args)
  {
    return insert_unique(std::forward<Args>(args)...);
  }

  //! Inserts (key, val), overwriting the value when the key already exists.
  std::pair<iterator, bool> insert_or_assign(const KeyT& key, ValueT&& val)
  {
    return do_assign(key, std::forward<ValueT>(val));
  }
  std::pair<iterator, bool> insert_or_assign(KeyT&& key, ValueT&& val)
  {
    return do_assign(std::move(key), std::forward<ValueT>(val));
  }
728
  //! Inserts (key, val) and returns ValueT() when the key was absent;
  //! otherwise replaces the stored value by \a val and returns the old value.
  ValueT set_get(const KeyT& key, const ValueT& val)
  {
    check_expand_need();
    const auto key_hash = hash_key(key);
    const auto bucket = _findOrAllocate(key, key_hash);
    if (EMH_EMPTY(bucket)) {
      EMH_NEW(key, val, bucket, key_hash);
      return ValueT();
    }
    else {
      const auto slot = m_index[bucket].slot & m_mask;
      // Swap trick: old stored value comes back out through old_value.
      ValueT old_value(val);
      std::swap(m_pairs[slot].second, old_value);
      return old_value;
    }
  }
746
  //! Accesses the value mapped to \a key, default-constructing it if absent.
  ValueT& operator[](const KeyT& key) noexcept
  {
    check_expand_need();
    const auto key_hash = hash_key(key);
    const auto bucket = _findOrAllocate(key, key_hash);
    if (EMH_EMPTY(bucket)) {
      /* Check if inserting a value rather than overwriting an old entry */
      EMH_NEW(key, std::move(ValueT()), bucket, key_hash);
    }

    const auto slot = m_index[bucket].slot & m_mask;
    return m_pairs[slot].second;
  }

  //! Rvalue-key overload: the key is moved into the new pair when inserting.
  ValueT& operator[](KeyT&& key) noexcept
  {
    check_expand_need();
    const auto key_hash = hash_key(key);
    const auto bucket = _findOrAllocate(key, key_hash);
    if (EMH_EMPTY(bucket)) {
      EMH_NEW(std::move(key), std::move(ValueT()), bucket, key_hash);
    }

    const auto slot = m_index[bucket].slot & m_mask;
    return m_pairs[slot].second;
  }
774
  //! Erases \a key; returns the number of erased elements (0 or 1).
  size_type erase(const KeyT& key) noexcept
  {
    const auto key_hash = hash_key(key);
    const auto sbucket = find_filled_bucket(key, key_hash);
    if (sbucket == INACTIVE)
      return 0;

    const auto main_bucket = key_hash & m_mask;
    erase_slot(sbucket, (size_type)main_bucket);
    return 1;
  }

  //iterator erase(const_iterator begin_it, const_iterator end_it)
  //! Erases the element at \a cit. The hole is filled by moving the last
  //! element into it, so the returned iterator designates the same slot
  //! (now holding the moved element, or end() if it was the last one).
  iterator erase(const const_iterator& cit) noexcept
  {
    const auto slot = (size_type)(cit.kv_ - m_pairs);
    size_type main_bucket;
    const auto sbucket = find_slot_bucket(slot, main_bucket); //TODO
    erase_slot(sbucket, main_bucket);
    return { this, slot };
  }

  //only last >= first
  //! Erases [first, last). Elements with a tail after them are erased one by
  //! one in place; the remainder is erased backwards from the end of the map
  //! to avoid moving elements needlessly.
  iterator erase(const_iterator first, const_iterator last) noexcept
  {
    auto esize = long(last.kv_ - first.kv_);
    auto tsize = long((m_pairs + m_num_filled) - last.kv_); //last to tail size
    auto next = first;
    while (tsize-- > 0) {
      if (esize-- <= 0)
        break;
      next = ++erase(next);
    }

    //fast erase from last
    next = this->last();
    while (esize-- > 0)
      next = --erase(next);

    return { this, size_type(next.kv_ - m_pairs) };
  }
818
819 template <typename Pred>
820 size_type erase_if(Pred pred)
821 {
822 auto old_size = size();
823 for (auto it = begin(); it != end();) {
824 if (pred(*it))
825 it = erase(it);
826 else
827 ++it;
828 }
829 return old_size - size();
830 }
831
  //! True when ~value_type() must be called explicitly on stored pairs.
  static constexpr bool is_triviall_destructable()
  {
#if __cplusplus >= 201402L || _MSC_VER > 1600
    return !(std::is_trivially_destructible<KeyT>::value && std::is_trivially_destructible<ValueT>::value);
#else
    return !(std::is_pod<KeyT>::value && std::is_pod<ValueT>::value);
#endif
  }

  //! True when stored pairs may be copied with memcpy().
  static constexpr bool is_copy_trivially()
  {
#if __cplusplus >= 201103L || _MSC_VER > 1600
    return (std::is_trivially_copyable<KeyT>::value && std::is_trivially_copyable<ValueT>::value);
#else
    return (std::is_pod<KeyT>::value && std::is_pod<ValueT>::value);
#endif
  }
849
  //! Removes all elements; the allocated capacity is kept.
  void clear() noexcept
  {
    clearkv();

    // memset with 0xFF bytes marks every index entry INACTIVE (empty).
    if (m_num_filled > 0)
      memset((char*)m_index, INACTIVE, sizeof(m_index[0]) * m_num_buckets);

    m_last = m_num_filled = 0;
    m_etail = INACTIVE;

  }

  //! Shrinks storage when the table is very sparse (safeguards small tables).
  void shrink_to_fit(const float min_factor = EMH_DEFAULT_LOAD_FACTOR / 4)
  {
    if (load_factor() < min_factor && bucket_count() > 10) //safe guard
      rehash(m_num_filled + 1);
  }
868
869
  //! Grows the table when \a num_elems would exceed the max load factor.
  //! Returns true when a rehash occurred. \a force is currently ignored.
  bool reserve(uint64_t num_elems, bool force)
  {
    (void)force;
    // num_elems * m_mlf >> 27 == num_elems / max_load_factor() (fixed point).
    const auto required_buckets = num_elems * m_mlf >> 27;
    if (EMH_LIKELY(required_buckets < m_mask)) // && !force
      return false;

    //assert(required_buckets < max_size());
    rehash(required_buckets + 2);
    return true;
  }

  //! When \a required_buckets equals the current element count, rebuilds the
  //! bucket index in place (pair array untouched); otherwise delegates to the
  //! growing reserve() above.
  bool reserve(size_type required_buckets) noexcept
  {
    if (m_num_filled != required_buckets)
      return reserve(required_buckets, true);

    m_last = 0;

    // Reset every index entry (0xFF bytes == INACTIVE) then re-link all slots.
    memset((char*)m_index, INACTIVE, sizeof(m_index[0]) * m_num_buckets);
    for (size_type slot = 0; slot < m_num_filled; slot++) {
      const auto& key = m_pairs[slot].first;
      const auto key_hash = hash_key(key);
      const auto bucket = size_type(key_hash & m_mask);
      auto& next_bucket = m_index[bucket].next;
      if ((int)next_bucket < 0)
        m_index[bucket] = { 1, slot | ((size_type)(key_hash) & ~m_mask) };
      else {
        // Bucket already used: merge the hash fragment and count the collision.
        m_index[bucket].slot |= (size_type)(key_hash) & ~m_mask;
        next_bucket++;
      }
    }
    return true;
  }
905
  //! Rebuilds the table with at least \a required_buckets buckets (rounded up
  //! to a power of two) and re-indexes every stored element.
  void rehash(uint64_t required_buckets)
  {
    if (required_buckets < m_num_filled)
      return;

    assert(required_buckets < max_size());
    // Start from 4 buckets (64K for already-large tables) and double up.
    auto num_buckets = m_num_filled > (1u << 16) ? (1u << 16) : 4u;
    while (num_buckets < required_buckets) {
      num_buckets *= 2;
    }
    m_last = 0;

    m_mask = num_buckets - 1;
#if EMH_PACK_TAIL > 1
    m_last = m_mask;
    num_buckets += num_buckets * EMH_PACK_TAIL / 100; //add more 5-10%
#endif
    m_num_buckets = num_buckets;

    rebuild(num_buckets);

    // Re-insert every slot into the fresh (all INACTIVE) index.
    m_etail = INACTIVE;
    for (size_type slot = 0; slot < m_num_filled; ++slot) {
      const auto& key = m_pairs[slot].first;
      const auto key_hash = hash_key(key);
      const auto bucket = _findUniqueBucket(key_hash);
      m_index[bucket] = { bucket, slot | ((size_type)(key_hash) & ~m_mask) };
    }
  }
935
936 private:
937
  //! Destroys all stored pairs (no-op for trivially-destructible types).
  //! NOTE: when destructors do run, the post-decrement loop leaves
  //! m_num_filled wrapped to 0xFFFFFFFF; callers must reset it afterwards
  //! (clear() and clone() do) or no longer read it (destructor).
  void clearkv()
  {
    if (is_triviall_destructable()) {
      while (m_num_filled--)
        m_pairs[m_num_filled].~value_type();
    }
  }
945
  //! Reallocates the pair array sized for \a num_buckets buckets, moves the
  //! existing pairs into it, then reallocates a cleared bucket index.
  void rebuild(size_type num_buckets) noexcept
  {
    _freeIndex();
    auto new_pairs = _allocBucket((size_type)(num_buckets * max_load_factor()) + 4);
    if (is_copy_trivially()) {
      if (m_pairs)
        memcpy((char*)new_pairs, (char*)m_pairs, m_num_filled * sizeof(value_type));
    }
    else {
      // Move-construct each pair into the new array and destroy the source.
      for (size_type slot = 0; slot < m_num_filled; slot++) {
        new (new_pairs + slot) value_type(std::move(m_pairs[slot]));
        if (is_triviall_destructable())
          m_pairs[slot].~value_type();
      }
    }
    _freeBuckets();
    m_pairs = new_pairs;
    _allocIndex(num_buckets);

    // 0xFF bytes == INACTIVE for every real bucket; the EAD sentinel entries
    // past the end are zeroed.
    memset((char*)m_index, INACTIVE, sizeof(m_index[0]) * num_buckets);
    memset((char*)(m_index + num_buckets), 0, sizeof(m_index[0]) * EAD);
  }
968
  //! Writes a sentinel pair just past the last filled slot (relies on the
  //! spare capacity always allocated after the pair array).
  void pack_zero(ValueT zero)
  {
    m_pairs[m_num_filled] = { KeyT(), zero };
  }
973
  //! Copies the value mapped to \a key into \a val; returns true when found.
  bool try_get(const KeyT& key, ValueT& val) const noexcept
  {
    const auto slot = find_filled_slot(key);
    const auto found = slot != m_num_filled;
    if (found) {
      val = m_pairs[slot].second;
    }
    return found;
  }

  //! Pointer to the value mapped to \a key, or nullptr when absent.
  ValueT* try_get(const KeyT& key) noexcept
  {
    const auto slot = find_filled_slot(key);
    return slot != m_num_filled ? &m_pairs[slot].second : nullptr;
  }

  //! Const overload; note it still returns a mutable ValueT* (the constness
  //! only applies to the m_pairs pointer itself, not its pointee).
  ValueT* try_get(const KeyT& key) const noexcept
  {
    const auto slot = find_filled_slot(key);
    return slot != m_num_filled ? &m_pairs[slot].second : nullptr;
  }

  //! Overwrites the value mapped to \a key; returns false when absent.
  bool try_set(const KeyT& key, const ValueT& val) noexcept
  {
    const auto slot = find_filled_slot(key);
    if (slot == m_num_filled)
      return false;

    m_pairs[slot].second = val;
    return true;
  }

  //! Move-assigning variant of try_set().
  bool try_set(const KeyT& key, ValueT&& val) noexcept
  {
    const auto slot = find_filled_slot(key);
    if (slot == m_num_filled)
      return false;

    m_pairs[slot].second = std::move(val);
    return true;
  }
1020
1022 ValueT get_or_return_default(const KeyT& key) const noexcept
1023 {
1024 const auto slot = find_filled_slot(key);
1025 return slot == m_num_filled ? ValueT() : m_pairs[slot].second;
1026 }
1027
  // -----------------------------------------------------
  //! Inserts \a value when its key is absent; returns {iterator, inserted}.
  //! Does NOT grow the table (callers run check_expand_need() first).
  std::pair<iterator, bool> _doInsert(const value_type& value) noexcept
  {
    const auto key_hash = hash_key(value.first);
    const auto bucket = _findOrAllocate(value.first, key_hash);
    const auto bempty = EMH_EMPTY(bucket);
    if (bempty) {
      EMH_NEW(value.first, value.second, bucket, key_hash);
    }

    const auto slot = m_index[bucket].slot & m_mask;
    return { { this, slot }, bempty };
  }

  //! Rvalue overload: key and value are moved into the new pair.
  std::pair<iterator, bool> _doInsert(value_type&& value) noexcept
  {
    const auto key_hash = hash_key(value.first);
    const auto bucket = _findOrAllocate(value.first, key_hash);
    const auto bempty = EMH_EMPTY(bucket);
    if (bempty) {
      EMH_NEW(std::move(value.first), std::move(value.second), bucket, key_hash);
    }

    const auto slot = m_index[bucket].slot & m_mask;
    return { { this, slot }, bempty };
  }

  //! Key/value overload with perfect forwarding.
  template <typename K, typename V>
  std::pair<iterator, bool> _doInsert(K&& key, V&& val) noexcept
  {
    const auto key_hash = hash_key(key);
    const auto bucket = _findOrAllocate(key, key_hash);
    const auto bempty = EMH_EMPTY(bucket);
    if (bempty) {
      EMH_NEW(std::forward<K>(key), std::forward<V>(val), bucket, key_hash);
    }

    const auto slot = m_index[bucket].slot & m_mask;
    return { { this, slot }, bempty };
  }
1068
1069 template <typename K, typename V>
1070 std::pair<iterator, bool> do_assign(K&& key, V&& val) noexcept
1071 {
1072 check_expand_need();
1073 const auto key_hash = hash_key(key);
1074 const auto bucket = _findOrAllocate(key, key_hash);
1075 const auto bempty = EMH_EMPTY(bucket);
1076 if (bempty) {
1077 EMH_NEW(std::forward<K>(key), std::forward<V>(val), bucket, key_hash);
1078 }
1079 else {
1080 m_pairs[m_index[bucket].slot & m_mask].second = std::forward(val);
1081 }
1082
1083 const auto slot = m_index[bucket].slot & m_mask;
1084 return { { this, slot }, bempty };
1085 }
1086
  //! Inserts a key known to be absent: no search, no overwrite check.
  //! Returns the index bucket used.
  template <typename K, typename V>
  size_type insert_unique(K&& key, V&& val)
  {
    check_expand_need();
    const auto key_hash = hash_key(key);
    auto bucket = _findUniqueBucket(key_hash);
    EMH_NEW(std::forward<K>(key), std::forward<V>(val), bucket, key_hash);
    return bucket;
  }

  size_type insert_unique(value_type&& value)
  {
    return insert_unique(std::move(value.first), std::move(value.second));
  }

  size_type insert_unique(const value_type& value)
  {
    return insert_unique(value.first, value.second);
  }

  // Can we fit another element?
  //! Grows the table if needed; returns true when a rehash occurred.
  bool check_expand_need()
  {
    return reserve(m_num_filled, false);
  }
1112
  static void prefetch_heap_block(char* ctrl)
  {
    // Prefetch the heap-allocated memory region to resolve potential TLB
    // misses. This is intended to overlap with execution of calculating the hash for a key.
#if __linux__
    __builtin_prefetch(static_cast<const void*>(ctrl));
#elif _WIN32
    // TODO: need to fix error:
    // error C2065: '_MM_HINT_T0': undeclared identifier
    //_mm_prefetch((const char*)ctrl, _MM_HINT_T0);
#endif
  }

  //! Bucket of the index entry referencing \a slot (see find_slot_bucket()).
  size_type slot_to_bucket(const size_type slot) const noexcept
  {
    size_type main_bucket;
    return find_slot_bucket(slot, main_bucket); //TODO
  }
1131
  //very slow
  //! Removes the element referenced by index bucket \a sbucket. The last slot
  //! of the dense pair array is moved into the freed slot so the array stays
  //! compact, and that element's index entry is re-pointed accordingly.
  void erase_slot(const size_type sbucket, const size_type main_bucket) noexcept
  {
    const auto slot = m_index[sbucket].slot & m_mask;
    const auto ebucket = erase_bucket(sbucket, main_bucket);
    const auto last_slot = --m_num_filled;
    if (EMH_LIKELY(slot != last_slot)) {
      // m_etail caches the bucket of the last insertion; fall back to the
      // slow slot_to_bucket() lookup when it cannot be trusted.
      const auto last_bucket = (m_etail == INACTIVE || ebucket == m_etail)
      ? slot_to_bucket(last_slot)
      : m_etail;

      m_pairs[slot] = std::move(m_pairs[last_slot]);
      m_index[last_bucket].slot = slot | (m_index[last_bucket].slot & ~m_mask);
    }

    if (is_triviall_destructable())
      m_pairs[last_slot].~value_type();

    m_etail = INACTIVE;
    m_index[ebucket] = { INACTIVE, 0 };
  }

  //! Unlinks \a bucket from its collision chain; returns the index entry that
  //! actually becomes free (erasing the chain head moves the next entry up).
  size_type erase_bucket(const size_type bucket, const size_type main_bucket) noexcept
  {
    const auto next_bucket = m_index[bucket].next;
    if (bucket == main_bucket) {
      if (main_bucket != next_bucket) {
        // Promote the second chain entry into the main bucket.
        const auto nbucket = m_index[next_bucket].next;
        m_index[main_bucket] = {
          (nbucket == next_bucket) ? main_bucket : nbucket,
          m_index[next_bucket].slot
        };
      }
      return next_bucket;
    }

    // Middle/tail of the chain: re-link the predecessor around \a bucket.
    const auto prev_bucket = find_prev_bucket(main_bucket, bucket);
    m_index[prev_bucket].next = (bucket == next_bucket) ? prev_bucket : next_bucket;
    return bucket;
  }
1172
  // Find the slot with this key, or return bucket size
  //! Returns the index bucket referencing \a slot and reports the key's main
  //! bucket in \a main_bucket. Precondition: \a slot is filled, so the chain
  //! walk below always terminates at a match.
  size_type find_slot_bucket(const size_type slot, size_type& main_bucket) const
  {
    const auto key_hash = hash_key(m_pairs[slot].first);
    const auto bucket = main_bucket = size_type(key_hash & m_mask);
    if (slot == (m_index[bucket].slot & m_mask))
      return bucket;

    auto next_bucket = m_index[bucket].next;
    while (true) {
      if (EMH_LIKELY(slot == (m_index[next_bucket].slot & m_mask)))
        return next_bucket;
      next_bucket = m_index[next_bucket].next;
    }

    return INACTIVE; // unreachable (loop above always returns); kept from upstream
  }
1190
  // Find the slot with this key, or return bucket size
  //! Returns the index bucket holding \a key, or INACTIVE when absent.
  size_type find_filled_bucket(const KeyT& key, uint64_t key_hash) const noexcept
  {
    const auto bucket = size_type(key_hash & m_mask);
    auto next_bucket = m_index[bucket].next;
    if (EMH_UNLIKELY((int)next_bucket < 0))
      return INACTIVE; // main bucket empty: key cannot be present

    // Compare the cached hash fragment first to skip most key comparisons.
    const auto slot = m_index[bucket].slot & m_mask;
    //prefetch_heap_block((char*)&m_pairs[slot]);
    if (EMH_EQHASH(bucket, key_hash)) {
      if (EMH_LIKELY(m_eq(key, m_pairs[slot].first)))
        return bucket;
    }
    if (next_bucket == bucket)
      return INACTIVE; // chain of length one, already checked

    // Walk the collision chain; a bucket linking to itself terminates it.
    while (true) {
      if (EMH_EQHASH(next_bucket, key_hash)) {
        const auto slot = m_index[next_bucket].slot & m_mask;
        if (EMH_LIKELY(m_eq(key, m_pairs[slot].first)))
          return next_bucket;
      }

      const auto nbucket = m_index[next_bucket].next;
      if (nbucket == next_bucket)
        return INACTIVE;
      next_bucket = nbucket;
    }

    return INACTIVE;
  }
1223
1224 // Find the slot with this key, or return bucket size
1225 template <typename K = KeyT>
1226 size_type find_filled_slot(const K& key) const noexcept
1227 {
1228 const auto key_hash = hash_key(key);
1229 const auto bucket = size_type(key_hash & m_mask);
1230 auto next_bucket = m_index[bucket].next;
1231 if ((int)next_bucket < 0)
1232 return m_num_filled;
1233
1234 const auto slot = m_index[bucket].slot & m_mask;
1235 //prefetch_heap_block((char*)&m_pairs[slot]);
1236 if (EMH_EQHASH(bucket, key_hash)) {
1237 if (EMH_LIKELY(m_eq(key, m_pairs[slot].first)))
1238 return slot;
1239 }
1240 if (next_bucket == bucket)
1241 return m_num_filled;
1242
1243 while (true) {
1244 if (EMH_EQHASH(next_bucket, key_hash)) {
1245 const auto slot = m_index[next_bucket].slot & m_mask;
1246 if (EMH_LIKELY(m_eq(key, m_pairs[slot].first)))
1247 return slot;
1248 }
1249
1250 const auto nbucket = m_index[next_bucket].next;
1251 if (nbucket == next_bucket)
1252 return m_num_filled;
1253 next_bucket = nbucket;
1254 }
1255
1256 return m_num_filled;
1257 }
1258
  // kick out bucket and find empty to occupy
  // it will break the origin link and relink again.
  // before: main_bucket-->prev_bucket --> bucket --> next_bucket
  // after : main_bucket-->prev_bucket --> (removed)--> new_bucket--> next_bucket
  // Returns 'bucket', which is now empty and free for the caller to reuse.
  size_type kickout_bucket(const size_type kmain, const size_type bucket) noexcept
  {
    const auto next_bucket = m_index[bucket].next;
    const auto new_bucket = _findEmptyBucket(next_bucket, 2);
    const auto prev_bucket = find_prev_bucket(kmain, bucket);

    // If 'bucket' was the chain tail (self-link), the relocated entry becomes
    // the new tail; otherwise it keeps pointing to the old successor.
    const auto last = next_bucket == bucket ? new_bucket : next_bucket;
    m_index[new_bucket] = { last, m_index[bucket].slot };

    m_index[prev_bucket].next = new_bucket;
    m_index[bucket].next = INACTIVE; // mark the vacated bucket empty

    return bucket;
  }
1277
  /*
  ** inserts a new key into a hash table; first, check whether key's main
  ** bucket/position is free. If not, check whether colliding node/bucket is in its main
  ** position or not: if it is not, move colliding bucket to an empty place and
  ** put new key in its main position; otherwise (colliding bucket is in its main
  ** position), new key goes to an empty position.
  */
  // Returns the index-bucket where 'key' either already lives (lookup hit)
  // or where a new entry for it must be stored (the caller distinguishes
  // the two cases by checking the bucket's current state).
  template <typename K = KeyT>
  size_type _findOrAllocate(const K& key, uint64_t key_hash) noexcept
  {
    const auto bucket = size_type(key_hash & m_mask);
    auto next_bucket = m_index[bucket].next;
    prefetch_heap_block((char*)&m_pairs[bucket]);
    // A negative 'next' marks an empty main bucket: store the key right here.
    if ((int)next_bucket < 0) {
      return bucket;
    }

    // Chain head: cheap cached-hash check before the full key comparison.
    const auto slot = m_index[bucket].slot & m_mask;
    if (EMH_EQHASH(bucket, key_hash))
      if (EMH_LIKELY(m_eq(key, m_pairs[slot].first)))
        return bucket;

    //check current bucket_key is in main bucket or not
    const auto kmain = hash_bucket(m_pairs[slot].first);
    if (kmain != bucket)
      // The resident entry does not belong to this main bucket: evict it so
      // the new key can take its home position.
      return kickout_bucket(kmain, bucket);
    else if (next_bucket == bucket)
      // Single-entry chain (self-link): append a fresh empty bucket.
      return m_index[next_bucket].next = _findEmptyBucket(next_bucket, 1);

    uint32_t csize = 1; // chain length so far, passed to the probe as a hint
    //find next linked bucket and check key
    while (true) {
      const auto eslot = m_index[next_bucket].slot & m_mask;
      if (EMH_EQHASH(next_bucket, key_hash)) {
        if (EMH_LIKELY(m_eq(key, m_pairs[eslot].first)))
          return next_bucket;
      }

      csize += 1;
      const auto nbucket = m_index[next_bucket].next;
      if (nbucket == next_bucket)
        break;
      next_bucket = nbucket;
    }

    //find a empty and link it to tail
    const auto new_bucket = _findEmptyBucket(next_bucket, csize);
    prefetch_heap_block((char*)&m_pairs[new_bucket]);
    return m_index[next_bucket].next = new_bucket;
  }
1328
  // Variant of _findOrAllocate() used when the key is known to be absent:
  // locates the bucket where the new entry must be linked without doing any
  // key comparison.
  size_type _findUniqueBucket(uint64_t key_hash) noexcept
  {
    const auto bucket = size_type(key_hash & m_mask);
    auto next_bucket = m_index[bucket].next;
    // A negative 'next' marks an empty main bucket: store the key right here.
    if ((int)next_bucket < 0) {
      return bucket;
    }

    //check current bucket_key is in main bucket or not
    const auto kmain = hash_main(bucket);
    if (EMH_UNLIKELY(kmain != bucket))
      // The resident entry does not belong here: evict it and reuse its bucket.
      return kickout_bucket(kmain, bucket);
    else if (EMH_UNLIKELY(next_bucket != bucket))
      // Multi-entry chain: new entries are appended after the tail.
      next_bucket = find_last_bucket(next_bucket);

    // Link a fresh empty bucket at the end of the chain.
    return m_index[next_bucket].next = _findEmptyBucket(next_bucket, 2);
  }
1346
1347 /***
1348 Different probing techniques usually provide a trade-off between memory locality and avoidance of clustering.
1349 Since Robin Hood hashing is relatively resilient to clustering (both primary and secondary), linear probing is the most cache friendly alternativeis typically used.
1350
1351 It's the core algorithm of this hash map with highly optimization/benchmark.
1352 normaly linear probing is inefficient with high load factor, it use a new 3-way linear
1353 probing strategy to search empty slot. from benchmark even the load factor > 0.9, it's more 2-3 timer fast than
1354 one-way search strategy.
1355
1356 1. linear or quadratic probing a few cache line for less cache miss from input slot "bucket_from".
1357 2. the first search slot from member variant "m_last", init with 0
1358 3. the second search slot from calculated pos "(m_num_filled + m_last) & m_mask", it's like a rand value
1359 */
1360 // key is not in this mavalue. Find a place to put it.
1361 size_type _findEmptyBucket(const size_type bucket_from, uint32_t csize) noexcept
1362 {
1363 (void)csize;
1364
1365 auto bucket = bucket_from;
1366 if (EMH_EMPTY(++bucket) || EMH_EMPTY(++bucket))
1367 return bucket;
1368
1369#ifdef EMH_QUADRATIC
1370 constexpr size_type linear_probe_length = 2 * EMH_CACHE_LINE_SIZE / sizeof(Index); //16
1371 for (size_type offset = csize + 2, step = 4; offset <= linear_probe_length;) {
1372 bucket = (bucket_from + offset) & m_mask;
1373 if (EMH_EMPTY(bucket) || EMH_EMPTY(++bucket))
1374 return bucket;
1375 offset += step; //7/8. 12. 16
1376 }
1377#else
1378 constexpr size_type quadratic_probe_length = 6u;
1379 for (size_type offset = 4u, step = 3u; step < quadratic_probe_length;) {
1380 bucket = (bucket_from + offset) & m_mask;
1381 if (EMH_EMPTY(bucket) || EMH_EMPTY(++bucket))
1382 return bucket;
1383 offset += step++;
1384 }
1385#endif
1386
1387#if EMH_PREFETCH
1388 __builtin_prefetch(static_cast<const void*>(_index + m_last + 1), 0, EMH_PREFETCH);
1389#endif
1390
1391 for (;;) {
1392 m_last &= m_mask;
1393 if (EMH_EMPTY(++m_last)) // || EMH_EMPTY(++m_last))
1394 return m_last;
1395
1396 auto medium = (m_num_buckets / 2 + m_last) & m_mask;
1397 if (EMH_EMPTY(medium)) // || EMH_EMPTY(++medium))
1398 return medium;
1399 }
1400
1401 return 0;
1402 }
1403
1404 size_type find_last_bucket(size_type main_bucket) const
1405 {
1406 auto next_bucket = m_index[main_bucket].next;
1407 if (next_bucket == main_bucket)
1408 return main_bucket;
1409
1410 while (true) {
1411 const auto nbucket = m_index[next_bucket].next;
1412 if (nbucket == next_bucket)
1413 return next_bucket;
1414 next_bucket = nbucket;
1415 }
1416 }
1417
1418 size_type find_prev_bucket(const size_type main_bucket, const size_type bucket) const
1419 {
1420 auto next_bucket = m_index[main_bucket].next;
1421 if (next_bucket == bucket)
1422 return main_bucket;
1423
1424 while (true) {
1425 const auto nbucket = m_index[next_bucket].next;
1426 if (nbucket == bucket)
1427 return next_bucket;
1428 next_bucket = nbucket;
1429 }
1430 }
1431
1432 size_type hash_bucket(const KeyT& key) const noexcept
1433 {
1434 return (size_type)hash_key(key) & m_mask;
1435 }
1436
1437 size_type hash_main(const size_type bucket) const noexcept
1438 {
1439 const auto slot = m_index[bucket].slot & m_mask;
1440 return (size_type)hash_key(m_pairs[slot].first) & m_mask;
1441 }
1442
1443 private:
1444
  // Hash an integral key. The three hash_key() overloads below only differ in
  // their SFINAE condition and in whether the key is passed by value or by
  // const reference; all forward to the user-provided hasher.
  template <typename UType, typename std::enable_if<std::is_integral<UType>::value, uint32_t>::type = 0>
  inline uint64_t hash_key(const UType key) const
  {
    return m_hasher(key);
  }
1450
  // Hash a std::string key (passed by const reference to avoid a copy).
  template <typename UType, typename std::enable_if<std::is_same<UType, std::string>::value, uint32_t>::type = 0>
  inline uint64_t hash_key(const UType& key) const
  {
    return m_hasher(key);
  }
1456
  // Hash any other key type (neither integral nor std::string), by const
  // reference.
  template <typename UType, typename std::enable_if<!std::is_integral<UType>::value && !std::is_same<UType, std::string>::value, uint32_t>::type = 0>
  inline uint64_t hash_key(const UType& key) const
  {
    return m_hasher(key);
  }
1462
1463 private:
1464
  value_type* m_pairs = nullptr; //!< Contiguous array of key/value pairs (the data slots)
  HashT m_hasher; //!< Hash functor for keys
  EqT m_eq; //!< Key equality functor
  Int64 m_pairs_allocated_size = 0; //!< Size in bytes of the allocation behind m_pairs
1469
1470 private:
1471
1472 value_type* _allocBucket(size_type num_buckets)
1473 {
1474 m_pairs_allocated_size = (uint64_t)num_buckets * sizeof(value_type);
1475 AllocatedMemoryInfo mem_info = m_memory_allocator->allocate({}, m_pairs_allocated_size);
1476 return reinterpret_cast<value_type*>(mem_info.baseAddress());
1477 }
1478
1479 void _freeBuckets()
1480 {
1481 m_memory_allocator->deallocate({}, { m_pairs, m_pairs_allocated_size });
1482 m_pairs = nullptr;
1483 m_pairs_allocated_size = 0;
1484 }
1485};
1486
1487/*---------------------------------------------------------------------------*/
1488/*---------------------------------------------------------------------------*/
1489
1490} // namespace Arcane::impl
1491
1492/*---------------------------------------------------------------------------*/
1493/*---------------------------------------------------------------------------*/
1494
1495#undef EMH_EMPTY
1496#undef EMH_EQHASH
1497#undef EMH_NEW
1498
1499/*---------------------------------------------------------------------------*/
1500/*---------------------------------------------------------------------------*/
1501
1502#endif
1503
1504/*---------------------------------------------------------------------------*/
1505/*---------------------------------------------------------------------------*/
Déclarations des types utilisés dans Arcane.
Informations sur une zone mémoire allouée.
void * baseAddress() const
Adresse du début de la zone allouée.
Interface d'un allocateur pour la mémoire.
virtual void deallocate(MemoryAllocationArgs args, AllocatedMemoryInfo ptr)=0
Libère la mémoire dont l'adresse de base est ptr.
Base class for HashTableMap2.
Implementation of std::unordered_map.
bool reserve(uint64_t num_elems, bool force)
Make room for this many elements.
ValueT get_or_return_default(const KeyT &key) const noexcept
Convenience function.
ValueT * try_get(const KeyT &key) const noexcept
Const version of the above.
ValueT set_get(const KeyT &key, const ValueT &val)
Return the old value or ValueT() if it didn't exist.
size_type erase(const KeyT &key) noexcept
ValueT & operator[](const KeyT &key) noexcept
Like std::map<KeyT, ValueT>::operator[].
void clear() noexcept
Remove all elements, keeping full capacity.
bool try_get(const KeyT &key, ValueT &val) const noexcept
Returns the matching ValueT or nullptr if k isn't found.
double load_factor() const
Returns average number of elements per bucket.
ValueT * try_get(const KeyT &key) noexcept
Returns the matching ValueT or nullptr if k isn't found.
bool try_set(const KeyT &key, ValueT &&val) noexcept
set value if key exist
bool try_set(const KeyT &key, const ValueT &val) noexcept
set value if key exist
std::int64_t Int64
Type entier signé sur 64 bits.