Arcane  v3.14.10.0
Documentation développeur
Chargement...
Recherche...
Aucune correspondance
HashTableMap2.h
1// -*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
2//-----------------------------------------------------------------------------
3// Copyright 2000-2024 CEA (www.cea.fr) IFPEN (www.ifpenergiesnouvelles.com)
4// See the top-level COPYRIGHT file for details.
5// SPDX-License-Identifier: Apache-2.0
6//-----------------------------------------------------------------------------
7#ifndef ARCANE_UTILS_HASHTABLEMAP2_H
8#define ARCANE_UTILS_HASHTABLEMAP2_H
9/*---------------------------------------------------------------------------*/
10/*---------------------------------------------------------------------------*/
11
13#include "arccore/collections/IMemoryAllocator.h"
14
15// Version initiale issue du commit bdebddbdce1b473bbc189178fd523ef4a876ea01 (27 aout 2024)
16// emhash8::HashMap for C++14/17
17// version 1.6.5
18// https://github.com/ktprime/emhash/blob/master/hash_table8.hpp
19//
20// Licensed under the MIT License <http://opensource.org/licenses/MIT>.
21// SPDX-License-Identifier: MIT
22// Copyright (c) 2021-2024 Huang Yuanbing & bailuzhou AT 163.com
23//
24// Permission is hereby granted, free of charge, to any person obtaining a copy
25// of this software and associated documentation files (the "Software"), to deal
26// in the Software without restriction, including without limitation the rights
27// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
28// copies of the Software, and to permit persons to whom the Software is
29// furnished to do so, subject to the following conditions:
30//
31// The above copyright notice and this permission notice shall be included in all
32// copies or substantial portions of the Software.
33//
34// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
35// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
36// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
37// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
38// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
39// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
40// SOFTWARE
41
42#include <cstring>
43#include <string>
44#include <cstdlib>
45#include <type_traits>
46#include <cassert>
47#include <utility>
48#include <cstdint>
49#include <functional>
50#include <iterator>
51#include <algorithm>
52#include <memory>
53
54#undef EMH_NEW
55#undef EMH_EMPTY
56
57// likely/unlikely
58#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
59#define EMH_LIKELY(condition) __builtin_expect(condition, 1)
60#define EMH_UNLIKELY(condition) __builtin_expect(condition, 0)
61#else
62#define EMH_LIKELY(condition) condition
63#define EMH_UNLIKELY(condition) condition
64#endif
65
66#define EMH_EMPTY(n) (0 > (int)(m_index[n].next))
67#define EMH_EQHASH(n, key_hash) (((size_type)(key_hash) & ~m_mask) == (m_index[n].slot & ~m_mask))
68#define EMH_NEW(key, val, bucket, key_hash) \
69 new (m_pairs + m_num_filled) value_type(key, val); \
70 m_etail = bucket; \
71 m_index[bucket] = { bucket, m_num_filled++ | ((size_type)(key_hash) & ~m_mask) }
72
73/*---------------------------------------------------------------------------*/
74/*---------------------------------------------------------------------------*/
75
76namespace Arcane::impl
77{
/*!
 * \brief Non-template base class of HashTableMap2.
 *
 * Holds the bucket index array (allocated through an Arcane
 * IMemoryAllocator) and the bookkeeping fields shared by every
 * instantiation of HashTableMap2.
 */
class ARCANE_UTILS_EXPORT HashTableMap2Base
{
 public:

  using size_type = uint32_t;

  //! One entry of the bucket index.
  struct Index
  {
    size_type next; //!< Next bucket of the collision chain.
    size_type slot; //!< Low bits: slot in the pair array; high bits: fragment of the key hash.
  };

 protected:

  //! Number of extra Index entries allocated after the buckets (tail padding).
  constexpr static size_type EAD = 2;

 protected:

  Index* m_index = nullptr; //!< Bucket index array (m_num_buckets + EAD entries).
  uint32_t m_mlf = 0; //!< Scaled max load factor: (1 << 27) / max_load_factor.
  size_type m_mask = 0; //!< Bucket mask (bucket count is a power of two).
  size_type m_num_buckets = 0; //!< Current number of buckets.
  size_type m_num_filled = 0; //!< Number of stored pairs.
  size_type m_last = 0; //!< Hint for the next empty-bucket search.
  size_type m_etail = 0; //!< Last bucket written by an insertion (INACTIVE if none).
  IMemoryAllocator* m_memory_allocator = _defaultAllocator();

 private:

  Int64 m_index_allocated_size = 0; //!< Size in bytes of the allocation behind m_index.

 protected:

  //! Allocates the index array for \a num_buckets buckets plus EAD tail entries.
  void _allocIndex(size_type num_buckets)
  {
    m_index_allocated_size = (uint64_t)(EAD + num_buckets) * sizeof(Index);
    AllocatedMemoryInfo mem_info = m_memory_allocator->allocate({}, m_index_allocated_size);
    m_index = reinterpret_cast<Index*>(mem_info.baseAddress());
  }
  //! Frees the index array and resets the associated fields.
  void _freeIndex()
  {
    m_memory_allocator->deallocate({}, { m_index, m_index_allocated_size });
    m_index = nullptr;
    m_index_allocated_size = 0;
  }

  //! Swaps every base field with \a rhs (used by the derived class swap()).
  void _doSwap(HashTableMap2Base& rhs)
  {
    std::swap(m_index, rhs.m_index);
    std::swap(m_num_buckets, rhs.m_num_buckets);
    std::swap(m_num_filled, rhs.m_num_filled);
    std::swap(m_mask, rhs.m_mask);
    std::swap(m_mlf, rhs.m_mlf);
    std::swap(m_last, rhs.m_last);
    std::swap(m_etail, rhs.m_etail);
    std::swap(m_index_allocated_size, rhs.m_index_allocated_size);
    std::swap(m_memory_allocator, rhs.m_memory_allocator);
  }

  //! Copies the bookkeeping fields from \a rhs. The content of the index
  //! array itself is copied by the caller (see HashTableMap2::clone()).
  void _doClone(const HashTableMap2Base& rhs)
  {
    m_num_buckets = rhs.m_num_buckets;
    m_num_filled = rhs.m_num_filled;
    m_mlf = rhs.m_mlf;
    m_last = rhs.m_last;
    m_mask = rhs.m_mask;
    m_etail = rhs.m_etail;
    m_index_allocated_size = rhs.m_index_allocated_size;
    m_memory_allocator = rhs.m_memory_allocator;
  };

 private:

  static IMemoryAllocator* _defaultAllocator();
};
154
155/*---------------------------------------------------------------------------*/
156/*---------------------------------------------------------------------------*/
162template <typename KeyT, typename ValueT,
163 typename HashT = std::hash<KeyT>,
164 typename EqT = std::equal_to<KeyT>>
166: public HashTableMap2Base
167{
168 constexpr static double EMH_DEFAULT_LOAD_FACTOR = 0.80f;
169 constexpr static double EMH_MIN_LOAD_FACTOR = 0.25f; //< 0.5
170 constexpr static uint32_t EMH_CACHE_LINE_SIZE = 64; //debug only
171
172 public:
173
175 using value_type = std::pair<KeyT, ValueT>;
176 using key_type = KeyT;
177 using mapped_type = ValueT;
178 using hasher = HashT;
179 using key_equal = EqT;
180
181 constexpr static size_type INACTIVE = 0xFFFFFFFF;
182 constexpr static size_type END = 0xFFFFFFFF;
183
184 class const_iterator;
186 {
187 public:
188
189 using iterator_category = std::bidirectional_iterator_tag;
190 using difference_type = std::ptrdiff_t;
191 using value_type = typename htype::value_type;
192 using pointer = value_type*;
193 using const_pointer = const value_type*;
194 using reference = value_type&;
195 using const_reference = const value_type&;
196
197 iterator()
198 : kv_(nullptr)
199 {}
201 {
202 kv_ = cit.kv_;
203 }
204
206 {
207 kv_ = hash_map->m_pairs + (int)bucket;
208 }
209
210 iterator& operator++()
211 {
212 kv_++;
213 return *this;
214 }
215
216 iterator operator++(int)
217 {
218 auto cur = *this;
219 kv_++;
220 return cur;
221 }
222
223 iterator& operator--()
224 {
225 kv_--;
226 return *this;
227 }
228
229 iterator operator--(int)
230 {
231 auto cur = *this;
232 kv_--;
233 return cur;
234 }
235
236 reference operator*() const { return *kv_; }
237 pointer operator->() const { return kv_; }
238
239 bool operator==(const iterator& rhs) const { return kv_ == rhs.kv_; }
240 bool operator!=(const iterator& rhs) const { return kv_ != rhs.kv_; }
241 bool operator==(const const_iterator& rhs) const { return kv_ == rhs.kv_; }
242 bool operator!=(const const_iterator& rhs) const { return kv_ != rhs.kv_; }
243
244 public:
245
246 value_type* kv_;
247 };
248
250 {
251 public:
252
253 using iterator_category = std::bidirectional_iterator_tag;
254 using value_type = typename htype::value_type;
255 using difference_type = std::ptrdiff_t;
256 using pointer = value_type*;
257 using const_pointer = const value_type*;
258 using reference = value_type&;
259 using const_reference = const value_type&;
260
262 {
263 kv_ = it.kv_;
264 }
265
267 {
268 kv_ = hash_map->m_pairs + (int)bucket;
269 }
270
271 const_iterator& operator++()
272 {
273 kv_++;
274 return *this;
275 }
276
277 const_iterator operator++(int)
278 {
279 auto cur = *this;
280 kv_++;
281 return cur;
282 }
283
284 const_iterator& operator--()
285 {
286 kv_--;
287 return *this;
288 }
289
290 const_iterator operator--(int)
291 {
292 auto cur = *this;
293 kv_--;
294 return cur;
295 }
296
297 const_reference operator*() const { return *kv_; }
298 const_pointer operator->() const { return kv_; }
299
300 bool operator==(const iterator& rhs) const { return kv_ == rhs.kv_; }
301 bool operator!=(const iterator& rhs) const { return kv_ != rhs.kv_; }
302 bool operator==(const const_iterator& rhs) const { return kv_ == rhs.kv_; }
303 bool operator!=(const const_iterator& rhs) const { return kv_ != rhs.kv_; }
304
305 public:
306
307 const value_type* kv_;
308 };
309
  //! (Re)initializes an empty table with \a bucket buckets and load factor \a mlf.
  void init(size_type bucket, float mlf = EMH_DEFAULT_LOAD_FACTOR)
  {
    m_pairs = nullptr;
    m_index = nullptr;
    m_mask = m_num_buckets = 0;
    m_num_filled = 0;
    // Start from the default load factor; max_load_factor() below replaces
    // it with \a mlf when the value is inside the accepted range.
    m_mlf = (uint32_t)((1 << 27) / EMH_DEFAULT_LOAD_FACTOR);
    max_load_factor(mlf);
    rehash(bucket);
  }

  //! Builds an empty table able to hold \a bucket elements.
  HashTableMap2(size_type bucket = 2, float mlf = EMH_DEFAULT_LOAD_FACTOR)
  {
    init(bucket, mlf);
  }
325
327 {
328 if (rhs.load_factor() > EMH_MIN_LOAD_FACTOR) {
329 m_pairs = _allocBucket((size_type)(rhs.m_num_buckets * rhs.max_load_factor()) + 4);
330 _allocIndex(rhs.m_num_buckets);
331 clone(rhs);
332 }
333 else {
334 init(rhs.m_num_filled + 2, rhs.max_load_factor());
335 for (auto it = rhs.begin(); it != rhs.end(); ++it)
336 insert_unique(it->first, it->second);
337 }
338 }
339
  //! Move constructor: steals the content of \a rhs and leaves it empty.
  HashTableMap2(HashTableMap2&& rhs) noexcept
  {
    init(0);
    *this = std::move(rhs);
  }

  //! Builds the table from an initializer list of (key,value) pairs.
  HashTableMap2(std::initializer_list<value_type> ilist)
  {
    init((size_type)ilist.size());
    for (auto it = ilist.begin(); it != ilist.end(); ++it)
      _doInsert(*it);
  }

  //! Builds the table from the range [first,last).
  template <class InputIt>
  HashTableMap2(InputIt first, InputIt last, size_type bucket_count = 4)
  {
    init(std::distance(first, last) + bucket_count);
    for (; first != last; ++first)
      emplace(*first);
  }
360
  //! Copy assignment.
  HashTableMap2& operator=(const HashTableMap2& rhs)
  {
    if (this == &rhs)
      return *this;

    // Very sparse source: rebuild compactly instead of cloning the layout.
    if (rhs.load_factor() < EMH_MIN_LOAD_FACTOR) {
      clear();
      _freeBuckets();
      rehash(rhs.m_num_filled + 2);
      for (auto it = rhs.begin(); it != rhs.end(); ++it)
        insert_unique(it->first, it->second);
      return *this;
    }

    // Destroy current pairs, then reuse the storage when sizes match.
    clearkv();

    if (m_num_buckets != rhs.m_num_buckets) {
      _freeIndex();
      _freeBuckets();
      _allocIndex(rhs.m_num_buckets);
      m_pairs = _allocBucket((size_type)(rhs.m_num_buckets * rhs.max_load_factor()) + 4);
    }

    clone(rhs);
    return *this;
  }

  //! Move assignment: swaps with \a rhs, then empties \a rhs.
  HashTableMap2& operator=(HashTableMap2&& rhs) noexcept
  {
    if (this != &rhs) {
      swap(rhs);
      rhs.clear();
    }
    return *this;
  }
396
397 template <typename Con>
398 bool operator==(const Con& rhs) const
399 {
400 if (size() != rhs.size())
401 return false;
402
403 for (auto it = begin(), last = end(); it != last; ++it) {
404 auto oi = rhs.find(it->first);
405 if (oi == rhs.end() || it->second != oi->second)
406 return false;
407 }
408 return true;
409 }
410
  //! Inequality: negation of operator==.
  template <typename Con>
  bool operator!=(const Con& rhs) const
  {
    return !(*this == rhs);
  }

  //! Destroys all stored pairs and releases both storage arrays.
  ~HashTableMap2() noexcept
  {
    clearkv();
    _freeBuckets();
    _freeIndex();
  }
423
  //! Copies the whole content and layout of \a rhs into *this.
  //! Precondition: this table already owns storage of matching sizes.
  void clone(const HashTableMap2& rhs)
  {
    _doClone(rhs);
    m_hasher = rhs.m_hasher;
    m_pairs_allocated_size = rhs.m_pairs_allocated_size;

    auto opairs = rhs.m_pairs;
    // The bucket index is always raw-copyable.
    memcpy((char*)m_index, (char*)rhs.m_index, (m_num_buckets + EAD) * sizeof(Index));

    if (is_copy_trivially()) {
      // Trivially copyable pairs: one bulk copy.
      memcpy((char*)m_pairs, (char*)opairs, m_num_filled * sizeof(value_type));
    }
    else {
      // Otherwise copy-construct each pair in place.
      for (size_type slot = 0; slot < m_num_filled; slot++)
        new (m_pairs + slot) value_type(opairs[slot]);
    }
  }

  //! Swaps the full content of *this and \a rhs (no element copies).
  void swap(HashTableMap2& rhs)
  {
    _doSwap(rhs);
    std::swap(m_hasher, rhs.m_hasher);
    std::swap(m_pairs, rhs.m_pairs);
    std::swap(m_pairs_allocated_size, rhs.m_pairs_allocated_size);
  }
449
  // -------------------------------------------------------------
  //! Iterator on the first stored pair (pairs are stored contiguously).
  iterator first() const
  {
    return { this, 0 };
  }
  //! Iterator on the last stored pair. Precondition: !empty().
  iterator last() const
  {
    return { this, m_num_filled - 1 };
  }

  value_type& front()
  {
    return m_pairs[0];
  }
  const value_type& front() const
  {
    return m_pairs[0];
  }
  value_type& back()
  {
    return m_pairs[m_num_filled - 1];
  }
  const value_type& back() const
  {
    return m_pairs[m_num_filled - 1];
  }

  //! Removes the first element.
  void pop_front()
  {
    erase(begin());
  } //TODO. only erase first without move last
  //! Removes the last element.
  void pop_back()
  {
    erase(last());
  }

  iterator begin()
  {
    return first();
  }
  const_iterator cbegin() const
  {
    return first();
  }
  const_iterator begin() const
  {
    return first();
  }

  iterator end()
  {
    return { this, m_num_filled };
  }
  const_iterator cend() const
  {
    return { this, m_num_filled };
  }
  const_iterator end() const
  {
    return cend();
  }

  //! Raw access to the contiguous pair array (size() entries).
  const value_type* values() const
  {
    return m_pairs;
  }
  //! Raw access to the bucket index array.
  const Index* index() const
  {
    return m_index;
  }

  size_type size() const
  {
    return m_num_filled;
  }
  bool empty() const
  {
    return m_num_filled == 0;
  }
  size_type bucket_count() const
  {
    return m_num_buckets;
  }

  //! Current fill ratio (filled slots over bucket count).
  double load_factor() const
  {
    return static_cast<double>(m_num_filled) / (m_mask + 1);
  }

  // NOTE(review): returns a non-const reference from a const member;
  // presumably m_hasher/m_eq are declared mutable — confirm in the
  // (not shown) member declarations.
  HashT& hash_function() const
  {
    return m_hasher;
  }
  EqT& key_eq() const
  {
    return m_eq;
  }

  //! Sets the maximum load factor; values outside (EMH_MIN_LOAD_FACTOR, 0.992)
  //! are silently ignored. Triggers a rehash when buckets already exist.
  void max_load_factor(double mlf)
  {
    if (mlf < 0.992 && mlf > EMH_MIN_LOAD_FACTOR) {
      m_mlf = (uint32_t)((1 << 27) / mlf);
      if (m_num_buckets > 0)
        rehash(m_num_buckets);
    }
  }

  //! Current maximum load factor (inverse of the scaled m_mlf).
  constexpr double max_load_factor() const
  {
    return (1 << 27) / static_cast<double>(m_mlf);
  }
  constexpr size_type max_size() const
  {
    return (1ull << (sizeof(size_type) * 8 - 1));
  }
  constexpr size_type max_bucket_count() const
  {
    return max_size();
  }
570
  // ------------------------------------------------------------
  //! Returns an iterator on \a key, or end() if absent.
  template <typename K = KeyT>
  iterator find(const K& key) noexcept
  {
    return { this, find_filled_slot(key) };
  }

  template <typename K = KeyT>
  const_iterator find(const K& key) const noexcept
  {
    return { this, find_filled_slot(key) };
  }

  //! Access to the value of \a key.
  //! NOTE(review): unlike std::map::at, no exception is raised when the key
  //! is absent: the returned reference is then m_pairs[m_num_filled].
  template <typename K = KeyT>
  ValueT& at(const K& key)
  {
    const auto slot = find_filled_slot(key);
    //throw
    return m_pairs[slot].second;
  }

  template <typename K = KeyT>
  const ValueT& at(const K& key) const
  {
    const auto slot = find_filled_slot(key);
    //throw
    return m_pairs[slot].second;
  }

  //! Value stored in slot \a index (slots are 0..size()-1, insertion order
  //! modulo erase compaction).
  const ValueT& index(const uint32_t index) const
  {
    return m_pairs[index].second;
  }

  ValueT& index(const uint32_t index)
  {
    return m_pairs[index].second;
  }

  //! True if \a key is present.
  template <typename K = KeyT>
  bool contains(const K& key) const noexcept
  {
    return find_filled_slot(key) != m_num_filled;
  }

  //! 0 or 1: keys are unique in this map.
  template <typename K = KeyT>
  size_type count(const K& key) const noexcept
  {
    return find_filled_slot(key) == m_num_filled ? 0 : 1;
  }
621
622 template <typename K = KeyT>
623 std::pair<iterator, iterator> equal_range(const K& key)
624 {
625 const auto found = find(key);
626 if (found.second == m_num_filled)
627 return { found, found };
628 else
629 return { found, std::next(found) };
630 }
631
  //! Moves into *this every element of \a rhs whose key is not already
  //! present (moved elements are erased from \a rhs, duplicates stay in rhs).
  void merge(HashTableMap2& rhs)
  {
    // Fast path: empty destination just takes over the whole content.
    if (empty()) {
      *this = std::move(rhs);
      return;
    }

    for (auto rit = rhs.begin(); rit != rhs.end();) {
      auto fit = find(rit->first);
      if (fit == end()) {
        insert_unique(rit->first, std::move(rit->second));
        rit = rhs.erase(rit); // erase returns the next position
      }
      else {
        ++rit;
      }
    }
  }
650
  //! Arcane-style alias of insert().
  std::pair<iterator, bool> add(const KeyT& key, const ValueT& value)
  {
    return insert(std::make_pair(key, value));
  }

  //! Inserts \a p if its key is absent. Returns (position, inserted?).
  std::pair<iterator, bool> insert(const value_type& p)
  {
    check_expand_need();
    return _doInsert(p);
  }

  std::pair<iterator, bool> insert(value_type&& p)
  {
    check_expand_need();
    return _doInsert(std::move(p));
  }

  void insert(std::initializer_list<value_type> ilist)
  {
    // Reserve once for the whole batch.
    reserve(ilist.size() + m_num_filled, false);
    for (auto it = ilist.begin(); it != ilist.end(); ++it)
      _doInsert(*it);
  }

  template <typename Iter>
  void insert(Iter first, Iter last)
  {
    reserve(std::distance(first, last) + m_num_filled, false);
    for (; first != last; ++first)
      _doInsert(first->first, first->second);
  }

  //! Same contract as std::unordered_map::emplace.
  template <class... Args>
  std::pair<iterator, bool> emplace(Args&&... args) noexcept
  {
    check_expand_need();
    return _doInsert(std::forward<Args>(args)...);
  }

  //no any optimize for position
  template <class... Args>
  iterator emplace_hint(const_iterator hint, Args&&... args)
  {
    (void)hint;
    check_expand_need();
    return _doInsert(std::forward<Args>(args)...).first;
  }

  template <class... Args>
  std::pair<iterator, bool> try_emplace(const KeyT& k, Args&&... args)
  {
    check_expand_need();
    return _doInsert(k, std::forward<Args>(args)...);
  }

  template <class... Args>
  std::pair<iterator, bool> try_emplace(KeyT&& k, Args&&... args)
  {
    check_expand_need();
    return _doInsert(std::move(k), std::forward<Args>(args)...);
  }

  //! Inserts without checking for a pre-existing key (caller guarantees
  //! uniqueness). Returns the bucket used.
  template <class... Args>
  size_type emplace_unique(Args&&... args)
  {
    return insert_unique(std::forward<Args>(args)...);
  }

  std::pair<iterator, bool> insert_or_assign(const KeyT& key, ValueT&& val)
  {
    return do_assign(key, std::forward<ValueT>(val));
  }
  std::pair<iterator, bool> insert_or_assign(KeyT&& key, ValueT&& val)
  {
    return do_assign(std::move(key), std::forward<ValueT>(val));
  }
727
729 ValueT set_get(const KeyT& key, const ValueT& val)
730 {
731 check_expand_need();
732 const auto key_hash = hash_key(key);
733 const auto bucket = _findOrAllocate(key, key_hash);
734 if (EMH_EMPTY(bucket)) {
735 EMH_NEW(key, val, bucket, key_hash);
736 return ValueT();
737 }
738 else {
739 const auto slot = m_index[bucket].slot & m_mask;
741 std::swap(m_pairs[slot].second, old_value);
742 return old_value;
743 }
744 }
745
  //! Access to the value of \a key, default-constructing it if absent.
  ValueT& operator[](const KeyT& key) noexcept
  {
    check_expand_need();
    const auto key_hash = hash_key(key);
    const auto bucket = _findOrAllocate(key, key_hash);
    if (EMH_EMPTY(bucket)) {
      /* Check if inserting a value rather than overwriting an old entry */
      EMH_NEW(key, std::move(ValueT()), bucket, key_hash);
    }

    // EMH_NEW updated m_index[bucket]; low bits of .slot give the pair slot.
    const auto slot = m_index[bucket].slot & m_mask;
    return m_pairs[slot].second;
  }

  //! Same as above for an rvalue key (the key is moved on insertion).
  ValueT& operator[](KeyT&& key) noexcept
  {
    check_expand_need();
    const auto key_hash = hash_key(key);
    const auto bucket = _findOrAllocate(key, key_hash);
    if (EMH_EMPTY(bucket)) {
      EMH_NEW(std::move(key), std::move(ValueT()), bucket, key_hash);
    }

    const auto slot = m_index[bucket].slot & m_mask;
    return m_pairs[slot].second;
  }
773
  //! Erases \a key. Returns the number of erased elements (0 or 1).
  size_type erase(const KeyT& key) noexcept
  {
    const auto key_hash = hash_key(key);
    const auto sbucket = find_filled_bucket(key, key_hash);
    if (sbucket == INACTIVE)
      return 0;

    const auto main_bucket = key_hash & m_mask;
    erase_slot(sbucket, (size_type)main_bucket);
    return 1;
  }

  //iterator erase(const_iterator begin_it, const_iterator end_it)
  //! Erases the element at \a cit; returns an iterator on the same slot
  //! (which now holds the element moved from the last slot).
  iterator erase(const const_iterator& cit) noexcept
  {
    const auto slot = (size_type)(cit.kv_ - m_pairs);
    size_type main_bucket;
    const auto sbucket = find_slot_bucket(slot, main_bucket); //TODO
    erase_slot(sbucket, main_bucket);
    return { this, slot };
  }

  //only last >= first
  //! Erases the range [first,last). Elements between last and the tail are
  //! erased one by one; the remainder is popped from the tail (cheaper).
  iterator erase(const_iterator first, const_iterator last) noexcept
  {
    auto esize = long(last.kv_ - first.kv_);
    auto tsize = long((m_pairs + m_num_filled) - last.kv_); //last to tail size
    auto next = first;
    while (tsize-- > 0) {
      if (esize-- <= 0)
        break;
      next = ++erase(next);
    }

    //fast erase from last
    next = this->last();
    while (esize-- > 0)
      next = --erase(next);

    return { this, size_type(next.kv_ - m_pairs) };
  }

  //! Erases every element satisfying \a pred; returns the number erased.
  template <typename Pred>
  size_type erase_if(Pred pred)
  {
    auto old_size = size();
    for (auto it = begin(); it != end();) {
      if (pred(*it))
        it = erase(it);
      else
        ++it;
    }
    return old_size - size();
  }
830
  //! True when the pair destructors must be called explicitly.
  static constexpr bool is_triviall_destructable()
  {
#if __cplusplus >= 201402L || _MSC_VER > 1600
    return !(std::is_trivially_destructible<KeyT>::value && std::is_trivially_destructible<ValueT>::value);
#else
    return !(std::is_pod<KeyT>::value && std::is_pod<ValueT>::value);
#endif
  }

  //! True when pairs can be copied with memcpy.
  static constexpr bool is_copy_trivially()
  {
#if __cplusplus >= 201103L || _MSC_VER > 1600
    return (std::is_trivially_copyable<KeyT>::value && std::is_trivially_copyable<ValueT>::value);
#else
    return (std::is_pod<KeyT>::value && std::is_pod<ValueT>::value);
#endif
  }
848
851 {
852 clearkv();
853
854 if (m_num_filled > 0)
855 memset((char*)m_index, INACTIVE, sizeof(m_index[0]) * m_num_buckets);
856
857 m_last = m_num_filled = 0;
858 m_etail = INACTIVE;
859
860 }
861
  //! Shrinks the storage when the table is very sparse.
  void shrink_to_fit(const float min_factor = EMH_DEFAULT_LOAD_FACTOR / 4)
  {
    if (load_factor() < min_factor && bucket_count() > 10) //safe guard
      rehash(m_num_filled + 1);
  }
867
868
871 {
872 (void)force;
873 const auto required_buckets = num_elems * m_mlf >> 27;
874 if (EMH_LIKELY(required_buckets < m_mask)) // && !force
875 return false;
876
877 //assert(required_buckets < max_size());
878 rehash(required_buckets + 2);
879 return true;
880 }
881
  //! When \a required_buckets differs from size(): regular reserve.
  //! When equal: rebuilds the bucket index in place from the stored pairs
  //! (keys keep their slots; chains are recomputed).
  bool reserve(size_type required_buckets) noexcept
  {
    if (m_num_filled != required_buckets)
      return reserve(required_buckets, true);

    m_last = 0;

    memset((char*)m_index, INACTIVE, sizeof(m_index[0]) * m_num_buckets);
    for (size_type slot = 0; slot < m_num_filled; slot++) {
      const auto& key = m_pairs[slot].first;
      const auto key_hash = hash_key(key);
      const auto bucket = size_type(key_hash & m_mask);
      auto& next_bucket = m_index[bucket].next;
      if ((int)next_bucket < 0)
        // First key landing on this bucket: start a chain of length 1.
        m_index[bucket] = { 1, slot | ((size_type)(key_hash) & ~m_mask) };
      else {
        // Collision: merge the hash fragment and count the chain length.
        m_index[bucket].slot |= (size_type)(key_hash) & ~m_mask;
        next_bucket++;
      }
    }
    return true;
  }
904
  //! Rebuilds the table with at least \a required_buckets buckets
  //! (rounded up to a power of two). No-op if smaller than size().
  void rehash(uint64_t required_buckets)
  {
    if (required_buckets < m_num_filled)
      return;

    assert(required_buckets < max_size());
    // Growth start point: 4, or 64K once the table is already large.
    auto num_buckets = m_num_filled > (1u << 16) ? (1u << 16) : 4u;
    while (num_buckets < required_buckets) {
      num_buckets *= 2;
    }
    m_last = 0;

    m_mask = num_buckets - 1;
#if EMH_PACK_TAIL > 1
    m_last = m_mask;
    num_buckets += num_buckets * EMH_PACK_TAIL / 100; //add more 5-10%
#endif
    m_num_buckets = num_buckets;

    // Reallocates both arrays and moves the pairs.
    rebuild(num_buckets);

    // Re-index every stored pair.
    m_etail = INACTIVE;
    for (size_type slot = 0; slot < m_num_filled; ++slot) {
      const auto& key = m_pairs[slot].first;
      const auto key_hash = hash_key(key);
      const auto bucket = _findUniqueBucket(key_hash);
      m_index[bucket] = { bucket, slot | ((size_type)(key_hash) & ~m_mask) };
    }
  }
934
935 private:
936
  //! Destroys every stored pair (no effect for trivially destructible types).
  //! NOTE: for non-trivial types the loop leaves m_num_filled wrapped to
  //! 0xFFFFFFFF; callers (clear, operator=, destructor) reset it afterwards.
  void clearkv()
  {
    if (is_triviall_destructable()) {
      while (m_num_filled--)
        m_pairs[m_num_filled].~value_type();
    }
  }
944
  //! Reallocates the pair array for \a num_buckets buckets, moves the
  //! existing pairs into it, and allocates a fresh (all-empty) index.
  void rebuild(size_type num_buckets) noexcept
  {
    _freeIndex();
    auto new_pairs = _allocBucket((size_type)(num_buckets * max_load_factor()) + 4);
    if (is_copy_trivially()) {
      // Bulk move via memcpy for trivially copyable pairs.
      if (m_pairs)
        memcpy((char*)new_pairs, (char*)m_pairs, m_num_filled * sizeof(value_type));
    }
    else {
      // Move-construct each pair, destroying the source as we go.
      for (size_type slot = 0; slot < m_num_filled; slot++) {
        new (new_pairs + slot) value_type(std::move(m_pairs[slot]));
        if (is_triviall_destructable())
          m_pairs[slot].~value_type();
      }
    }
    _freeBuckets();
    m_pairs = new_pairs;
    _allocIndex(num_buckets);

    // All buckets empty (INACTIVE has every byte set), tail entries zeroed.
    memset((char*)m_index, INACTIVE, sizeof(m_index[0]) * num_buckets);
    memset((char*)(m_index + num_buckets), 0, sizeof(m_index[0]) * EAD);
  }
967
  //! Writes a sentinel pair just past the last filled slot.
  //! NOTE(review): presumably used so that at()/find misses dereference a
  //! valid object — confirm against callers.
  void pack_zero(ValueT zero)
  {
    m_pairs[m_num_filled] = { KeyT(), zero };
  }
972
  //! Copies the value of \a key into \a val; returns false if absent.
  bool try_get(const KeyT& key, ValueT& val) const noexcept
  {
    const auto slot = find_filled_slot(key);
    const auto found = slot != m_num_filled;
    if (found) {
      val = m_pairs[slot].second;
    }
    return found;
  }

  //! Returns a pointer to the value of \a key, or nullptr if absent.
  ValueT* try_get(const KeyT& key) noexcept
  {
    const auto slot = find_filled_slot(key);
    return slot != m_num_filled ? &m_pairs[slot].second : nullptr;
  }

  //! Const overload.
  //! NOTE(review): returns a non-const ValueT* from a const method —
  //! presumably m_pairs is declared mutable; confirm in the member list.
  ValueT* try_get(const KeyT& key) const noexcept
  {
    const auto slot = find_filled_slot(key);
    return slot != m_num_filled ? &m_pairs[slot].second : nullptr;
  }

  //! Overwrites the value of \a key if present; returns false if absent.
  bool try_set(const KeyT& key, const ValueT& val) noexcept
  {
    const auto slot = find_filled_slot(key);
    if (slot == m_num_filled)
      return false;

    m_pairs[slot].second = val;
    return true;
  }

  //! Move-assign variant of try_set.
  bool try_set(const KeyT& key, ValueT&& val) noexcept
  {
    const auto slot = find_filled_slot(key);
    if (slot == m_num_filled)
      return false;

    m_pairs[slot].second = std::move(val);
    return true;
  }

  //! Returns a copy of the value of \a key, or a default ValueT if absent.
  ValueT get_or_return_default(const KeyT& key) const noexcept
  {
    const auto slot = find_filled_slot(key);
    return slot == m_num_filled ? ValueT() : m_pairs[slot].second;
  }
1026
1027 // -----------------------------------------------------
  // -----------------------------------------------------
  //! Inserts \a value if its key is absent. Returns (position, inserted?).
  //! Precondition: capacity already checked (check_expand_need).
  std::pair<iterator, bool> _doInsert(const value_type& value) noexcept
  {
    const auto key_hash = hash_key(value.first);
    const auto bucket = _findOrAllocate(value.first, key_hash);
    const auto bempty = EMH_EMPTY(bucket);
    if (bempty) {
      EMH_NEW(value.first, value.second, bucket, key_hash);
    }

    const auto slot = m_index[bucket].slot & m_mask;
    return { { this, slot }, bempty };
  }

  //! Move variant: key and value are moved only when actually inserted.
  std::pair<iterator, bool> _doInsert(value_type&& value) noexcept
  {
    const auto key_hash = hash_key(value.first);
    const auto bucket = _findOrAllocate(value.first, key_hash);
    const auto bempty = EMH_EMPTY(bucket);
    if (bempty) {
      EMH_NEW(std::move(value.first), std::move(value.second), bucket, key_hash);
    }

    const auto slot = m_index[bucket].slot & m_mask;
    return { { this, slot }, bempty };
  }

  //! Perfect-forwarding variant for separate key and value arguments.
  template <typename K, typename V>
  std::pair<iterator, bool> _doInsert(K&& key, V&& val) noexcept
  {
    const auto key_hash = hash_key(key);
    const auto bucket = _findOrAllocate(key, key_hash);
    const auto bempty = EMH_EMPTY(bucket);
    if (bempty) {
      EMH_NEW(std::forward<K>(key), std::forward<V>(val), bucket, key_hash);
    }

    const auto slot = m_index[bucket].slot & m_mask;
    return { { this, slot }, bempty };
  }
1067
1068 template <typename K, typename V>
1069 std::pair<iterator, bool> do_assign(K&& key, V&& val) noexcept
1070 {
1071 check_expand_need();
1072 const auto key_hash = hash_key(key);
1073 const auto bucket = _findOrAllocate(key, key_hash);
1074 const auto bempty = EMH_EMPTY(bucket);
1075 if (bempty) {
1076 EMH_NEW(std::forward<K>(key), std::forward<V>(val), bucket, key_hash);
1077 }
1078 else {
1079 m_pairs[m_index[bucket].slot & m_mask].second = std::forward(val);
1080 }
1081
1082 const auto slot = m_index[bucket].slot & m_mask;
1083 return { { this, slot }, bempty };
1084 }
1085
  //! Inserts (key,val) assuming the key is NOT already present
  //! (no duplicate check: the caller guarantees uniqueness).
  //! Returns the bucket that received the entry.
  template <typename K, typename V>
  size_type insert_unique(K&& key, V&& val)
  {
    check_expand_need();
    const auto key_hash = hash_key(key);
    auto bucket = _findUniqueBucket(key_hash);
    EMH_NEW(std::forward<K>(key), std::forward<V>(val), bucket, key_hash);
    return bucket;
  }

  size_type insert_unique(value_type&& value)
  {
    return insert_unique(std::move(value.first), std::move(value.second));
  }

  size_type insert_unique(const value_type& value)
  {
    return insert_unique(value.first, value.second);
  }
1105
  // Can we fit another element?
  //! Grows the table if adding one more element would exceed the load factor.
  bool check_expand_need()
  {
    return reserve(m_num_filled, false);
  }

  //! Best-effort prefetch of the index/pair memory (no-op off Linux).
  static void prefetch_heap_block(char* ctrl)
  {
    // Prefetch the heap-allocated memory region to resolve potential TLB
    // misses. This is intended to overlap with execution of calculating the hash for a key.
#if __linux__
    __builtin_prefetch(static_cast<const void*>(ctrl));
#elif _WIN32
    // TODO: need to fix error:
    // error C2065: '_MM_HINT_T0': undeclared identifier
    //_mm_prefetch((const char*)ctrl, _MM_HINT_T0);
#endif
  }

  //! Bucket of the index entry that points at slot \a slot.
  size_type slot_to_bucket(const size_type slot) const noexcept
  {
    size_type main_bucket;
    return find_slot_bucket(slot, main_bucket); //TODO
  }
1130
  //very slow
  //! Erases the entry indexed by \a sbucket (chain head \a main_bucket).
  //! The pair array stays dense: the last pair is moved into the freed slot
  //! and its index entry is re-pointed accordingly.
  void erase_slot(const size_type sbucket, const size_type main_bucket) noexcept
  {
    const auto slot = m_index[sbucket].slot & m_mask;
    const auto ebucket = erase_bucket(sbucket, main_bucket);
    const auto last_slot = --m_num_filled;
    if (EMH_LIKELY(slot != last_slot)) {
      // Find the index entry that referenced the (moved) last slot;
      // m_etail caches it when the last slot was the latest insertion.
      const auto last_bucket = (m_etail == INACTIVE || ebucket == m_etail)
      ? slot_to_bucket(last_slot)
      : m_etail;

      m_pairs[slot] = std::move(m_pairs[last_slot]);
      m_index[last_bucket].slot = slot | (m_index[last_bucket].slot & ~m_mask);
    }

    if (is_triviall_destructable())
      m_pairs[last_slot].~value_type();

    m_etail = INACTIVE;
    m_index[ebucket] = { INACTIVE, 0 };
  }
1152
  //! Unlinks \a bucket from the collision chain starting at \a main_bucket.
  //! Returns the index entry that becomes free.
  size_type erase_bucket(const size_type bucket, const size_type main_bucket) noexcept
  {
    const auto next_bucket = m_index[bucket].next;
    if (bucket == main_bucket) {
      // Erasing the chain head: promote the second entry (if any) into it,
      // so the head bucket keeps matching the keys' hash.
      if (main_bucket != next_bucket) {
        const auto nbucket = m_index[next_bucket].next;
        m_index[main_bucket] = {
          (nbucket == next_bucket) ? main_bucket : nbucket,
          m_index[next_bucket].slot
        };
      }
      return next_bucket;
    }

    // Mid-chain entry: make the predecessor skip over it.
    const auto prev_bucket = find_prev_bucket(main_bucket, bucket);
    m_index[prev_bucket].next = (bucket == next_bucket) ? prev_bucket : next_bucket;
    return bucket;
  }
1171
  // Find the slot with this key, or return bucket size
  //! Returns the index entry pointing at \a slot, and writes the chain head
  //! into \a main_bucket. Precondition: the slot is filled — the loop below
  //! never terminates otherwise (the trailing return is unreachable).
  size_type find_slot_bucket(const size_type slot, size_type& main_bucket) const
  {
    const auto key_hash = hash_key(m_pairs[slot].first);
    const auto bucket = main_bucket = size_type(key_hash & m_mask);
    if (slot == (m_index[bucket].slot & m_mask))
      return bucket;

    auto next_bucket = m_index[bucket].next;
    while (true) {
      if (EMH_LIKELY(slot == (m_index[next_bucket].slot & m_mask)))
        return next_bucket;
      next_bucket = m_index[next_bucket].next;
    }

    return INACTIVE;
  }
1189
  // Find the slot with this key, or return bucket size
  //! Returns the index entry holding \a key, or INACTIVE if absent.
  //! \a key_hash must be hash_key(key).
  size_type find_filled_bucket(const KeyT& key, uint64_t key_hash) const noexcept
  {
    const auto bucket = size_type(key_hash & m_mask);
    auto next_bucket = m_index[bucket].next;
    // Negative 'next' means the bucket is empty.
    if (EMH_UNLIKELY((int)next_bucket < 0))
      return INACTIVE;

    // Compare the hash fragment first (EMH_EQHASH) to avoid most key
    // comparisons; only matching fragments call m_eq.
    const auto slot = m_index[bucket].slot & m_mask;
    //prefetch_heap_block((char*)&m_pairs[slot]);
    if (EMH_EQHASH(bucket, key_hash)) {
      if (EMH_LIKELY(m_eq(key, m_pairs[slot].first)))
        return bucket;
    }
    // A self-loop terminates the chain.
    if (next_bucket == bucket)
      return INACTIVE;

    while (true) {
      if (EMH_EQHASH(next_bucket, key_hash)) {
        const auto slot = m_index[next_bucket].slot & m_mask;
        if (EMH_LIKELY(m_eq(key, m_pairs[slot].first)))
          return next_bucket;
      }

      const auto nbucket = m_index[next_bucket].next;
      if (nbucket == next_bucket)
        return INACTIVE;
      next_bucket = nbucket;
    }

    return INACTIVE;
  }
1222
// Return the slot (index into m_pairs) holding \a key,
// or m_num_filled (one past the last filled slot) if the key is not present.
template <typename K = KeyT>
size_type find_filled_slot(const K& key) const noexcept
{
  const auto key_hash = hash_key(key);
  const auto bucket = size_type(key_hash & m_mask);
  auto next_bucket = m_index[bucket].next;
  // A negative 'next' marks an empty main bucket.
  if ((int)next_bucket < 0)
    return m_num_filled;

  const auto slot = m_index[bucket].slot & m_mask;
  //prefetch_heap_block((char*)&m_pairs[slot]);
  // Cached hash fragment check avoids most full key comparisons.
  if (EMH_EQHASH(bucket, key_hash)) {
    if (EMH_LIKELY(m_eq(key, m_pairs[slot].first)))
      return slot;
  }
  // Single-entry chain: nothing more to scan.
  if (next_bucket == bucket)
    return m_num_filled;

  while (true) {
    if (EMH_EQHASH(next_bucket, key_hash)) {
      const auto slot = m_index[next_bucket].slot & m_mask;
      if (EMH_LIKELY(m_eq(key, m_pairs[slot].first)))
        return slot;
    }

    // Tail reached (self-referencing 'next') without a match.
    const auto nbucket = m_index[next_bucket].next;
    if (nbucket == next_bucket)
      return m_num_filled;
    next_bucket = nbucket;
  }

  return m_num_filled; // unreachable: loop always returns
}
1257
// Kick out the entry stored in \a bucket (which belongs to the chain of main
// bucket \a kmain) and move it to a freshly found empty bucket, so that
// \a bucket can be used as the main bucket of a new key.
// It breaks the original link and relinks:
// before: main_bucket-->prev_bucket --> bucket --> next_bucket
// after : main_bucket-->prev_bucket --> (removed)--> new_bucket--> next_bucket
size_type kickout_bucket(const size_type kmain, const size_type bucket) noexcept
{
  const auto next_bucket = m_index[bucket].next;
  const auto new_bucket = _findEmptyBucket(next_bucket, 2);
  const auto prev_bucket = find_prev_bucket(kmain, bucket);

  // If 'bucket' was the chain tail, the relocated entry becomes the new tail
  // (tail is encoded by 'next' pointing to itself).
  const auto last = next_bucket == bucket ? new_bucket : next_bucket;
  m_index[new_bucket] = { last, m_index[bucket].slot };

  m_index[prev_bucket].next = new_bucket;
  // Mark the vacated bucket empty so the caller can claim it.
  m_index[bucket].next = INACTIVE;

  return bucket;
}
1276
/*
** Inserts a new key into the hash table; first, check whether the key's main
** bucket/position is free. If not, check whether the colliding node/bucket is in its main
** position or not: if it is not, move the colliding bucket to an empty place and
** put the new key in its main position; otherwise (colliding bucket is in its main
** position), the new key goes to an empty position.
**
** Returns either the bucket already holding \a key, or an empty bucket
** (linked into the chain when needed) where the caller can store it.
*/
template <typename K = KeyT>
size_type _findOrAllocate(const K& key, uint64_t key_hash) noexcept
{
  const auto bucket = size_type(key_hash & m_mask);
  auto next_bucket = m_index[bucket].next;
  prefetch_heap_block((char*)&m_pairs[bucket]);
  // Negative 'next' means the main bucket is empty: use it directly.
  if ((int)next_bucket < 0) {
    return bucket;
  }

  // Main bucket occupied: maybe it already holds this key.
  const auto slot = m_index[bucket].slot & m_mask;
  if (EMH_EQHASH(bucket, key_hash))
    if (EMH_LIKELY(m_eq(key, m_pairs[slot].first)))
      return bucket;

  // Check whether the current occupant actually belongs to this main bucket.
  const auto kmain = hash_bucket(m_pairs[slot].first);
  if (kmain != bucket)
    // Occupant is a displaced entry of another chain: evict it and take over.
    return kickout_bucket(kmain, bucket);
  else if (next_bucket == bucket)
    // Single-entry chain: extend it with a new empty bucket.
    return m_index[next_bucket].next = _findEmptyBucket(next_bucket, 1);

  uint32_t csize = 1;
  // Walk the chain, checking each linked bucket for the key.
  while (true) {
    const auto eslot = m_index[next_bucket].slot & m_mask;
    if (EMH_EQHASH(next_bucket, key_hash)) {
      if (EMH_LIKELY(m_eq(key, m_pairs[eslot].first)))
        return next_bucket;
    }

    csize += 1;
    const auto nbucket = m_index[next_bucket].next;
    if (nbucket == next_bucket)
      break;
    next_bucket = nbucket;
  }

  // Key absent: find an empty bucket and link it at the chain tail.
  // 'csize' (chain length) biases the probe start in _findEmptyBucket.
  const auto new_bucket = _findEmptyBucket(next_bucket, csize);
  prefetch_heap_block((char*)&m_pairs[new_bucket]);
  return m_index[next_bucket].next = new_bucket;
}
1327
// Like _findOrAllocate(), but for a key known NOT to be in the table
// (used by unique-insert paths): no key comparisons are performed.
size_type _findUniqueBucket(uint64_t key_hash) noexcept
{
  const auto bucket = size_type(key_hash & m_mask);
  auto next_bucket = m_index[bucket].next;
  // Negative 'next' means the main bucket is empty: use it directly.
  if ((int)next_bucket < 0) {
    return bucket;
  }

  // Check whether the current occupant belongs to this main bucket.
  const auto kmain = hash_main(bucket);
  if (EMH_UNLIKELY(kmain != bucket))
    // Displaced entry from another chain: evict it and take over this bucket.
    return kickout_bucket(kmain, bucket);
  else if (EMH_UNLIKELY(next_bucket != bucket))
    // Multi-entry chain: append after its tail.
    next_bucket = find_last_bucket(next_bucket);

  return m_index[next_bucket].next = _findEmptyBucket(next_bucket, 2);
}
1345
/***
  Different probing techniques usually provide a trade-off between memory locality and avoidance of clustering.
  Since Robin Hood hashing is relatively resilient to clustering (both primary and secondary), linear probing — the most cache-friendly alternative — is typically used.

  It's the core algorithm of this hash map, heavily optimized and benchmarked.
  Normally linear probing is inefficient with a high load factor; this map uses a 3-way linear
  probing strategy to search for an empty slot. Benchmarks show that even at load factor > 0.9 it is 2-3 times faster than
  a one-way search strategy.

  1. linear or quadratic probing over a few cache lines near the input slot "bucket_from" (fewer cache misses).
  2. the first fallback search starts from the member variable "m_last" (initialized to 0).
  3. the second fallback search starts from the calculated position "(m_num_buckets / 2 + m_last) & m_mask", which acts like a random value.
*/
// The key is not in this map. Find a place to put it.
// NOTE: EMH_EMPTY(++bucket) both advances 'bucket' and tests emptiness,
// so the order of the short-circuit operands is significant.
size_type _findEmptyBucket(const size_type bucket_from, uint32_t csize) noexcept
{
  (void)csize;

  // Stage 1: try the two buckets immediately after 'bucket_from'.
  auto bucket = bucket_from;
  if (EMH_EMPTY(++bucket) || EMH_EMPTY(++bucket))
    return bucket;

#ifdef EMH_QUADRATIC
  constexpr size_type linear_probe_length = 2 * EMH_CACHE_LINE_SIZE / sizeof(Index); //16
  for (size_type offset = csize + 2, step = 4; offset <= linear_probe_length;) {
    bucket = (bucket_from + offset) & m_mask;
    if (EMH_EMPTY(bucket) || EMH_EMPTY(++bucket))
      return bucket;
    offset += step; //7/8. 12. 16
  }
#else
  // Short quadratic probe: offsets 4, 7, 11 from 'bucket_from' (pairs of buckets).
  constexpr size_type quadratic_probe_length = 6u;
  for (size_type offset = 4u, step = 3u; step < quadratic_probe_length;) {
    bucket = (bucket_from + offset) & m_mask;
    if (EMH_EMPTY(bucket) || EMH_EMPTY(++bucket))
      return bucket;
    offset += step++;
  }
#endif

#if EMH_PREFETCH
  __builtin_prefetch(static_cast<const void*>(_index + m_last + 1), 0, EMH_PREFETCH);
#endif

  // Stage 2: global fallback scan, alternating between a cursor ('m_last',
  // persisted across calls) and a point half the table away from it.
  // Guaranteed to terminate because the load factor keeps empty buckets available.
  for (;;) {
    m_last &= m_mask;
    if (EMH_EMPTY(++m_last)) // || EMH_EMPTY(++m_last))
      return m_last;

    auto medium = (m_num_buckets / 2 + m_last) & m_mask;
    if (EMH_EMPTY(medium)) // || EMH_EMPTY(++medium))
      return medium;
  }

  return 0; // unreachable: the loop above always returns
}
1402
1403 size_type find_last_bucket(size_type main_bucket) const
1404 {
1405 auto next_bucket = m_index[main_bucket].next;
1406 if (next_bucket == main_bucket)
1407 return main_bucket;
1408
1409 while (true) {
1410 const auto nbucket = m_index[next_bucket].next;
1411 if (nbucket == next_bucket)
1412 return next_bucket;
1413 next_bucket = nbucket;
1414 }
1415 }
1416
1417 size_type find_prev_bucket(const size_type main_bucket, const size_type bucket) const
1418 {
1419 auto next_bucket = m_index[main_bucket].next;
1420 if (next_bucket == bucket)
1421 return main_bucket;
1422
1423 while (true) {
1424 const auto nbucket = m_index[next_bucket].next;
1425 if (nbucket == bucket)
1426 return next_bucket;
1427 next_bucket = nbucket;
1428 }
1429 }
1430
1431 size_type hash_bucket(const KeyT& key) const noexcept
1432 {
1433 return (size_type)hash_key(key) & m_mask;
1434 }
1435
1436 size_type hash_main(const size_type bucket) const noexcept
1437 {
1438 const auto slot = m_index[bucket].slot & m_mask;
1439 return (size_type)hash_key(m_pairs[slot].first) & m_mask;
1440 }
1441
1442 private:
1443
1444 template <typename UType, typename std::enable_if<std::is_integral<UType>::value, uint32_t>::type = 0>
1445 inline uint64_t hash_key(const UType key) const
1446 {
1447 return m_hasher(key);
1448 }
1449
1450 template <typename UType, typename std::enable_if<std::is_same<UType, std::string>::value, uint32_t>::type = 0>
1451 inline uint64_t hash_key(const UType& key) const
1452 {
1453 return m_hasher(key);
1454 }
1455
1456 template <typename UType, typename std::enable_if<!std::is_integral<UType>::value && !std::is_same<UType, std::string>::value, uint32_t>::type = 0>
1457 inline uint64_t hash_key(const UType& key) const
1458 {
1459 return m_hasher(key);
1460 }
1461
1462 private:
1463
1464 value_type* m_pairs = nullptr;
1465 HashT m_hasher;
1466 EqT m_eq;
1467 Int64 m_pairs_allocated_size = 0;
1468
1469 private:
1470
1471 value_type* _allocBucket(size_type num_buckets)
1472 {
1473 m_pairs_allocated_size = (uint64_t)num_buckets * sizeof(value_type);
1474 AllocatedMemoryInfo mem_info = m_memory_allocator->allocate({}, m_pairs_allocated_size);
1475 return reinterpret_cast<value_type*>(mem_info.baseAddress());
1476 }
1477
1478 void _freeBuckets()
1479 {
1480 m_memory_allocator->deallocate({}, { m_pairs, m_pairs_allocated_size });
1481 m_pairs = nullptr;
1482 m_pairs_allocated_size = 0;
1483 }
1484};
1485
1486/*---------------------------------------------------------------------------*/
1487/*---------------------------------------------------------------------------*/
1488
1489} // namespace Arcane::impl
1490
1491/*---------------------------------------------------------------------------*/
1492/*---------------------------------------------------------------------------*/
1493
1494#undef EMH_EMPTY
1495#undef EMH_EQHASH
1496#undef EMH_NEW
1497
1498/*---------------------------------------------------------------------------*/
1499/*---------------------------------------------------------------------------*/
1500
1501#endif
1502
1503/*---------------------------------------------------------------------------*/
1504/*---------------------------------------------------------------------------*/
Déclarations des types utilisés dans Arcane.
Lecteur des fichiers de maillage via la bibliothèque LIMA.
Definition Lima.cc:120
Base class for HashTableMap2.
Implementation of std::unordered_map.
bool reserve(uint64_t num_elems, bool force)
Make room for this many elements.
ValueT get_or_return_default(const KeyT &key) const noexcept
Convenience function.
ValueT * try_get(const KeyT &key) const noexcept
Const version of the above.
ValueT set_get(const KeyT &key, const ValueT &val)
Return the old value or ValueT() if it didn't exist.
size_type erase(const KeyT &key) noexcept
ValueT & operator[](const KeyT &key) noexcept
Like std::map<KeyT, ValueT>::operator[].
void clear() noexcept
Remove all elements, keeping full capacity.
bool try_get(const KeyT &key, ValueT &val) const noexcept
Returns the matching ValueT or nullptr if k isn't found.
double load_factor() const
Returns average number of elements per bucket.
ValueT * try_get(const KeyT &key) noexcept
Returns the matching ValueT or nullptr if k isn't found.
bool try_set(const KeyT &key, ValueT &&val) noexcept
set value if key exist
bool try_set(const KeyT &key, const ValueT &val) noexcept
set value if key exist
Informations sur une zone mémoire allouée.
virtual AllocatedMemoryInfo allocate(MemoryAllocationArgs args, Int64 new_size)
Alloue de la mémoire pour new_size octets et retourne le pointeur.
virtual void deallocate(MemoryAllocationArgs args, AllocatedMemoryInfo ptr)
Libère la mémoire dont l'adresse de base est ptr.