#ifndef _CUCKOOHASH_MAP_HH
#define _CUCKOOHASH_MAP_HH

#include <type_traits>
#include "bucket_container.hh"
template <class Key, class T, class Hash = std::hash<Key>,
          class KeyEqual = std::equal_to<Key>,
          class Allocator = std::allocator<std::pair<const Key, T>>,
          std::size_t SLOT_PER_BUCKET = DEFAULT_SLOT_PER_BUCKET>

  using partial_t = uint8_t;
 
  using key_type = typename buckets_t::key_type;
  using mapped_type = typename buckets_t::mapped_type;
  using size_type = typename buckets_t::size_type;
  using difference_type = std::ptrdiff_t;
  using key_equal = KeyEqual;
  using allocator_type = typename buckets_t::allocator_type;
  using reference = typename buckets_t::reference;
  using const_reference = typename buckets_t::const_reference;
  using pointer = typename buckets_t::pointer;
  using const_pointer = typename buckets_t::const_pointer;
 
                 const KeyEqual &equal = KeyEqual(),
                 const Allocator &alloc = Allocator())
      : hash_fn_(hf), eq_fn_(equal),
        buckets_(reserve_calc(n), alloc),
        old_buckets_(0, alloc),
        num_remaining_lazy_rehash_locks_(0),
        max_num_worker_threads_(0) {
    all_locks_.back().resize(std::min(bucket_count(), size_type(kMaxNumLocks)));
 
  template <typename InputIt>
                 size_type n = DEFAULT_SIZE, const Hash &hf = Hash(),
                 const KeyEqual &equal = KeyEqual(),
                 const Allocator &alloc = Allocator())
    for (; first != last; ++first) {
      insert(first->first, first->second);
 
      : hash_fn_(other.hash_fn_), eq_fn_(other.eq_fn_),
        buckets_(other.buckets_, alloc),
        old_buckets_(other.old_buckets_, alloc),
        num_remaining_lazy_rehash_locks_(
            other.num_remaining_lazy_rehash_locks_),
        minimum_load_factor_(other.minimum_load_factor_),
        maximum_hashpower_(other.maximum_hashpower_),
        max_num_worker_threads_(other.max_num_worker_threads_) {
      all_locks_ = other.all_locks_;
      add_locks_from_other(other);
 
      : hash_fn_(std::move(other.hash_fn_)), eq_fn_(std::move(other.eq_fn_)),
        buckets_(std::move(other.buckets_), alloc),
        old_buckets_(std::move(other.old_buckets_), alloc),
        num_remaining_lazy_rehash_locks_(
            other.num_remaining_lazy_rehash_locks_),
        minimum_load_factor_(other.minimum_load_factor_),
        maximum_hashpower_(other.maximum_hashpower_),
        max_num_worker_threads_(other.max_num_worker_threads_) {
    if (other.get_allocator() == alloc) {
      all_locks_ = std::move(other.all_locks_);
      add_locks_from_other(other);
 
                 size_type n = DEFAULT_SIZE, const Hash &hf = Hash(),
                 const KeyEqual &equal = KeyEqual(),
                 const Allocator &alloc = Allocator())
      : cuckoohash_map(init.begin(), init.end(), n, hf, equal, alloc) {}
 
    std::swap(hash_fn_, other.hash_fn_);
    std::swap(eq_fn_, other.eq_fn_);
    buckets_.swap(other.buckets_);
    all_locks_.swap(other.all_locks_);
    other.minimum_load_factor_.store(
        minimum_load_factor_.exchange(other.minimum_load_factor(),
                                      std::memory_order_release),
        std::memory_order_release);
    other.maximum_hashpower_.store(
        maximum_hashpower_.exchange(other.maximum_hashpower(),
                                    std::memory_order_release),
        std::memory_order_release);
 
    for (const auto &item : ilist) {
      insert(item.first, item.second);
 
  key_equal key_eq() const { return eq_fn_; }

  size_type hashpower() const { return buckets_.hashpower(); }

    if (all_locks_.size() == 0) {
    for (spinlock &lock : get_current_locks()) {
      s += lock.elem_counter();
    return static_cast<size_type>(s);

    return static_cast<double>(size()) / static_cast<double>(capacity());
 
      throw std::invalid_argument("load factor " + std::to_string(mlf) +
    } else if (mlf > 1.0) {
      throw std::invalid_argument("load factor " + std::to_string(mlf) +
    minimum_load_factor_.store(mlf, std::memory_order_release);

    return minimum_load_factor_.load(std::memory_order_acquire);

      throw std::invalid_argument("maximum hashpower " + std::to_string(mhp) +
                                  " is less than current hashpower");
    maximum_hashpower_.store(mhp, std::memory_order_release);

    return maximum_hashpower_.load(std::memory_order_acquire);

    max_num_worker_threads_.store(extra_threads, std::memory_order_release);

    return max_num_worker_threads_.load(std::memory_order_acquire);
 
  template <typename K, typename F> bool find_fn(const K &key, F fn) const {
    const hash_value hv = hashed_key(key);
    const auto b = snapshot_and_lock_two<normal_mode>(hv);
    const table_position pos = cuckoo_find(key, hv.partial, b.i1, b.i2);
    if (pos.status == ok) {
      fn(buckets_[pos.index].mapped(pos.slot));
 
  template <typename K, typename F> bool update_fn(const K &key, F fn) {
    const hash_value hv = hashed_key(key);
    const auto b = snapshot_and_lock_two<normal_mode>(hv);
    const table_position pos = cuckoo_find(key, hv.partial, b.i1, b.i2);
    if (pos.status == ok) {
      fn(buckets_[pos.index].mapped(pos.slot));
 
  template <typename K, typename F> bool erase_fn(const K &key, F fn) {
    const hash_value hv = hashed_key(key);
    const auto b = snapshot_and_lock_two<normal_mode>(hv);
    const table_position pos = cuckoo_find(key, hv.partial, b.i1, b.i2);
    if (pos.status == ok) {
      if (fn(buckets_[pos.index].mapped(pos.slot))) {
        del_from_bucket(pos.index, pos.slot);
 
  template <typename K, typename F, typename... Args>
    hash_value hv = hashed_key(key);
    auto b = snapshot_and_lock_two<normal_mode>(hv);
    table_position pos = cuckoo_insert_loop<normal_mode>(hv, b, key);
    if (pos.status == ok) {
      add_to_bucket(pos.index, pos.slot, hv.partial, std::forward<K>(key),
                    std::forward<Args>(val)...);
      upsert_context = UpsertContext::NEWLY_INSERTED;
      upsert_context = UpsertContext::ALREADY_EXISTED;
    using CanInvokeWithUpsertContextT =
        typename internal::CanInvokeWithUpsertContext<F, mapped_type>::type;
    if (internal::InvokeUpraseFn(fn, buckets_[pos.index].mapped(pos.slot),
                                 CanInvokeWithUpsertContextT{})) {
      del_from_bucket(pos.index, pos.slot);
    return pos.status == ok;
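  // Note on uprase_fn (above): the key is inserted first if absent, then fn is
  // invoked on the mapped value (with an UpsertContext tag when the functor
  // accepts one); if fn returns true the entry is erased again. The return
  // value reports whether a new insertion took place.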
 
  template <typename K, typename F, typename... Args>
  bool upsert(K &&key, F fn, Args &&... val) {
    constexpr bool kCanInvokeWithUpsertContext =
        std::forward<K>(key),
        std::forward<Args>(val)...);
 
  template <typename K> bool find(const K &key, mapped_type &val) const {
    return find_fn(key, [&val](const mapped_type &v) mutable { val = v; });
 
  template <typename K> mapped_type find(const K &key) const {
    const hash_value hv = hashed_key(key);
    const auto b = snapshot_and_lock_two<normal_mode>(hv);
    const table_position pos = cuckoo_find(key, hv.partial, b.i1, b.i2);
    if (pos.status == ok) {
      return buckets_[pos.index].mapped(pos.slot);
      throw std::out_of_range("key not found in table");
 
  template <typename K> bool contains(const K &key) const {
    return find_fn(key, [](const mapped_type &) {});
 
  template <typename K, typename V> bool update(const K &key, V &&val) {
    return update_fn(key, [&val](mapped_type &v) { v = std::forward<V>(val); });
 
  template <typename K, typename... Args> bool insert(K &&key, Args &&... val) {
    return upsert(std::forward<K>(key), [](mapped_type &) {},
                  std::forward<Args>(val)...);

    return upsert(std::forward<K>(key), [&val](mapped_type &m) { m = val; },
                  std::forward<V>(val));
 
  template <typename K> bool erase(const K &key) {
    return erase_fn(key, [](mapped_type &) { return true; });
 
  bool rehash(size_type n) { return cuckoo_rehash<normal_mode>(n); }

  bool reserve(size_type n) { return cuckoo_reserve<normal_mode>(n); }
 
    auto all_locks_manager = lock_all(normal_mode());
 
    locks_t &other_locks = other.get_current_locks();
    all_locks_.back().resize(other_locks.size());
    std::copy(other_locks.begin(), other_locks.end(),
              get_current_locks().begin());
 
  static constexpr bool is_simple() {
    return std::is_standard_layout<key_type>::value &&
           std::is_trivial<key_type>::value &&
           sizeof(key_type) <= 8;

  static constexpr bool is_data_nothrow_move_constructible() {
    return std::is_nothrow_move_constructible<key_type>::value &&
           std::is_nothrow_move_constructible<mapped_type>::value;
 
  template <typename K> hash_value hashed_key(const K &key) const {
    return {hash, partial_key(hash)};

  template <typename K> size_type hashed_key_only_hash(const K &key) const {
 
  static inline size_type hashsize(const size_type hp) {
    return size_type(1) << hp;

  static inline size_type hashmask(const size_type hp) {
    return hashsize(hp) - 1;
 
  static partial_t partial_key(const size_type hash) {
    const uint64_t hash_64bit = hash;
    const uint32_t hash_32bit = (static_cast<uint32_t>(hash_64bit) ^
                                 static_cast<uint32_t>(hash_64bit >> 32));
    const uint16_t hash_16bit = (static_cast<uint16_t>(hash_32bit) ^
                                 static_cast<uint16_t>(hash_32bit >> 16));
    const uint8_t hash_8bit = (static_cast<uint8_t>(hash_16bit) ^
                               static_cast<uint8_t>(hash_16bit >> 8));
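  // partial_key (above) XOR-folds the hash from 64 to 32 to 16 to 8 bits, so
  // every bit of the original hash influences the one-byte partial_t
  // fingerprint stored alongside each slot.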
 
  static inline size_type index_hash(const size_type hp, const size_type hv) {
    return hv & hashmask(hp);
 
  static inline size_type alt_index(const size_type hp, const partial_t partial,
                                    const size_type index) {
    const size_type nonzero_tag = static_cast<size_type>(partial) + 1;
    return (index ^ (nonzero_tag * 0xc6a4a7935bd1e995)) & hashmask(hp);
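  // alt_index (above) derives the second candidate bucket by XOR-ing the index
  // with a nonzero multiple of a mixing constant. Because XOR is its own
  // inverse, applying alt_index twice with the same partial returns the
  // original index, so either bucket of a pair can recover the other.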
 
  using counter_type = int64_t;

    spinlock() : elem_counter_(0), is_migrated_(true) { lock_.clear(); }

    spinlock(const spinlock &other) noexcept
        : elem_counter_(other.elem_counter()),
          is_migrated_(other.is_migrated()) {

    spinlock &operator=(const spinlock &other) noexcept {
      elem_counter() = other.elem_counter();
      is_migrated() = other.is_migrated();

    void lock() noexcept {
      while (lock_.test_and_set(std::memory_order_acq_rel))

    void unlock() noexcept { lock_.clear(std::memory_order_release); }

    bool try_lock() noexcept {
      return !lock_.test_and_set(std::memory_order_acq_rel);

    counter_type &elem_counter() noexcept { return elem_counter_; }
    counter_type elem_counter() const noexcept { return elem_counter_; }
    bool &is_migrated() noexcept { return is_migrated_; }
    bool is_migrated() const noexcept { return is_migrated_; }

    std::atomic_flag lock_;
    counter_type elem_counter_;
 
  template <typename U>
      typename std::allocator_traits<allocator_type>::template rebind_alloc<U>;

  using locks_t = std::vector<spinlock, rebind_alloc<spinlock>>;
  using all_locks_t = std::list<locks_t, rebind_alloc<locks_t>>;

    void operator()(spinlock *l) const { l->unlock(); }

  using LockManager = std::unique_ptr<spinlock, LockDeleter>;
 
  using locked_table_mode = std::integral_constant<bool, true>;
  using normal_mode = std::integral_constant<bool, false>;

    TwoBuckets(size_type i1_, size_type i2_, locked_table_mode)
        : i1(i1_), i2(i2_) {}
    TwoBuckets(locks_t &locks, size_type i1_, size_type i2_, normal_mode)
        : i1(i1_), i2(i2_), first_manager_(&locks[lock_ind(i1)]),
          second_manager_((lock_ind(i1) != lock_ind(i2)) ? &locks[lock_ind(i2)]

      first_manager_.reset();
      second_manager_.reset();

    LockManager first_manager_, second_manager_;
 
      for (auto it = first_locked; it != map->all_locks_.end(); ++it) {
        locks_t &locks = *it;
        for (spinlock &lock : locks) {

    typename all_locks_t::iterator first_locked;

  using AllLocksManager = std::unique_ptr<cuckoohash_map, AllUnlocker>;
 
  class hashpower_changed {};

  inline void check_hashpower(size_type hp, spinlock &lock) const {
      throw hashpower_changed();
 
  static constexpr bool kIsLazy = true;
  static constexpr bool kIsNotLazy = false;

  template <bool IS_LAZY>
  void rehash_lock(size_t l) const noexcept {
    locks_t &locks = get_current_locks();
    spinlock &lock = locks[l];
    if (lock.is_migrated()) return;

    assert(is_data_nothrow_move_constructible());
    assert(locks.size() == kMaxNumLocks);
    assert(old_buckets_.hashpower() + 1 == buckets_.hashpower());
    assert(old_buckets_.size() >= kMaxNumLocks);
    for (size_type bucket_ind = l; bucket_ind < old_buckets_.size();
         bucket_ind += kMaxNumLocks) {
      move_bucket(old_buckets_, buckets_, bucket_ind);
    lock.is_migrated() = true;
      decrement_num_remaining_lazy_rehash_locks();
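  // rehash_lock (above) drives lazy migration after a fast table double: the
  // lock stripe at index l owns every old bucket whose index is congruent to l
  // modulo kMaxNumLocks and moves those buckets into buckets_ the first time
  // the stripe is taken (is_migrated() guards against repeating the work).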
 
  LockManager lock_one(size_type, size_type, locked_table_mode) const {
    return LockManager();

  LockManager lock_one(size_type hp, size_type i, normal_mode) const {
    locks_t &locks = get_current_locks();
    const size_type l = lock_ind(i);
    spinlock &lock = locks[l];
    check_hashpower(hp, lock);
    rehash_lock<kIsLazy>(l);
    return LockManager(&lock);
 
  TwoBuckets lock_two(size_type, size_type i1, size_type i2,
                      locked_table_mode) const {
    return TwoBuckets(i1, i2, locked_table_mode());

  TwoBuckets lock_two(size_type hp, size_type i1, size_type i2,
                      normal_mode) const {
    size_type l1 = lock_ind(i1);
    size_type l2 = lock_ind(i2);
    locks_t &locks = get_current_locks();
    check_hashpower(hp, locks[l1]);
    rehash_lock<kIsLazy>(l1);
    rehash_lock<kIsLazy>(l2);
    return TwoBuckets(locks, i1, i2, normal_mode());
 
  std::pair<TwoBuckets, LockManager> lock_three(size_type, size_type i1,
                                                size_type i2, size_type,
                                                locked_table_mode) const {
    return std::make_pair(TwoBuckets(i1, i2, locked_table_mode()),

  std::pair<TwoBuckets, LockManager> lock_three(size_type hp, size_type i1,
                                                size_type i2, size_type i3,
                                                normal_mode) const {
    std::array<size_type, 3> l{{lock_ind(i1), lock_ind(i2), lock_ind(i3)}};
      std::swap(l[2], l[1]);
      std::swap(l[2], l[0]);
      std::swap(l[1], l[0]);
    locks_t &locks = get_current_locks();
    check_hashpower(hp, locks[l[0]]);
    rehash_lock<kIsLazy>(l[0]);
    rehash_lock<kIsLazy>(l[1]);
    rehash_lock<kIsLazy>(l[2]);
    return std::make_pair(TwoBuckets(locks, i1, i2, normal_mode()),
                          LockManager((lock_ind(i3) == lock_ind(i1) ||
                                       lock_ind(i3) == lock_ind(i2))
                                          : &locks[lock_ind(i3)]));
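  // The swaps above act as a three-element sorting network on the lock
  // indices, so the three locks are always acquired in a consistent order,
  // which avoids deadlock between threads locking overlapping bucket triples;
  // a duplicate index is only locked once (hence the null LockManager).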
 
  template <typename TABLE_MODE>
  TwoBuckets snapshot_and_lock_two(const hash_value &hv) const {
      const size_type i1 = index_hash(hp, hv.hash);
      const size_type i2 = alt_index(hp, hv.partial, i1);
        return lock_two(hp, i1, i2, TABLE_MODE());
      } catch (hashpower_changed &) {
 
  AllLocksManager lock_all(locked_table_mode) {
    return AllLocksManager();

  AllLocksManager lock_all(normal_mode) {
    assert(!all_locks_.empty());
    const auto first_locked = std::prev(all_locks_.end());
    auto current_locks = first_locked;
    while (current_locks != all_locks_.end()) {
      locks_t &locks = *current_locks;
      for (spinlock &lock : locks) {
    return AllLocksManager(this, AllUnlocker{first_locked});
 
  static inline size_type lock_ind(const size_type bucket_ind) {
    return bucket_ind & (kMaxNumLocks - 1);
 
  using bucket = typename buckets_t::bucket;

  enum cuckoo_status {
    failure_key_not_found,
    failure_key_duplicated,
    failure_under_expansion,

  struct table_position {
    cuckoo_status status;
 
  template <typename K>
  table_position cuckoo_find(const K &key, const partial_t partial,
                             const size_type i1, const size_type i2) const {
    int slot = try_read_from_bucket(buckets_[i1], partial, key);
      return table_position{i1, static_cast<size_type>(slot), ok};
    slot = try_read_from_bucket(buckets_[i2], partial, key);
      return table_position{i2, static_cast<size_type>(slot), ok};
    return table_position{0, 0, failure_key_not_found};
 
  template <typename K>
  int try_read_from_bucket(const bucket &b, const partial_t partial,
                           const K &key) const {
      if (!b.occupied(i) || (!is_simple() && partial != b.partial(i))) {
      } else if (key_eq()(b.key(i), key)) {
 
  template <typename TABLE_MODE, typename K>
  table_position cuckoo_insert_loop(hash_value hv, TwoBuckets &b, K &key) {
      pos = cuckoo_insert<TABLE_MODE>(hv, b, key);
      switch (pos.status) {
      case failure_key_duplicated:
      case failure_table_full:
        cuckoo_fast_double<TABLE_MODE, automatic_resize>(hp);
        b = snapshot_and_lock_two<TABLE_MODE>(hv);
      case failure_under_expansion:
        b = snapshot_and_lock_two<TABLE_MODE>(hv);
 
  template <typename TABLE_MODE, typename K>
  table_position cuckoo_insert(const hash_value hv, TwoBuckets &b, K &key) {
    bucket &b1 = buckets_[b.i1];
    if (!try_find_insert_bucket(b1, res1, hv.partial, key)) {
      return table_position{b.i1, static_cast<size_type>(res1),
                            failure_key_duplicated};
    bucket &b2 = buckets_[b.i2];
    if (!try_find_insert_bucket(b2, res2, hv.partial, key)) {
      return table_position{b.i2, static_cast<size_type>(res2),
                            failure_key_duplicated};
      return table_position{b.i1, static_cast<size_type>(res1), ok};
      return table_position{b.i2, static_cast<size_type>(res2), ok};

    size_type insert_bucket = 0;
    size_type insert_slot = 0;
    cuckoo_status st = run_cuckoo<TABLE_MODE>(b, insert_bucket, insert_slot);
    if (st == failure_under_expansion) {
      return table_position{0, 0, failure_under_expansion};
    } else if (st == ok) {
      assert(TABLE_MODE() == locked_table_mode() ||
             !get_current_locks()[lock_ind(b.i1)].try_lock());
      assert(TABLE_MODE() == locked_table_mode() ||
             !get_current_locks()[lock_ind(b.i2)].try_lock());
      assert(!buckets_[insert_bucket].occupied(insert_slot));
      assert(insert_bucket == index_hash(hashpower(), hv.hash) ||
             insert_bucket == alt_index(hashpower(), hv.partial,
      table_position pos = cuckoo_find(key, hv.partial, b.i1, b.i2);
      if (pos.status == ok) {
        pos.status = failure_key_duplicated;
      return table_position{insert_bucket, insert_slot, ok};
    assert(st == failure);
    LIBCUCKOO_DBG("hash table is full (hashpower = %zu, hash_items = %zu,"
                  "load factor = %.2f), need to increase hashpower\n",
    return table_position{0, 0, failure_table_full};
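  // cuckoo_insert (above) first looks for a free slot (or a duplicate key) in
  // the two candidate buckets; failing that, run_cuckoo evicts entries along a
  // BFS-discovered path to free a slot in one of them. A failed search returns
  // failure_table_full, which cuckoo_insert_loop turns into a table double.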
 
  template <typename K, typename... Args>
  void add_to_bucket(const size_type bucket_ind, const size_type slot,
                     const partial_t partial, K &&key, Args &&... val) {
    buckets_.setKV(bucket_ind, slot, partial, std::forward<K>(key),
                   std::forward<Args>(val)...);
    ++get_current_locks()[lock_ind(bucket_ind)].elem_counter();
 
  template <typename K>
  bool try_find_insert_bucket(const bucket &b, int &slot,
                              const partial_t partial, const K &key) const {
      if (b.occupied(i)) {
        if (!is_simple() && partial != b.partial(i)) {
        if (key_eq()(b.key(i), key)) {
 
  static constexpr uint8_t MAX_BFS_PATH_LEN = 5;

  using CuckooRecords = std::array<CuckooRecord, MAX_BFS_PATH_LEN>;
 
  template <typename TABLE_MODE>
  cuckoo_status run_cuckoo(TwoBuckets &b, size_type &insert_bucket,
                           size_type &insert_slot) {
    CuckooRecords cuckoo_path;
            cuckoopath_search<TABLE_MODE>(hp, cuckoo_path, b.i1, b.i2);
        if (cuckoopath_move<TABLE_MODE>(hp, cuckoo_path, depth, b)) {
          insert_bucket = cuckoo_path[0].bucket;
          insert_slot = cuckoo_path[0].slot;
          assert(insert_bucket == b.i1 || insert_bucket == b.i2);
          assert(TABLE_MODE() == locked_table_mode() ||
                 !get_current_locks()[lock_ind(b.i1)].try_lock());
          assert(TABLE_MODE() == locked_table_mode() ||
                 !get_current_locks()[lock_ind(b.i2)].try_lock());
          assert(!buckets_[insert_bucket].occupied(insert_slot));
    } catch (hashpower_changed &) {
      return failure_under_expansion;
    return done ? ok : failure;
 
  template <typename TABLE_MODE>
  int cuckoopath_search(const size_type hp, CuckooRecords &cuckoo_path,
                        const size_type i1, const size_type i2) {
    b_slot x = slot_search<TABLE_MODE>(hp, i1, i2);
    if (x.depth == -1) {
    for (int i = x.depth; i >= 0; i--) {
    CuckooRecord &first = cuckoo_path[0];
    if (x.pathcode == 0) {
      assert(x.pathcode == 1);
      const auto lock_manager = lock_one(hp, first.bucket, TABLE_MODE());
      const bucket &b = buckets_[first.bucket];
      if (!b.occupied(first.slot)) {
      first.hv = hashed_key(b.key(first.slot));
    for (int i = 1; i <= x.depth; ++i) {
      CuckooRecord &curr = cuckoo_path[i];
      const CuckooRecord &prev = cuckoo_path[i - 1];
      assert(prev.bucket == index_hash(hp, prev.hv.hash) ||
                 alt_index(hp, prev.hv.partial, index_hash(hp, prev.hv.hash)));
      curr.bucket = alt_index(hp, prev.hv.partial, prev.bucket);
      const auto lock_manager = lock_one(hp, curr.bucket, TABLE_MODE());
      const bucket &b = buckets_[curr.bucket];
      if (!b.occupied(curr.slot)) {
      curr.hv = hashed_key(b.key(curr.slot));
 
  template <typename TABLE_MODE>
  bool cuckoopath_move(const size_type hp, CuckooRecords &cuckoo_path,
                       size_type depth, TwoBuckets &b) {
      const size_type bucket_i = cuckoo_path[0].bucket;
      assert(bucket_i == b.i1 || bucket_i == b.i2);
      b = lock_two(hp, b.i1, b.i2, TABLE_MODE());
      if (!buckets_[bucket_i].occupied(cuckoo_path[0].slot)) {
      CuckooRecord &from = cuckoo_path[depth - 1];
      CuckooRecord &to = cuckoo_path[depth];
      const size_type fs = from.slot;
      const size_type ts = to.slot;
      LockManager extra_manager;
        std::tie(twob, extra_manager) =
            lock_three(hp, b.i1, b.i2, to.bucket, TABLE_MODE());
        twob = lock_two(hp, from.bucket, to.bucket, TABLE_MODE());
      bucket &fb = buckets_[from.bucket];
      bucket &tb = buckets_[to.bucket];
      if (tb.occupied(ts) || !fb.occupied(fs) ||
          hashed_key_only_hash(fb.key(fs)) != from.hv.hash) {
      buckets_.setKV(to.bucket, ts, fb.partial(fs), fb.movable_key(fs),
                     std::move(fb.mapped(fs)));
      buckets_.eraseKV(from.bucket, fs);
        b = std::move(twob);
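  // cuckoopath_move (above) replays the path found by cuckoopath_search one
  // step at a time, moving the entry at cuckoo_path[depth - 1] into the free
  // slot recorded at cuckoo_path[depth] while holding the two (or three)
  // relevant locks. Because the search ran without keeping locks, each step
  // re-checks occupancy and the stored hash and fails if the table changed.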
 
  static constexpr size_type const_pow(size_type a, size_type b) {
    return (b == 0) ? 1 : a * const_pow(a, b - 1);

                      std::numeric_limits<decltype(pathcode)>::max(),
                  "pathcode may not be large enough to encode a cuckoo "
    static_assert(MAX_BFS_PATH_LEN - 1 <=
                      std::numeric_limits<decltype(depth)>::max(),
                  "The depth type must be able to hold a value of"
                  " MAX_BFS_PATH_LEN - 1");
    static_assert(-1 >= std::numeric_limits<decltype(depth)>::min(),
                  "The depth type must be able to hold a value of -1");

    b_slot(const size_type b, const uint16_t p, const decltype(depth) d)
        : bucket(b), pathcode(p), depth(d) {
      assert(d < MAX_BFS_PATH_LEN);
 
    b_queue() noexcept : first_(0), last_(0) {}

    void enqueue(b_slot x) {
      slots_[last_++] = x;

      assert(first_ < last_);
      b_slot &x = slots_[first_++];

    bool empty() const { return first_ == last_; }
    bool full() const { return last_ == MAX_CUCKOO_COUNT; }

                  "SLOT_PER_BUCKET must be greater than 0.");
    static constexpr size_type MAX_CUCKOO_COUNT =
    b_slot slots_[MAX_CUCKOO_COUNT];
 
  template <typename TABLE_MODE>
  b_slot slot_search(const size_type hp, const size_type i1,
                     const size_type i2) {
    q.enqueue(b_slot(i1, 0, 0));
    q.enqueue(b_slot(i2, 1, 0));
    while (!q.empty()) {
      b_slot x = q.dequeue();
      auto lock_manager = lock_one(hp, x.bucket, TABLE_MODE());
      bucket &b = buckets_[x.bucket];
        if (!b.occupied(slot)) {
        const partial_t partial = b.partial(slot);
        if (x.depth < MAX_BFS_PATH_LEN - 1) {
          b_slot y(alt_index(hp, partial, x.bucket),
    return b_slot(0, 0, -1);
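  // slot_search (above) runs a breadth-first search for an empty slot
  // reachable from either candidate bucket (seeded with pathcodes 0 and 1).
  // Each step records its slot choice in the pathcode so cuckoopath_search can
  // reconstruct the path, and the search gives up (depth == -1) once paths of
  // MAX_BFS_PATH_LEN would be exceeded or the queue is exhausted.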
 
  template <typename TABLE_MODE, typename AUTO_RESIZE>
  cuckoo_status cuckoo_fast_double(size_type current_hp) {
    if (!is_data_nothrow_move_constructible()) {
      LIBCUCKOO_DBG("%s", "cannot run cuckoo_fast_double because key-value"
                          " pair is not nothrow move constructible");
      return cuckoo_expand_simple<TABLE_MODE, AUTO_RESIZE>(current_hp + 1);
    const size_type new_hp = current_hp + 1;
    auto all_locks_manager = lock_all(TABLE_MODE());
    cuckoo_status st = check_resize_validity<AUTO_RESIZE>(current_hp, new_hp);
      locks_t &current_locks = get_current_locks();
      for (size_t i = 0; i < current_locks.size(); ++i) {
        rehash_lock<kIsNotLazy>(i);
      num_remaining_lazy_rehash_locks(0);
    maybe_resize_locks(size_type(1) << new_hp);
    locks_t &current_locks = get_current_locks();
    old_buckets_.swap(buckets_);
    if (old_buckets_.size() < kMaxNumLocks) {
      for (size_type i = 0; i < old_buckets_.size(); ++i) {
        move_bucket(old_buckets_, buckets_, i);
      num_remaining_lazy_rehash_locks(0);
      for (spinlock &lock : current_locks) {
        lock.is_migrated() = false;
      num_remaining_lazy_rehash_locks(current_locks.size());
      if (std::is_same<TABLE_MODE, locked_table_mode>::value) {
        rehash_with_workers();
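  // cuckoo_fast_double (above) doubles the hashpower without immediately
  // rehashing every element: under all locks it swaps the live buckets into
  // old_buckets_ and installs the doubled array. Small tables (fewer buckets
  // than kMaxNumLocks) are migrated on the spot; otherwise every lock stripe
  // is marked is_migrated() = false so rehash_lock can migrate it lazily, and
  // a locked_table-initiated resize finishes migration with worker threads.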
 
  void move_bucket(buckets_t &old_buckets, buckets_t &new_buckets,
                   size_type old_bucket_ind) const noexcept {
    const size_t old_hp = old_buckets.hashpower();
    const size_t new_hp = new_buckets.hashpower();
    bucket &old_bucket = old_buckets_[old_bucket_ind];
    const size_type new_bucket_ind = old_bucket_ind + hashsize(old_hp);
    size_type new_bucket_slot = 0;
    for (size_type old_bucket_slot = 0; old_bucket_slot < slot_per_bucket();
         ++old_bucket_slot) {
      if (!old_bucket.occupied(old_bucket_slot)) {
      const hash_value hv = hashed_key(old_bucket.key(old_bucket_slot));
      const size_type old_ihash = index_hash(old_hp, hv.hash);
      const size_type old_ahash = alt_index(old_hp, hv.partial, old_ihash);
      const size_type new_ihash = index_hash(new_hp, hv.hash);
      const size_type new_ahash = alt_index(new_hp, hv.partial, new_ihash);
      size_type dst_bucket_ind, dst_bucket_slot;
      if ((old_bucket_ind == old_ihash && new_ihash == new_bucket_ind) ||
          (old_bucket_ind == old_ahash && new_ahash == new_bucket_ind)) {
        dst_bucket_ind = new_bucket_ind;
        dst_bucket_slot = new_bucket_slot++;
        assert((old_bucket_ind == old_ihash && new_ihash == old_ihash) ||
               (old_bucket_ind == old_ahash && new_ahash == old_ahash));
        dst_bucket_ind = old_bucket_ind;
        dst_bucket_slot = old_bucket_slot;
      new_buckets.setKV(dst_bucket_ind, dst_bucket_slot,
                        old_bucket.partial(old_bucket_slot),
                        old_bucket.movable_key(old_bucket_slot),
                        std::move(old_bucket.mapped(old_bucket_slot)));
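  // move_bucket (above) relies on the doubling invariant: when the hashpower
  // grows by one, an element living in old bucket i can only hash to bucket i
  // or to bucket i + hashsize(old_hp) in the new table, so migration never
  // needs cuckoo displacement and each destination slot is known up front.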
 
  using automatic_resize = std::integral_constant<bool, true>;
  using manual_resize = std::integral_constant<bool, false>;

  template <typename AUTO_RESIZE>
  cuckoo_status check_resize_validity(const size_type orig_hp,
                                      const size_type new_hp) {
    if (mhp != NO_MAXIMUM_HASHPOWER && new_hp > mhp) {
      throw maximum_hashpower_exceeded(new_hp);
      return failure_under_expansion;
 
  void maybe_resize_locks(size_type new_bucket_count) {
    locks_t &current_locks = get_current_locks();
    if (!(current_locks.size() < kMaxNumLocks &&
          current_locks.size() < new_bucket_count)) {
    new_locks.resize(std::min(size_type(kMaxNumLocks), new_bucket_count));
    assert(new_locks.size() > current_locks.size());
    std::copy(current_locks.begin(), current_locks.end(), new_locks.begin());
    for (spinlock &lock : new_locks) {
    all_locks_.emplace_back(std::move(new_locks));
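  // maybe_resize_locks (above) grows the stripe count (up to kMaxNumLocks) by
  // appending a larger vector to all_locks_ rather than replacing it; since
  // all_locks_ is a std::list, references into an older stripe stay valid
  // while new callers pick up the latest one via get_current_locks().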
 
  template <typename TABLE_MODE, typename AUTO_RESIZE>
  cuckoo_status cuckoo_expand_simple(size_type new_hp) {
    auto all_locks_manager = lock_all(TABLE_MODE());
    cuckoo_status st = check_resize_validity<AUTO_RESIZE>(hp, new_hp);
    rehash_with_workers();
        (size_type i, size_type end, std::exception_ptr &eptr) {
            for (; i < end; ++i) {
              auto &bucket = buckets_[i];
                if (bucket.occupied(j)) {
                  new_map.insert(bucket.movable_key(j),
                                 std::move(bucket.mapped(j)));
            eptr = std::current_exception();
    new_map.rehash_with_workers();
    maybe_resize_locks(new_map.bucket_count());
    buckets_.swap(new_map.buckets_);
 
  template <typename F>
  void parallel_exec_noexcept(size_type start, size_type end, F func) {
    const size_type num_workers = 1 + num_extra_threads;
    size_type work_per_thread = (end - start) / num_workers;
    std::vector<std::thread, rebind_alloc<std::thread>> threads(
    threads.reserve(num_extra_threads);
    for (size_type i = 0; i < num_extra_threads; ++i) {
      threads.emplace_back(func, start, start + work_per_thread);
      start += work_per_thread;
    for (std::thread &t : threads) {
 
  template <typename F>
  void parallel_exec(size_type start, size_type end, F func) {
    const size_type num_workers = 1 + num_extra_threads;
    size_type work_per_thread = (end - start) / num_workers;
    std::vector<std::thread, rebind_alloc<std::thread>> threads(
    threads.reserve(num_extra_threads);
    std::vector<std::exception_ptr, rebind_alloc<std::exception_ptr>> eptrs(
    for (size_type i = 0; i < num_extra_threads; ++i) {
      threads.emplace_back(func, start, start + work_per_thread,
                           std::ref(eptrs[i]));
      start += work_per_thread;
    func(start, end, std::ref(eptrs.back()));
    for (std::thread &t : threads) {
    for (std::exception_ptr &eptr : eptrs) {
      if (eptr) std::rethrow_exception(eptr);
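  // parallel_exec (above) splits [start, end) evenly across
  // 1 + num_extra_threads workers: each extra std::thread takes one chunk and
  // the calling thread runs the final chunk itself. Every worker records any
  // exception in its exception_ptr slot, and the caller rethrows a stored
  // exception after joining.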
 
  void rehash_with_workers() noexcept {
    locks_t &current_locks = get_current_locks();
    parallel_exec_noexcept(
        0, current_locks.size(),
        [this](size_type start, size_type end) {
          for (size_type i = start; i < end; ++i) {
            rehash_lock<kIsNotLazy>(i);
    num_remaining_lazy_rehash_locks(0);
 
  void del_from_bucket(const size_type bucket_ind, const size_type slot) {
    buckets_.eraseKV(bucket_ind, slot);
    --get_current_locks()[lock_ind(bucket_ind)].elem_counter();

  void cuckoo_clear() {
    num_remaining_lazy_rehash_locks(0);
    for (spinlock &lock : get_current_locks()) {
      lock.elem_counter() = 0;
      lock.is_migrated() = true;
 
  template <typename TABLE_MODE> bool cuckoo_rehash(size_type n) {
    const size_type hp = hashpower();
    return cuckoo_expand_simple<TABLE_MODE, manual_resize>(n) == ok;

  template <typename TABLE_MODE> bool cuckoo_reserve(size_type n) {
    const size_type hp = hashpower();
    const size_type new_hp = reserve_calc(n);
    return cuckoo_expand_simple<TABLE_MODE, manual_resize>(new_hp) == ok;
 
  static size_type reserve_calc(const size_type n) {
    const size_type buckets = (n + slot_per_bucket() - 1) / slot_per_bucket();
    for (blog2 = 0; (size_type(1) << blog2) < buckets; ++blog2)
    assert(n <= buckets * slot_per_bucket() && buckets <= hashsize(blog2));
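  // Worked example: assuming slot_per_bucket() is 4 (the default is not shown
  // in this fragment), reserve_calc(9) computes buckets = (9 + 3) / 4 = 3 and
  // returns the smallest hashpower blog2 with 2^blog2 >= 3, i.e. 2 -- four
  // buckets with room for 16 elements.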
 
  friend class UnitTestInternalAccess;

  static constexpr size_type kMaxNumLocks = 1UL << 16;

  locks_t &get_current_locks() const { return all_locks_.back(); }

  size_type num_remaining_lazy_rehash_locks() const {
    return num_remaining_lazy_rehash_locks_.load(
        std::memory_order_acquire);

  void num_remaining_lazy_rehash_locks(size_type n) const {
    num_remaining_lazy_rehash_locks_.store(
        n, std::memory_order_release);
      old_buckets_.clear_and_deallocate();

  void decrement_num_remaining_lazy_rehash_locks() const {
    size_type old_num_remaining = num_remaining_lazy_rehash_locks_.fetch_sub(
        1, std::memory_order_acq_rel);
    assert(old_num_remaining >= 1);
    if (old_num_remaining == 1) {
      old_buckets_.clear_and_deallocate();
 
  mutable buckets_t buckets_;

  mutable buckets_t old_buckets_;

  mutable all_locks_t all_locks_;
 
  template <typename AtomicT>
  class CopyableAtomic : public std::atomic<AtomicT> {
    using std::atomic<AtomicT>::atomic;

    CopyableAtomic(const CopyableAtomic &other) noexcept
        : CopyableAtomic(other.load(std::memory_order_acquire)) {}

    CopyableAtomic &operator=(const CopyableAtomic &other) noexcept {
      this->store(other.load(std::memory_order_acquire),
                  std::memory_order_release);
 
  mutable CopyableAtomic<size_t> num_remaining_lazy_rehash_locks_;

  CopyableAtomic<double> minimum_load_factor_;

  CopyableAtomic<size_type> maximum_hashpower_;

  CopyableAtomic<size_type> max_num_worker_threads_;
 
    using key_type = typename cuckoohash_map::key_type;
    using mapped_type = typename cuckoohash_map::mapped_type;
    using size_type = typename cuckoohash_map::size_type;
    using difference_type = typename cuckoohash_map::difference_type;
    using hasher = typename cuckoohash_map::hasher;
    using key_equal = typename cuckoohash_map::key_equal;
    using allocator_type = typename cuckoohash_map::allocator_type;
    using reference = typename cuckoohash_map::reference;
    using const_reference = typename cuckoohash_map::const_reference;
    using pointer = typename cuckoohash_map::pointer;
    using const_pointer = typename cuckoohash_map::const_pointer;
 
      using difference_type = typename locked_table::difference_type;
      using value_type = typename locked_table::value_type;
      using pointer = typename locked_table::const_pointer;
      using reference = typename locked_table::const_reference;
      using iterator_category = std::bidirectional_iterator_tag;
 
        return buckets_ == it.buckets_ && index_ == it.index_ &&
        return !(operator==(it));

      reference operator*() const { return (*buckets_)[index_].kvpair(slot_); }

      pointer operator->() const { return std::addressof(operator*()); }
 
        for (; index_ < buckets_->size(); ++index_) {
          for (; slot_ < slot_per_bucket(); ++slot_) {
            if ((*buckets_)[index_].occupied(slot_)) {
        assert(std::make_pair(index_, slot_) == end_pos(*buckets_));

          slot_ = slot_per_bucket() - 1;
        while (!(*buckets_)[index_].occupied(slot_)) {
            slot_ = slot_per_bucket() - 1;
 
      static std::pair<size_type, size_type> end_pos(const buckets_t &buckets) {
        return std::make_pair(buckets.size(), 0);

      const_iterator(buckets_t &buckets, size_type index,
                     size_type slot) noexcept
          : buckets_(std::addressof(buckets)), index_(index), slot_(slot) {
        if (std::make_pair(index_, slot_) != end_pos(*buckets_) &&
            !(*buckets_)[index_].occupied(slot_)) {

      friend class locked_table;
 
      using pointer = typename cuckoohash_map::pointer;
      using reference = typename cuckoohash_map::reference;

      bool operator==(const iterator &it) const {
        return const_iterator::operator==(it);

      bool operator!=(const iterator &it) const {
        return const_iterator::operator!=(it);

      reference operator*() {
        return (*const_iterator::buckets_)[const_iterator::index_].kvpair(
            const_iterator::slot_);

      pointer operator->() { return std::addressof(operator*()); }

        const_iterator::operator++();
        const_iterator::operator++();
        const_iterator::operator--();
        const_iterator::operator--();
 
    static constexpr size_type slot_per_bucket() {

        : map_(std::move(lt.map_)),
          all_locks_manager_(std::move(lt.all_locks_manager_)) {}

    locked_table &operator=(locked_table &&lt) noexcept {
      map_ = std::move(lt.map_);
      all_locks_manager_ = std::move(lt.all_locks_manager_);

    void unlock() { all_locks_manager_.reset(); }
 
    bool is_active() const { return static_cast<bool>(all_locks_manager_); }

    hasher hash_function() const { return map_.get().hash_function(); }

    key_equal key_eq() const { return map_.get().key_eq(); }

    allocator_type get_allocator() const { return map_.get().get_allocator(); }

    size_type hashpower() const { return map_.get().hashpower(); }

    size_type bucket_count() const { return map_.get().bucket_count(); }

    bool empty() const { return map_.get().empty(); }

    size_type size() const { return map_.get().size(); }

    size_type capacity() const { return map_.get().capacity(); }

    double load_factor() const { return map_.get().load_factor(); }
 
    void minimum_load_factor(const double mlf) {
      map_.get().minimum_load_factor(mlf);

    double minimum_load_factor() const {
      return map_.get().minimum_load_factor();

    void maximum_hashpower(size_type mhp) { map_.get().maximum_hashpower(mhp); }

    size_type maximum_hashpower() const {
      return map_.get().maximum_hashpower();

    void max_num_worker_threads(size_type extra_threads) {
      map_.get().max_num_worker_threads(extra_threads);

    size_type max_num_worker_threads() const {
      return map_.get().max_num_worker_threads();
 
    const_iterator begin() const {
      return const_iterator(map_.get().buckets_, 0, 0);

    const_iterator cbegin() const { return begin(); }

      const auto end_pos = const_iterator::end_pos(map_.get().buckets_);
      return iterator(map_.get().buckets_,
                      static_cast<size_type>(end_pos.first),
                      static_cast<size_type>(end_pos.second));

    const_iterator end() const {
      const auto end_pos = const_iterator::end_pos(map_.get().buckets_);
      return const_iterator(map_.get().buckets_,
                            static_cast<size_type>(end_pos.first),
                            static_cast<size_type>(end_pos.second));

    const_iterator cend() const { return end(); }
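    // Illustrative sketch (not part of the original header): a locked_table
    // holds every lock stripe for its lifetime, so STL-style iteration is safe
    // until unlock() or destruction. It is normally obtained from the map's
    // lock_table() method (not shown in this fragment):
    //
    //   auto lt = map.lock_table();
    //   for (const auto &kv : lt) { /* kv.first, kv.second */ }
    //   lt.unlock();  // releases all locks early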
 
    void clear() { map_.get().cuckoo_clear(); }

    template <typename K, typename... Args>
    std::pair<iterator, bool> insert(K &&key, Args &&... val) {
      hash_value hv = map_.get().hashed_key(key);
      auto b = map_.get().template snapshot_and_lock_two<locked_table_mode>(hv);
      table_position pos =
          map_.get().template cuckoo_insert_loop<locked_table_mode>(hv, b, key);
      if (pos.status == ok) {
        map_.get().add_to_bucket(pos.index, pos.slot, hv.partial,
                                 std::forward<K>(key),
                                 std::forward<Args>(val)...);
        assert(pos.status == failure_key_duplicated);
      return std::make_pair(iterator(map_.get().buckets_, pos.index, pos.slot),
 
    iterator erase(const_iterator pos) {
      map_.get().del_from_bucket(pos.index_, pos.slot_);
      return iterator(map_.get().buckets_, pos.index_, pos.slot_);

    iterator erase(iterator pos) {
      map_.get().del_from_bucket(pos.index_, pos.slot_);
      return iterator(map_.get().buckets_, pos.index_, pos.slot_);
 
    template <typename K> size_type erase(const K &key) {
      const hash_value hv = map_.get().hashed_key(key);
          map_.get().template snapshot_and_lock_two<locked_table_mode>(hv);
      const table_position pos =
          map_.get().cuckoo_find(key, hv.partial, b.i1, b.i2);
      if (pos.status == ok) {
        map_.get().del_from_bucket(pos.index, pos.slot);
 
    template <typename K> iterator find(const K &key) {
      const hash_value hv = map_.get().hashed_key(key);
          map_.get().template snapshot_and_lock_two<locked_table_mode>(hv);
      const table_position pos =
          map_.get().cuckoo_find(key, hv.partial, b.i1, b.i2);
      if (pos.status == ok) {
        return iterator(map_.get().buckets_, pos.index, pos.slot);

    template <typename K> const_iterator find(const K &key) const {
      const hash_value hv = map_.get().hashed_key(key);
          map_.get().template snapshot_and_lock_two<locked_table_mode>(hv);
      const table_position pos =
          map_.get().cuckoo_find(key, hv.partial, b.i1, b.i2);
      if (pos.status == ok) {
        return const_iterator(map_.get().buckets_, pos.index, pos.slot);
 
    template <typename K> mapped_type &at(const K &key) {
      auto it = find(key);
        throw std::out_of_range("key not found in table");

    template <typename K> const mapped_type &at(const K &key) const {
      auto it = find(key);
        throw std::out_of_range("key not found in table");

      auto result = insert(std::forward<K>(key));
      return result.first->second;
 
    template <typename K> size_type count(const K &key) const {
      const hash_value hv = map_.get().hashed_key(key);
          map_.get().template snapshot_and_lock_two<locked_table_mode>(hv);
      return map_.get().cuckoo_find(key, hv.partial, b.i1, b.i2).status == ok

    template <typename K>
    std::pair<iterator, iterator> equal_range(const K &key) {
      auto it = find(key);
        return std::make_pair(it, it);
        auto start_it = it++;
        return std::make_pair(start_it, it);

    template <typename K>
    std::pair<const_iterator, const_iterator> equal_range(const K &key) const {
      auto it = find(key);
        return std::make_pair(it, it);
        auto start_it = it++;
        return std::make_pair(start_it, it);
 
      map_.get().template cuckoo_rehash<locked_table_mode>(n);

      map_.get().template cuckoo_reserve<locked_table_mode>(n);

      if (size() != lt.size()) {
      for (const auto &elem : lt) {
        auto it = find(elem.first);
        if (it == end() || it->second != elem.second) {

    bool operator!=(const locked_table &lt) const {
      if (size() != lt.size()) {
      for (const auto &elem : lt) {
        auto it = find(elem.first);
        if (it == end() || it->second != elem.second) {
 
    locked_table(cuckoohash_map &map) noexcept
          all_locks_manager_(map.lock_all(normal_mode())) {
      map.rehash_with_workers();

    buckets_t &buckets() { return map_.get().buckets_; }

    const buckets_t &buckets() const { return map_.get().buckets_; }

    void maybe_resize_locks(size_type new_bucket_count) {
      map_.get().maybe_resize_locks(new_bucket_count);

    locks_t &get_current_locks() { return map_.get().get_current_locks(); }

    std::reference_wrapper<cuckoohash_map> map_;

    AllLocksManager all_locks_manager_;

    friend class cuckoohash_map;
 
    friend std::ostream &operator<<(std::ostream &os, const locked_table &lt) {
      size_type size = lt.size();
      os.write(reinterpret_cast<const char *>(&size), sizeof(size_type));
      double mlf = lt.minimum_load_factor();
      size_type mhp = lt.maximum_hashpower();
      os.write(reinterpret_cast<const char *>(&mlf), sizeof(double));
      os.write(reinterpret_cast<const char *>(&mhp), sizeof(size_type));

    friend std::istream &operator>>(std::istream &is, locked_table &lt) {
      lt.maybe_resize_locks(lt.bucket_count());
      for (auto &lock : lt.get_current_locks()) {
        lock.elem_counter() = 0;
      is.read(reinterpret_cast<char *>(&size), sizeof(size_type));
        lt.get_current_locks()[0].elem_counter() = size;
      is.read(reinterpret_cast<char *>(&mlf), sizeof(double));
      is.read(reinterpret_cast<char *>(&mhp), sizeof(size_type));
      lt.minimum_load_factor(mlf);
      lt.maximum_hashpower(mhp);
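      // The stream operators above write and read the table metadata as raw
      // bytes (element count, minimum load factor, maximum hashpower) around
      // the bucket data (elided in this fragment); operator>> zeroes every
      // stripe's elem_counter() and credits the full size to stripe 0.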
 
template <class Key, class T, class Hash, class KeyEqual, class Allocator,
          std::size_t SLOT_PER_BUCKET>

#endif // _CUCKOOHASH_MAP_HH