gdbsupport: bump unordered_dense to 4.8.0

We don't need anything in this release, but I think it doesn't hurt to
just stay up to date.  The new version has a new include file, stl.h.
To keep things clean and separated, move the imported files to a new
sub-directory.  This requires a small change in
gdb/check-include-guards.py, to be able to ignore the whole new
directory.

Change-Id: Ic8c5d0dd5ea8b6691c99975d6ca78f637175ef42
Approved-By: Tom Tromey <tom@tromey.com>
Author: Simon Marchi
Date:   2025-10-27 15:25:51 -04:00
Commit: ddaee713f5 (parent 9f9eeeb6da)

5 changed files with 330 additions and 243 deletions

--- a/gdb/check-include-guards.py
+++ b/gdb/check-include-guards.py

@@ -23,6 +23,7 @@
 # When --update is used, rewrite the files in place as needed.
 
+import fnmatch
 import re
 import sys
 from typing import List
@@ -32,7 +33,9 @@ OLDDEF = re.compile("^#if !defined *\\(([A-Za-z0-9_]+)\\)\n")
 # Some headers -- in particular, ones that aren't maintained by gdb --
 # should be excluded from the checks.
-EXCLUDED = frozenset(["gdbsupport/unordered_dense.h"])
+#
+# This is interpreted as a list of patterns as interpreted by fnmatch.
+EXCLUDED = ("gdbsupport/unordered_dense/*",)
 
 # See if
@@ -68,8 +71,9 @@ def write_header(filename: str, contents: List[str]):
 def check_header(filename: str):
-    if filename in EXCLUDED:
-        return
+    for pat in EXCLUDED:
+        if fnmatch.fnmatch(filename, pat):
+            return
     # Turn x/y-z.h into X_Y_Z_H.
     assert filename.endswith(".h")
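For reference, the semantics the script now relies on are plain fnmatch globbing, where `*` also matches across `/`, so the single pattern `gdbsupport/unordered_dense/*` covers every file in the imported directory. A minimal sketch of the same matching done from C++ via POSIX fnmatch(3), whose default behavior agrees with Python's fnmatch here (the file list is made up for illustration):

    #include <fnmatch.h>
    #include <cstdio>

    int main() {
        const char* pattern = "gdbsupport/unordered_dense/*";
        const char* files[] = {"gdbsupport/unordered_dense/unordered_dense.h",
                               "gdbsupport/unordered_dense/stl.h",
                               "gdbsupport/unordered-map.h"};
        for (const char* f : files) {
            // With flags == 0, '*' also matches '/' -- same as fnmatch.fnmatch.
            bool excluded = fnmatch(pattern, f, 0) == 0;
            std::printf("%-45s %s\n", f, excluded ? "excluded" : "checked");
        }
    }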

--- /dev/null
+++ b/gdbsupport/unordered_dense/stl.h

@@ -0,0 +1,83 @@
///////////////////////// ankerl::unordered_dense::{map, set} /////////////////////////
// A fast & densely stored hashmap and hashset based on robin-hood backward shift deletion.
// Version 4.8.0
// https://github.com/martinus/unordered_dense
//
// Licensed under the MIT License <http://opensource.org/licenses/MIT>.
// SPDX-License-Identifier: MIT
// Copyright (c) 2022-2024 Martin Leitner-Ankerl <martin.ankerl@gmail.com>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#ifndef ANKERL_STL_H
#define ANKERL_STL_H
#include <array> // for array
#include <cstdint> // for uint64_t, uint32_t, std::uint8_t, UINT64_C
#include <cstring> // for size_t, memcpy, memset
#include <functional> // for equal_to, hash
#include <initializer_list> // for initializer_list
#include <iterator> // for pair, distance
#include <limits> // for numeric_limits
#include <memory> // for allocator, allocator_traits, shared_ptr
#include <optional> // for optional
#include <stdexcept> // for out_of_range
#include <string> // for basic_string
#include <string_view> // for basic_string_view, hash
#include <tuple> // for forward_as_tuple
#include <type_traits> // for enable_if_t, declval, conditional_t, ena...
#include <utility> // for forward, exchange, pair, as_const, piece...
#include <vector> // for vector
// <memory_resource> includes <mutex>, which fails to compile if
// targeting GCC >= 13 with the (rewritten) win32 thread model, and
// targeting Windows earlier than Vista (0x600). GCC predefines
// _REENTRANT when using the 'posix' model, and doesn't when using the
// 'win32' model.
#if defined __MINGW64__ && defined __GNUC__ && __GNUC__ >= 13 && !defined _REENTRANT
// _WIN32_WINNT is guaranteed to be defined here because of the
// <cstdint> inclusion above.
#    ifndef _WIN32_WINNT
#        error "_WIN32_WINNT not defined"
#    endif
#    if _WIN32_WINNT < 0x600
#        define ANKERL_MEMORY_RESOURCE_IS_BAD() 1 // NOLINT(cppcoreguidelines-macro-usage)
#    endif
#endif
#ifndef ANKERL_MEMORY_RESOURCE_IS_BAD
#    define ANKERL_MEMORY_RESOURCE_IS_BAD() 0 // NOLINT(cppcoreguidelines-macro-usage)
#endif
#if defined(__has_include) && !defined(ANKERL_UNORDERED_DENSE_DISABLE_PMR)
#    if __has_include(<memory_resource>) && !ANKERL_MEMORY_RESOURCE_IS_BAD()
#        define ANKERL_UNORDERED_DENSE_PMR std::pmr // NOLINT(cppcoreguidelines-macro-usage)
#        include <memory_resource>                  // for polymorphic_allocator
#    elif __has_include(<experimental/memory_resource>)
#        define ANKERL_UNORDERED_DENSE_PMR std::experimental::pmr // NOLINT(cppcoreguidelines-macro-usage)
#        include <experimental/memory_resource>                   // for polymorphic_allocator
#    endif
#endif
#if defined(_MSC_VER) && defined(_M_X64)
#    include <intrin.h>
#    pragma intrinsic(_umul128)
#endif
#endif
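The point of the detection block above is that ANKERL_UNORDERED_DENSE_PMR is only defined when a usable <memory_resource> (or its experimental fallback) exists; the pmr map/set aliases in unordered_dense.h are compiled only in that case. A hedged sketch of what this enables on a toolchain where PMR is available (the arena is illustrative):

    #include <memory_resource>
    #include "gdbsupport/unordered_dense/unordered_dense.h"

    int main() {
        // Bump-allocate the table's storage out of a local arena.
        std::pmr::monotonic_buffer_resource arena;
        ankerl::unordered_dense::pmr::map<int, int> m{&arena};
        m[1] = 2;
        return m.size() == 1 ? 0 : 1;
    }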

--- a/gdbsupport/unordered_dense.h
+++ b/gdbsupport/unordered_dense/unordered_dense.h

@@ -1,7 +1,7 @@
 ///////////////////////// ankerl::unordered_dense::{map, set} /////////////////////////
 // A fast & densely stored hashmap and hashset based on robin-hood backward shift deletion.
-// Version 4.6.0
+// Version 4.8.0
 // https://github.com/martinus/unordered_dense
 //
 // Licensed under the MIT License <http://opensource.org/licenses/MIT>.
@@ -31,7 +31,7 @@
 // see https://semver.org/spec/v2.0.0.html
 #define ANKERL_UNORDERED_DENSE_VERSION_MAJOR 4 // NOLINT(cppcoreguidelines-macro-usage) incompatible API changes
-#define ANKERL_UNORDERED_DENSE_VERSION_MINOR 6 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible functionality
+#define ANKERL_UNORDERED_DENSE_VERSION_MINOR 8 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible functionality
 #define ANKERL_UNORDERED_DENSE_VERSION_PATCH 0 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible bug fixes
 // API versioning with inline namespace, see https://www.foonathan.net/2018/11/inline-namespaces/
@@ -81,66 +81,17 @@
 #    define ANKERL_UNORDERED_DENSE_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK
 #endif
 
-// defined in unordered_dense.cpp
-#if !defined(ANKERL_UNORDERED_DENSE_EXPORT)
-#    define ANKERL_UNORDERED_DENSE_EXPORT
-#endif
-
 #if ANKERL_UNORDERED_DENSE_CPP_VERSION < 201703L
 #    error ankerl::unordered_dense requires C++17 or higher
 #else
-#    include <array>            // for array
-#    include <cstdint>          // for uint64_t, uint32_t, uint8_t, UINT64_C
-#    include <cstring>          // for size_t, memcpy, memset
-#    include <functional>       // for equal_to, hash
-#    include <initializer_list> // for initializer_list
-#    include <iterator>         // for pair, distance
-#    include <limits>           // for numeric_limits
-#    include <memory>           // for allocator, allocator_traits, shared_ptr
-#    include <optional>         // for optional
-#    include <stdexcept>        // for out_of_range
-#    include <string>           // for basic_string
-#    include <string_view>      // for basic_string_view, hash
-#    include <tuple>            // for forward_as_tuple
-#    include <type_traits>      // for enable_if_t, declval, conditional_t, ena...
-#    include <utility>          // for forward, exchange, pair, as_const, piece...
-#    include <vector>           // for vector
-#    if ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() == 0
-#        include <cstdlib> // for abort
+#    if !defined(ANKERL_UNORDERED_DENSE_STD_MODULE)
+// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
+#        define ANKERL_UNORDERED_DENSE_STD_MODULE 0
 #    endif
-
-// <memory_resource> includes <mutex>, which fails to compile if
-// targeting GCC >= 13 with the (rewritten) win32 thread model, and
-// targeting Windows earlier than Vista (0x600).  GCC predefines
-// _REENTRANT when using the 'posix' model, and doesn't when using the
-// 'win32' model.
-#    if defined __MINGW64__ && defined __GNUC__ && __GNUC__ >= 13 && !defined _REENTRANT
-// _WIN32_WINNT is guaranteed to be defined here because of the
-// <cstdint> inclusion above.
-#        ifndef _WIN32_WINNT
-#            error "_WIN32_WINNT not defined"
-#        endif
-#        if _WIN32_WINNT < 0x600
-#            define ANKERL_MEMORY_RESOURCE_IS_BAD() 1 // NOLINT(cppcoreguidelines-macro-usage)
-#        endif
-#    endif
-#    ifndef ANKERL_MEMORY_RESOURCE_IS_BAD
-#        define ANKERL_MEMORY_RESOURCE_IS_BAD() 0 // NOLINT(cppcoreguidelines-macro-usage)
-#    endif
-
-#    if defined(__has_include) && !defined(ANKERL_UNORDERED_DENSE_DISABLE_PMR)
-#        if __has_include(<memory_resource>) && !ANKERL_MEMORY_RESOURCE_IS_BAD()
-#            define ANKERL_UNORDERED_DENSE_PMR std::pmr // NOLINT(cppcoreguidelines-macro-usage)
-#            include <memory_resource>                  // for polymorphic_allocator
-#        elif __has_include(<experimental/memory_resource>)
-#            define ANKERL_UNORDERED_DENSE_PMR std::experimental::pmr // NOLINT(cppcoreguidelines-macro-usage)
-#            include <experimental/memory_resource>                   // for polymorphic_allocator
-#        endif
-#    endif
-
-#    if defined(_MSC_VER) && defined(_M_X64)
-#        include <intrin.h>
-#        pragma intrinsic(_umul128)
+#    if !ANKERL_UNORDERED_DENSE_STD_MODULE
+#        include "stl.h"
 #    endif
 
 #    if __has_cpp_attribute(likely) && __has_cpp_attribute(unlikely) && ANKERL_UNORDERED_DENSE_CPP_VERSION >= 202002L
@@ -204,29 +155,29 @@ namespace detail {
 // hardcodes seed and the secret, reformats the code, and clang-tidy fixes.
 namespace detail::wyhash {
 
-inline void mum(uint64_t* a, uint64_t* b) {
+inline void mum(std::uint64_t* a, std::uint64_t* b) {
 #    if defined(__SIZEOF_INT128__)
     __uint128_t r = *a;
     r *= *b;
-    *a = static_cast<uint64_t>(r);
-    *b = static_cast<uint64_t>(r >> 64U);
+    *a = static_cast<std::uint64_t>(r);
+    *b = static_cast<std::uint64_t>(r >> 64U);
 #    elif defined(_MSC_VER) && defined(_M_X64)
     *a = _umul128(*a, *b, b);
 #    else
-    uint64_t ha = *a >> 32U;
-    uint64_t hb = *b >> 32U;
-    uint64_t la = static_cast<uint32_t>(*a);
-    uint64_t lb = static_cast<uint32_t>(*b);
-    uint64_t hi{};
-    uint64_t lo{};
-    uint64_t rh = ha * hb;
-    uint64_t rm0 = ha * lb;
-    uint64_t rm1 = hb * la;
-    uint64_t rl = la * lb;
-    uint64_t t = rl + (rm0 << 32U);
-    auto c = static_cast<uint64_t>(t < rl);
+    std::uint64_t ha = *a >> 32U;
+    std::uint64_t hb = *b >> 32U;
+    std::uint64_t la = static_cast<std::uint32_t>(*a);
+    std::uint64_t lb = static_cast<std::uint32_t>(*b);
+    std::uint64_t hi{};
+    std::uint64_t lo{};
+    std::uint64_t rh = ha * hb;
+    std::uint64_t rm0 = ha * lb;
+    std::uint64_t rm1 = hb * la;
+    std::uint64_t rl = la * lb;
+    std::uint64_t t = rl + (rm0 << 32U);
+    auto c = static_cast<std::uint64_t>(t < rl);
     lo = t + (rm1 << 32U);
-    c += static_cast<uint64_t>(lo < t);
+    c += static_cast<std::uint64_t>(lo < t);
     hi = rh + (rm0 >> 32U) + (rm1 >> 32U) + c;
     *a = lo;
     *b = hi;
@@ -234,39 +185,39 @@ inline void mum(uint64_t* a, uint64_t* b) {
 }
 
 // multiply and xor mix function, aka MUM
-[[nodiscard]] inline auto mix(uint64_t a, uint64_t b) -> uint64_t {
+[[nodiscard]] inline auto mix(std::uint64_t a, std::uint64_t b) -> std::uint64_t {
     mum(&a, &b);
     return a ^ b;
 }
 
 // read functions. WARNING: we don't care about endianness, so results are different on big endian!
-[[nodiscard]] inline auto r8(const uint8_t* p) -> uint64_t {
-    uint64_t v{};
+[[nodiscard]] inline auto r8(const std::uint8_t* p) -> std::uint64_t {
+    std::uint64_t v{};
     std::memcpy(&v, p, 8U);
     return v;
 }
 
-[[nodiscard]] inline auto r4(const uint8_t* p) -> uint64_t {
-    uint32_t v{};
+[[nodiscard]] inline auto r4(const std::uint8_t* p) -> std::uint64_t {
+    std::uint32_t v{};
     std::memcpy(&v, p, 4);
     return v;
 }
 
 // reads 1, 2, or 3 bytes
-[[nodiscard]] inline auto r3(const uint8_t* p, size_t k) -> uint64_t {
-    return (static_cast<uint64_t>(p[0]) << 16U) | (static_cast<uint64_t>(p[k >> 1U]) << 8U) | p[k - 1];
+[[nodiscard]] inline auto r3(const std::uint8_t* p, std::size_t k) -> std::uint64_t {
+    return (static_cast<std::uint64_t>(p[0]) << 16U) | (static_cast<std::uint64_t>(p[k >> 1U]) << 8U) | p[k - 1];
 }
 
-[[maybe_unused]] [[nodiscard]] inline auto hash(void const* key, size_t len) -> uint64_t {
+[[maybe_unused]] [[nodiscard]] inline auto hash(void const* key, std::size_t len) -> std::uint64_t {
     static constexpr auto secret = std::array{UINT64_C(0xa0761d6478bd642f),
                                               UINT64_C(0xe7037ed1a0b428db),
                                               UINT64_C(0x8ebc6af09c88c6e3),
                                               UINT64_C(0x589965cc75374cc3)};
 
     auto const* p = static_cast<uint8_t const*>(key);
-    uint64_t seed = secret[0];
-    uint64_t a{};
-    uint64_t b{};
+    std::uint64_t seed = secret[0];
+    std::uint64_t a{};
+    std::uint64_t b{};
     if (ANKERL_UNORDERED_DENSE_LIKELY(len <= 16))
         ANKERL_UNORDERED_DENSE_LIKELY_ATTR {
             if (ANKERL_UNORDERED_DENSE_LIKELY(len >= 4))
@@ -285,11 +236,11 @@ inline void mum(uint64_t* a, uint64_t* b) {
             }
         }
     else {
-        size_t i = len;
+        std::size_t i = len;
         if (ANKERL_UNORDERED_DENSE_UNLIKELY(i > 48))
             ANKERL_UNORDERED_DENSE_UNLIKELY_ATTR {
-                uint64_t see1 = seed;
-                uint64_t see2 = seed;
+                std::uint64_t see1 = seed;
+                std::uint64_t see2 = seed;
                 do {
                     seed = mix(r8(p) ^ secret[1], r8(p + 8) ^ seed);
                     see1 = mix(r8(p + 16) ^ secret[2], r8(p + 24) ^ see1);
@@ -312,16 +263,16 @@ inline void mum(uint64_t* a, uint64_t* b) {
     return mix(secret[1] ^ len, mix(a ^ secret[1], b ^ seed));
 }
 
-[[nodiscard]] inline auto hash(uint64_t x) -> uint64_t {
+[[nodiscard]] inline auto hash(std::uint64_t x) -> std::uint64_t {
     return detail::wyhash::mix(x, UINT64_C(0x9E3779B97F4A7C15));
 }
 
 } // namespace detail::wyhash
 
-ANKERL_UNORDERED_DENSE_EXPORT template <typename T, typename Enable = void>
+template <typename T, typename Enable = void>
 struct hash {
     auto operator()(T const& obj) const noexcept(noexcept(std::declval<std::hash<T>>().operator()(std::declval<T const&>())))
-        -> uint64_t {
+        -> std::uint64_t {
         return std::hash<T>{}(obj);
     }
 };
@@ -330,7 +281,7 @@ template <typename T>
 struct hash<T, typename std::hash<T>::is_avalanching> {
     using is_avalanching = void;
     auto operator()(T const& obj) const noexcept(noexcept(std::declval<std::hash<T>>().operator()(std::declval<T const&>())))
-        -> uint64_t {
+        -> std::uint64_t {
         return std::hash<T>{}(obj);
     }
 };
@@ -338,7 +289,7 @@ struct hash<T, typename std::hash<T>::is_avalanching> {
 template <typename CharT>
 struct hash<std::basic_string<CharT>> {
     using is_avalanching = void;
-    auto operator()(std::basic_string<CharT> const& str) const noexcept -> uint64_t {
+    auto operator()(std::basic_string<CharT> const& str) const noexcept -> std::uint64_t {
         return detail::wyhash::hash(str.data(), sizeof(CharT) * str.size());
     }
 };
@@ -346,7 +297,7 @@ struct hash<std::basic_string<CharT>> {
 template <typename CharT>
 struct hash<std::basic_string_view<CharT>> {
     using is_avalanching = void;
-    auto operator()(std::basic_string_view<CharT> const& sv) const noexcept -> uint64_t {
+    auto operator()(std::basic_string_view<CharT> const& sv) const noexcept -> std::uint64_t {
         return detail::wyhash::hash(sv.data(), sizeof(CharT) * sv.size());
     }
 };
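These specializations are what make the container's default hasher "avalanching" for common key types: string keys are run through the bundled wyhash rather than std::hash. A small sketch of invoking the public hasher directly, using this commit's include path (the key string is arbitrary):

    #include <cstdint>
    #include <string>
    #include "gdbsupport/unordered_dense/unordered_dense.h"

    int main() {
        // Uses hash<std::basic_string<CharT>> above: wyhash over the raw bytes.
        ankerl::unordered_dense::hash<std::string> h;
        std::uint64_t digest = h(std::string{"breakpoint"});
        return digest != 0 ? 0 : 1;  // the exact digest value is unspecified here
    }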
@@ -354,34 +305,34 @@ struct hash<std::basic_string_view<CharT>> {
 template <class T>
 struct hash<T*> {
     using is_avalanching = void;
-    auto operator()(T* ptr) const noexcept -> uint64_t {
+    auto operator()(T* ptr) const noexcept -> std::uint64_t {
         // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
-        return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr));
+        return detail::wyhash::hash(reinterpret_cast<std::uintptr_t>(ptr));
     }
 };
 
 template <class T>
 struct hash<std::unique_ptr<T>> {
     using is_avalanching = void;
-    auto operator()(std::unique_ptr<T> const& ptr) const noexcept -> uint64_t {
+    auto operator()(std::unique_ptr<T> const& ptr) const noexcept -> std::uint64_t {
         // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
-        return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr.get()));
+        return detail::wyhash::hash(reinterpret_cast<std::uintptr_t>(ptr.get()));
     }
 };
 
 template <class T>
 struct hash<std::shared_ptr<T>> {
     using is_avalanching = void;
-    auto operator()(std::shared_ptr<T> const& ptr) const noexcept -> uint64_t {
+    auto operator()(std::shared_ptr<T> const& ptr) const noexcept -> std::uint64_t {
         // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
-        return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr.get()));
+        return detail::wyhash::hash(reinterpret_cast<std::uintptr_t>(ptr.get()));
     }
 };
 
 template <typename Enum>
 struct hash<Enum, typename std::enable_if_t<std::is_enum_v<Enum>>> {
     using is_avalanching = void;
-    auto operator()(Enum e) const noexcept -> uint64_t {
+    auto operator()(Enum e) const noexcept -> std::uint64_t {
         using underlying = std::underlying_type_t<Enum>;
         return detail::wyhash::hash(static_cast<underlying>(e));
     }
@@ -392,25 +343,26 @@ struct tuple_hash_helper {
     // Converts the value into 64bit. If it is an integral type, just cast it. Mixing is doing the rest.
    // If it isn't an integral we need to hash it.
     template <typename Arg>
-    [[nodiscard]] constexpr static auto to64(Arg const& arg) -> uint64_t {
+    [[nodiscard]] constexpr static auto to64(Arg const& arg) -> std::uint64_t {
         if constexpr (std::is_integral_v<Arg> || std::is_enum_v<Arg>) {
-            return static_cast<uint64_t>(arg);
+            return static_cast<std::uint64_t>(arg);
         } else {
             return hash<Arg>{}(arg);
         }
     }
 
-    [[nodiscard]] ANKERL_UNORDERED_DENSE_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK static auto mix64(uint64_t state, uint64_t v)
-        -> uint64_t {
-        return detail::wyhash::mix(state + v, uint64_t{0x9ddfea08eb382d69});
+    [[nodiscard]] ANKERL_UNORDERED_DENSE_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK static auto mix64(std::uint64_t state,
+                                                                                                std::uint64_t v)
+        -> std::uint64_t {
+        return detail::wyhash::mix(state + v, std::uint64_t{0x9ddfea08eb382d69});
     }
 
     // Creates a buffer that holds all the data from each element of the tuple. If possible we memcpy the data directly. If
     // not, we hash the object and use this for the array. Size of the array is known at compile time, and memcpy is optimized
     // away, so filling the buffer is highly efficient. Finally, call wyhash with this buffer.
     template <typename T, std::size_t... Idx>
-    [[nodiscard]] static auto calc_hash(T const& t, std::index_sequence<Idx...> /*unused*/) noexcept -> uint64_t {
-        auto h = uint64_t{};
+    [[nodiscard]] static auto calc_hash(T const& t, std::index_sequence<Idx...> /*unused*/) noexcept -> std::uint64_t {
+        auto h = std::uint64_t{};
         ((h = mix64(h, to64(std::get<Idx>(t)))), ...);
         return h;
     }
@@ -419,7 +371,7 @@ struct tuple_hash_helper {
 template <typename... Args>
 struct hash<std::tuple<Args...>> : tuple_hash_helper<Args...> {
     using is_avalanching = void;
-    auto operator()(std::tuple<Args...> const& t) const noexcept -> uint64_t {
+    auto operator()(std::tuple<Args...> const& t) const noexcept -> std::uint64_t {
         return tuple_hash_helper<Args...>::calc_hash(t, std::index_sequence_for<Args...>{});
     }
 };
@@ -427,19 +379,19 @@ struct hash<std::tuple<Args...>> : tuple_hash_helper<Args...> {
 template <typename A, typename B>
 struct hash<std::pair<A, B>> : tuple_hash_helper<A, B> {
     using is_avalanching = void;
-    auto operator()(std::pair<A, B> const& t) const noexcept -> uint64_t {
+    auto operator()(std::pair<A, B> const& t) const noexcept -> std::uint64_t {
         return tuple_hash_helper<A, B>::calc_hash(t, std::index_sequence_for<A, B>{});
     }
 };
 
 // NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
 #    define ANKERL_UNORDERED_DENSE_HASH_STATICCAST(T)                    \
         template <>                                                      \
         struct hash<T> {                                                 \
             using is_avalanching = void;                                 \
-            auto operator()(T const& obj) const noexcept -> uint64_t {   \
-                return detail::wyhash::hash(static_cast<uint64_t>(obj)); \
+            auto operator()(T const& obj) const noexcept -> std::uint64_t {   \
+                return detail::wyhash::hash(static_cast<std::uint64_t>(obj)); \
             }                                                            \
         }
 
 #    if defined(__GNUC__) && !defined(__clang__)
@@ -475,19 +427,19 @@ ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long long);
 namespace bucket_type {
 
 struct standard {
-    static constexpr uint32_t dist_inc = 1U << 8U;             // skip 1 byte fingerprint
-    static constexpr uint32_t fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint
-    uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash
-    uint32_t m_value_idx;            // index into the m_values vector.
+    static constexpr std::uint32_t dist_inc = 1U << 8U;             // skip 1 byte fingerprint
+    static constexpr std::uint32_t fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint
+    std::uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash
+    std::uint32_t m_value_idx;            // index into the m_values vector.
 };
 
 ANKERL_UNORDERED_DENSE_PACK(struct big {
-    static constexpr uint32_t dist_inc = 1U << 8U;             // skip 1 byte fingerprint
-    static constexpr uint32_t fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint
-    uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash
-    size_t m_value_idx;              // index into the m_values vector.
+    static constexpr std::uint32_t dist_inc = 1U << 8U;             // skip 1 byte fingerprint
+    static constexpr std::uint32_t fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint
+    std::uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash
+    std::size_t m_value_idx;              // index into the m_values vector.
 });
 
 } // namespace bucket_type
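The two bucket layouts differ only in the width of the value index: standard packs a bucket into 8 bytes, while big spends a full std::size_t on the index for tables that may exceed the 32-bit entry limit. A hedged sketch of selecting the big layout through the map alias's Bucket parameter (the defaults are spelled out explicitly; the alias name is mine):

    #include <cstdint>
    #include <functional>
    #include <memory>
    #include <utility>
    #include "gdbsupport/unordered_dense/unordered_dense.h"

    // Same defaults as ankerl::unordered_dense::map, except bucket_type::big,
    // so each bucket stores a std::size_t value index instead of std::uint32_t.
    using big_map = ankerl::unordered_dense::map<
        std::uint64_t, int,
        ankerl::unordered_dense::hash<std::uint64_t>,
        std::equal_to<std::uint64_t>,
        std::allocator<std::pair<std::uint64_t, int>>,
        ankerl::unordered_dense::bucket_type::big>;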
@@ -525,7 +477,7 @@ template <typename T>
 using detect_iterator = typename T::iterator;
 
 template <typename T>
-using detect_reserve = decltype(std::declval<T&>().reserve(size_t{}));
+using detect_reserve = decltype(std::declval<T&>().reserve(std::size_t{}));
 
 // enable_if helpers
@@ -559,7 +511,7 @@ struct base_table_type_set {};
 // It allocates blocks of equal size and puts them into the m_blocks vector. That means it can grow simply by adding a new
 // block to the back of m_blocks, and doesn't double its size like an std::vector. The disadvantage is that memory is not
 // linear and thus there is one more indirection necessary for indexing.
-template <typename T, typename Allocator = std::allocator<T>, size_t MaxSegmentSizeBytes = 4096>
+template <typename T, typename Allocator = std::allocator<T>, std::size_t MaxSegmentSizeBytes = 4096>
 class segmented_vector {
     template <bool IsConst>
     class iter_t;
@@ -579,11 +531,11 @@ public:
 private:
     using vec_alloc = typename std::allocator_traits<Allocator>::template rebind_alloc<pointer>;
     std::vector<pointer, vec_alloc> m_blocks{};
-    size_t m_size{};
+    std::size_t m_size{};
 
     // Calculates the maximum number for x in (s << x) <= max_val
-    static constexpr auto num_bits_closest(size_t max_val, size_t s) -> size_t {
-        auto f = size_t{0};
+    static constexpr auto num_bits_closest(std::size_t max_val, std::size_t s) -> std::size_t {
+        auto f = std::size_t{0};
         while (s << (f + 1) <= max_val) {
             ++f;
         }
@@ -602,7 +554,7 @@ private:
     class iter_t {
         using ptr_t = std::conditional_t<IsConst, segmented_vector::const_pointer const*, segmented_vector::pointer*>;
         ptr_t m_data{};
-        size_t m_idx{};
+        std::size_t m_idx{};
 
         template <bool B>
         friend class iter_t;
@@ -622,7 +574,7 @@ private:
             : m_data(other.m_data)
             , m_idx(other.m_idx) {}
 
-        constexpr iter_t(ptr_t data, size_t idx) noexcept
+        constexpr iter_t(ptr_t data, std::size_t idx) noexcept
             : m_data(data)
             , m_idx(idx) {}
 
@@ -656,7 +608,7 @@ private:
         }
 
         [[nodiscard]] constexpr auto operator+(difference_type diff) const noexcept -> iter_t {
-            return {m_data, static_cast<size_t>(static_cast<difference_type>(m_idx) + diff)};
+            return {m_data, static_cast<std::size_t>(static_cast<difference_type>(m_idx) + diff)};
         }
 
         constexpr auto operator+=(difference_type diff) noexcept -> iter_t& {
@@ -747,7 +699,7 @@ private:
         }
     }
 
-    [[nodiscard]] static constexpr auto calc_num_blocks_for_capacity(size_t capacity) {
+    [[nodiscard]] static constexpr auto calc_num_blocks_for_capacity(std::size_t capacity) {
         return (capacity + num_elements_in_block - 1U) / num_elements_in_block;
     }
@@ -812,20 +764,20 @@ public:
         dealloc();
     }
 
-    [[nodiscard]] constexpr auto size() const -> size_t {
+    [[nodiscard]] constexpr auto size() const -> std::size_t {
         return m_size;
     }
 
-    [[nodiscard]] constexpr auto capacity() const -> size_t {
+    [[nodiscard]] constexpr auto capacity() const -> std::size_t {
         return m_blocks.size() * num_elements_in_block;
     }
 
     // Indexing is highly performance critical
-    [[nodiscard]] constexpr auto operator[](size_t i) const noexcept -> T const& {
+    [[nodiscard]] constexpr auto operator[](std::size_t i) const noexcept -> T const& {
         return m_blocks[i >> num_bits][i & mask];
     }
 
-    [[nodiscard]] constexpr auto operator[](size_t i) noexcept -> T& {
+    [[nodiscard]] constexpr auto operator[](std::size_t i) noexcept -> T& {
         return m_blocks[i >> num_bits][i & mask];
     }
@@ -865,7 +817,7 @@ public:
         return 0 == m_size;
     }
 
-    void reserve(size_t new_capacity) {
+    void reserve(std::size_t new_capacity) {
         m_blocks.reserve(calc_num_blocks_for_capacity(new_capacity));
         while (new_capacity > capacity()) {
             increase_capacity();
@@ -913,7 +865,7 @@ public:
     void clear() {
         if constexpr (!std::is_trivially_destructible_v<T>) {
-            for (size_t i = 0, s = size(); i < s; ++i) {
+            for (std::size_t i = 0, s = size(); i < s; ++i) {
                 operator[](i).~T();
             }
         }
@@ -962,7 +914,7 @@ private:
                                                            default_bucket_container_type,
                                                            BucketContainer>;
 
-    static constexpr uint8_t initial_shifts = 64 - 2; // 2^(64-m_shift) number of buckets
+    static constexpr std::uint8_t initial_shifts = 64 - 2; // 2^(64-m_shift) number of buckets
     static constexpr float default_max_load_factor = 0.8F;
 
 public:
@@ -990,11 +942,11 @@ private:
     value_container_type m_values{}; // Contains all the key-value pairs in one densely stored container. No holes.
     bucket_container_type m_buckets{};
-    size_t m_max_bucket_capacity = 0;
+    std::size_t m_max_bucket_capacity = 0;
     float m_max_load_factor = default_max_load_factor;
     Hash m_hash{};
     KeyEqual m_equal{};
-    uint8_t m_shifts = initial_shifts;
+    std::uint8_t m_shifts = initial_shifts;
 
     [[nodiscard]] auto next(value_idx_type bucket_idx) const -> value_idx_type {
         if (ANKERL_UNORDERED_DENSE_UNLIKELY(bucket_idx + 1U == bucket_count()))
@@ -1006,15 +958,15 @@ private:
     }
 
     // Helper to access bucket through pointer types
-    [[nodiscard]] static constexpr auto at(bucket_container_type& bucket, size_t offset) -> Bucket& {
+    [[nodiscard]] static constexpr auto at(bucket_container_type& bucket, std::size_t offset) -> Bucket& {
         return bucket[offset];
     }
 
-    [[nodiscard]] static constexpr auto at(const bucket_container_type& bucket, size_t offset) -> const Bucket& {
+    [[nodiscard]] static constexpr auto at(const bucket_container_type& bucket, std::size_t offset) -> const Bucket& {
         return bucket[offset];
     }
 
-    // use the dist_inc and dist_dec functions so that uint16_t types work without warning
+    // use the dist_inc and dist_dec functions so that std::uint16_t types work without warning
     [[nodiscard]] static constexpr auto dist_inc(dist_and_fingerprint_type x) -> dist_and_fingerprint_type {
         return static_cast<dist_and_fingerprint_type>(x + Bucket::dist_inc);
     }
@@ -1025,10 +977,10 @@ private:
 
     // The goal of mixed_hash is to always produce a high quality 64bit hash.
     template <typename K>
-    [[nodiscard]] constexpr auto mixed_hash(K const& key) const -> uint64_t {
+    [[nodiscard]] constexpr auto mixed_hash(K const& key) const -> std::uint64_t {
         if constexpr (is_detected_v<detect_avalanching, Hash>) {
             // we know that the hash is good because is_avalanching.
-            if constexpr (sizeof(decltype(m_hash(key))) < sizeof(uint64_t)) {
+            if constexpr (sizeof(decltype(m_hash(key))) < sizeof(std::uint64_t)) {
                 // 32bit hash and is_avalanching => multiply with a constant to avalanche bits upwards
                 return m_hash(key) * UINT64_C(0x9ddfea08eb382d69);
             } else {
@@ -1041,11 +993,11 @@ private:
         }
     }
 
-    [[nodiscard]] constexpr auto dist_and_fingerprint_from_hash(uint64_t hash) const -> dist_and_fingerprint_type {
+    [[nodiscard]] constexpr auto dist_and_fingerprint_from_hash(std::uint64_t hash) const -> dist_and_fingerprint_type {
         return Bucket::dist_inc | (static_cast<dist_and_fingerprint_type>(hash) & Bucket::fingerprint_mask);
     }
 
-    [[nodiscard]] constexpr auto bucket_idx_from_hash(uint64_t hash) const -> value_idx_type {
+    [[nodiscard]] constexpr auto bucket_idx_from_hash(std::uint64_t hash) const -> value_idx_type {
         return static_cast<value_idx_type>(hash >> m_shifts);
     }
@@ -1079,13 +1031,24 @@ private:
         at(m_buckets, place) = bucket;
     }
 
-    [[nodiscard]] static constexpr auto calc_num_buckets(uint8_t shifts) -> size_t {
-        return (std::min)(max_bucket_count(), size_t{1} << (64U - shifts));
+    void erase_and_shift_down(value_idx_type bucket_idx) {
+        // shift down until either empty or an element with correct spot is found
+        auto next_bucket_idx = next(bucket_idx);
+        while (at(m_buckets, next_bucket_idx).m_dist_and_fingerprint >= Bucket::dist_inc * 2) {
+            auto& next_bucket = at(m_buckets, next_bucket_idx);
+            at(m_buckets, bucket_idx) = {dist_dec(next_bucket.m_dist_and_fingerprint), next_bucket.m_value_idx};
+            bucket_idx = std::exchange(next_bucket_idx, next(next_bucket_idx));
+        }
+        at(m_buckets, bucket_idx) = {};
     }
 
-    [[nodiscard]] constexpr auto calc_shifts_for_size(size_t s) const -> uint8_t {
+    [[nodiscard]] static constexpr auto calc_num_buckets(std::uint8_t shifts) -> std::size_t {
+        return (std::min)(max_bucket_count(), std::size_t{1} << (64U - shifts));
+    }
+
+    [[nodiscard]] constexpr auto calc_shifts_for_size(std::size_t s) const -> std::uint8_t {
         auto shifts = initial_shifts;
-        while (shifts > 0 && static_cast<size_t>(static_cast<float>(calc_num_buckets(shifts)) * max_load_factor()) < s) {
+        while (shifts > 0 && static_cast<std::size_t>(static_cast<float>(calc_num_buckets(shifts)) * max_load_factor()) < s) {
             --shifts;
         }
         return shifts;
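The new erase_and_shift_down helper factors robin-hood backward shift deletion out of do_erase so that the new replace_key below can reuse it. As a standalone illustration of the idea (toy types, not the library's code; here dist 0 means empty and 1 means the entry sits in its home bucket):

    #include <array>
    #include <cstddef>
    #include <cstdint>

    struct toy_bucket {
        std::uint32_t dist = 0;       // 0 = empty, 1 = home bucket, 2+ = displaced
        std::uint32_t value_idx = 0;
    };

    // After vacating `idx`, pull each displaced successor back one slot,
    // decrementing its distance, until an empty or at-home bucket stops us.
    template <std::size_t N>
    void shift_down(std::array<toy_bucket, N>& buckets, std::size_t idx) {
        auto next = (idx + 1) % N;
        while (buckets[next].dist >= 2) {
            buckets[idx] = {buckets[next].dist - 1, buckets[next].value_idx};
            idx = next;
            next = (next + 1) % N;
        }
        buckets[idx] = {};  // the last vacated slot becomes empty
    }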
@@ -1130,7 +1093,7 @@
             if constexpr (has_reserve<bucket_container_type>) {
                 m_buckets.reserve(num_buckets);
             }
-            for (size_t i = m_buckets.size(); i < num_buckets; ++i) {
+            for (std::size_t i = m_buckets.size(); i < num_buckets; ++i) {
                 m_buckets.emplace_back();
             }
         } else {
@@ -1183,15 +1146,7 @@ private:
     template <typename Op>
     void do_erase(value_idx_type bucket_idx, Op handle_erased_value) {
         auto const value_idx_to_remove = at(m_buckets, bucket_idx).m_value_idx;
-
-        // shift down until either empty or an element with correct spot is found
-        auto next_bucket_idx = next(bucket_idx);
-        while (at(m_buckets, next_bucket_idx).m_dist_and_fingerprint >= Bucket::dist_inc * 2) {
-            at(m_buckets, bucket_idx) = {dist_dec(at(m_buckets, next_bucket_idx).m_dist_and_fingerprint),
-                                         at(m_buckets, next_bucket_idx).m_value_idx};
-            bucket_idx = std::exchange(next_bucket_idx, next(next_bucket_idx));
-        }
-        at(m_buckets, bucket_idx) = {};
+        erase_and_shift_down(bucket_idx);
 
         handle_erased_value(std::move(m_values[value_idx_to_remove]));
 
         // update m_values
@@ -1201,9 +1156,7 @@ private:
             val = std::move(m_values.back());
 
             // update the values_idx of the moved entry. No need to play the info game, just look until we find the values_idx
-            auto mh = mixed_hash(get_key(val));
-            bucket_idx = bucket_idx_from_hash(mh);
-
+            bucket_idx = bucket_idx_from_hash(mixed_hash(get_key(val)));
             auto const values_idx_back = static_cast<value_idx_type>(m_values.size() - 1);
             while (values_idx_back != at(m_buckets, bucket_idx).m_value_idx) {
                 bucket_idx = next(bucket_idx);
@@ -1214,7 +1167,7 @@ private:
     }
 
     template <typename K, typename Op>
-    auto do_erase_key(K&& key, Op handle_erased_value) -> size_t { // NOLINT(cppcoreguidelines-missing-std-forward)
+    auto do_erase_key(K&& key, Op handle_erased_value) -> std::size_t { // NOLINT(cppcoreguidelines-missing-std-forward)
         if (empty()) {
             return 0;
         }
@@ -1348,7 +1301,7 @@ private:
     }
 
 public:
-    explicit table(size_t bucket_count,
+    explicit table(std::size_t bucket_count,
                    Hash const& hash = Hash(),
                    KeyEqual const& equal = KeyEqual(),
                    allocator_type const& alloc_or_container = allocator_type())
@@ -1367,10 +1320,10 @@ public:
     table()
         : table(0) {}
 
-    table(size_t bucket_count, allocator_type const& alloc)
+    table(std::size_t bucket_count, allocator_type const& alloc)
         : table(bucket_count, Hash(), KeyEqual(), alloc) {}
 
-    table(size_t bucket_count, Hash const& hash, allocator_type const& alloc)
+    table(std::size_t bucket_count, Hash const& hash, allocator_type const& alloc)
         : table(bucket_count, hash, KeyEqual(), alloc) {}
 
     explicit table(allocator_type const& alloc)
@@ -1415,7 +1368,7 @@ public:
     }
 
     table(std::initializer_list<value_type> ilist,
-          size_t bucket_count = 0,
+          std::size_t bucket_count = 0,
           Hash const& hash = Hash(),
           KeyEqual const& equal = KeyEqual(),
           allocator_type const& alloc = allocator_type())
@@ -1522,15 +1475,15 @@ public:
         return m_values.empty();
     }
 
-    [[nodiscard]] auto size() const noexcept -> size_t {
+    [[nodiscard]] auto size() const noexcept -> std::size_t {
         return m_values.size();
     }
 
-    [[nodiscard]] static constexpr auto max_size() noexcept -> size_t {
-        if constexpr ((std::numeric_limits<value_idx_type>::max)() == (std::numeric_limits<size_t>::max)()) {
-            return size_t{1} << (sizeof(value_idx_type) * 8 - 1);
+    [[nodiscard]] static constexpr auto max_size() noexcept -> std::size_t {
+        if constexpr ((std::numeric_limits<value_idx_type>::max)() == (std::numeric_limits<std::size_t>::max)()) {
+            return std::size_t{1} << (sizeof(value_idx_type) * 8 - 1);
         } else {
-            return size_t{1} << (sizeof(value_idx_type) * 8);
+            return std::size_t{1} << (sizeof(value_idx_type) * 8);
         }
     }
@@ -1787,6 +1740,59 @@ public:
         return do_try_emplace(std::forward<K>(key), std::forward<Args>(args)...).first;
     }
 
+    // Replaces the key at the given iterator with new_key. This does not change any other data in the underlying table, so
+    // all iterators and references remain valid. However, this operation can fail if new_key already exists in the table.
+    // In that case, returns {iterator to the already existing new_key, false} and no change is made.
+    //
+    // In the case of a set, this effectively removes the old key and inserts the new key at the same spot, which is more
+    // efficient than removing the old key and inserting the new key because it avoids repositioning the last element.
+    template <typename K>
+    auto replace_key(iterator it, K&& new_key) -> std::pair<iterator, bool> {
+        auto const new_key_hash = mixed_hash(new_key);
+
+        // first, check if new_key already exists and return if so
+        auto dist_and_fingerprint = dist_and_fingerprint_from_hash(new_key_hash);
+        auto bucket_idx = bucket_idx_from_hash(new_key_hash);
+        while (dist_and_fingerprint <= at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
+            auto const& bucket = at(m_buckets, bucket_idx);
+            if (dist_and_fingerprint == bucket.m_dist_and_fingerprint &&
+                m_equal(new_key, get_key(m_values[bucket.m_value_idx]))) {
+                return {begin() + static_cast<difference_type>(bucket.m_value_idx), false};
+            }
+            dist_and_fingerprint = dist_inc(dist_and_fingerprint);
+            bucket_idx = next(bucket_idx);
+        }
+
+        // const_cast is needed because iterator for the set is always const, so adding another get_key overload is not
+        // feasible.
+        auto& target_key = const_cast<key_type&>(get_key(*it));
+        auto const old_key_bucket_idx = bucket_idx_from_hash(mixed_hash(target_key));
+
+        // Replace the key before doing any bucket changes. If it throws, no harm done, we are still in a valid state as we
+        // have not modified any buckets yet.
+        target_key = std::forward<K>(new_key);
+
+        auto const value_idx = static_cast<value_idx_type>(it - begin());
+
+        // Find the bucket containing our value_idx. It's guaranteed we find it, so no other stopping condition needed.
+        bucket_idx = old_key_bucket_idx;
+        while (value_idx != at(m_buckets, bucket_idx).m_value_idx) {
+            bucket_idx = next(bucket_idx);
+        }
+        erase_and_shift_down(bucket_idx);
+
+        // place the new bucket
+        dist_and_fingerprint = dist_and_fingerprint_from_hash(new_key_hash);
+        bucket_idx = bucket_idx_from_hash(new_key_hash);
+        while (dist_and_fingerprint < at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
+            dist_and_fingerprint = dist_inc(dist_and_fingerprint);
+            bucket_idx = next(bucket_idx);
+        }
+        place_and_shift_up({dist_and_fingerprint, value_idx}, bucket_idx);
+
+        return {it, true};
+    }
+
     auto erase(iterator it) -> iterator {
         auto hash = mixed_hash(get_key(*it));
         auto bucket_idx = bucket_idx_from_hash(hash);
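replace_key is the one genuinely new API in this release. A hedged usage sketch against the set alias defined later in this header (key strings invented for illustration):

    #include <string>
    #include "gdbsupport/unordered_dense/unordered_dense.h"

    int main() {
        ankerl::unordered_dense::set<std::string> names{"old_name", "other"};
        auto it = names.find("old_name");
        // Rekeys in place: only this element is re-bucketed, so other
        // iterators and references stay valid.
        auto [pos, ok] = names.replace_key(it, std::string{"new_name"});
        // ok == false would mean "new_name" already existed; nothing changed then.
        return ok ? 0 : 1;
    }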
@@ -1851,7 +1857,7 @@ public:
         return begin() + idx_first;
     }
 
-    auto erase(Key const& key) -> size_t {
+    auto erase(Key const& key) -> std::size_t {
         return do_erase_key(key, [](value_type const& /*unused*/) {
         });
     }
@@ -1865,7 +1871,7 @@ public:
     }
 
     template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
-    auto erase(K&& key) -> size_t {
+    auto erase(K&& key) -> std::size_t {
         return do_erase_key(std::forward<K>(key), [](value_type const& /*unused*/) {
         });
     }
@@ -1934,12 +1940,12 @@ public:
         return try_emplace(std::forward<K>(key)).first->second;
     }
 
-    auto count(Key const& key) const -> size_t {
+    auto count(Key const& key) const -> std::size_t {
         return find(key) == end() ? 0 : 1;
     }
 
     template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
-    auto count(K const& key) const -> size_t {
+    auto count(K const& key) const -> std::size_t {
         return find(key) == end() ? 0 : 1;
     }
@@ -1994,11 +2000,11 @@ public:
 
     // bucket interface ///////////////////////////////////////////////////////
 
-    auto bucket_count() const noexcept -> size_t { // NOLINT(modernize-use-nodiscard)
+    auto bucket_count() const noexcept -> std::size_t { // NOLINT(modernize-use-nodiscard)
         return m_buckets.size();
     }
 
-    static constexpr auto max_bucket_count() noexcept -> size_t { // NOLINT(modernize-use-nodiscard)
+    static constexpr auto max_bucket_count() noexcept -> std::size_t { // NOLINT(modernize-use-nodiscard)
         return max_size();
     }
@@ -2019,7 +2025,7 @@ public:
         }
     }
 
-    void rehash(size_t count) {
+    void rehash(std::size_t count) {
         count = (std::min)(count, max_size());
         auto shifts = calc_shifts_for_size((std::max)(count, size()));
         if (shifts != m_shifts) {
@@ -2031,7 +2037,7 @@ public:
         }
     }
 
-    void reserve(size_t capa) {
+    void reserve(std::size_t capa) {
         capa = (std::min)(capa, max_size());
         if constexpr (has_reserve<value_container_type>) {
             // std::deque doesn't have reserve(). Make sure we only call when available
@@ -2094,49 +2100,49 @@ public:
 
 } // namespace detail
 
-ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
-                                        class T,
-                                        class Hash = hash<Key>,
-                                        class KeyEqual = std::equal_to<Key>,
-                                        class AllocatorOrContainer = std::allocator<std::pair<Key, T>>,
-                                        class Bucket = bucket_type::standard,
-                                        class BucketContainer = detail::default_container_t>
+template <class Key,
+          class T,
+          class Hash = hash<Key>,
+          class KeyEqual = std::equal_to<Key>,
+          class AllocatorOrContainer = std::allocator<std::pair<Key, T>>,
+          class Bucket = bucket_type::standard,
+          class BucketContainer = detail::default_container_t>
 using map = detail::table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket, BucketContainer, false>;
 
-ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
-                                        class T,
-                                        class Hash = hash<Key>,
-                                        class KeyEqual = std::equal_to<Key>,
-                                        class AllocatorOrContainer = std::allocator<std::pair<Key, T>>,
-                                        class Bucket = bucket_type::standard,
-                                        class BucketContainer = detail::default_container_t>
+template <class Key,
+          class T,
+          class Hash = hash<Key>,
+          class KeyEqual = std::equal_to<Key>,
+          class AllocatorOrContainer = std::allocator<std::pair<Key, T>>,
+          class Bucket = bucket_type::standard,
+          class BucketContainer = detail::default_container_t>
 using segmented_map = detail::table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket, BucketContainer, true>;
 
-ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
-                                        class Hash = hash<Key>,
-                                        class KeyEqual = std::equal_to<Key>,
-                                        class AllocatorOrContainer = std::allocator<Key>,
-                                        class Bucket = bucket_type::standard,
-                                        class BucketContainer = detail::default_container_t>
+template <class Key,
+          class Hash = hash<Key>,
+          class KeyEqual = std::equal_to<Key>,
+          class AllocatorOrContainer = std::allocator<Key>,
+          class Bucket = bucket_type::standard,
+          class BucketContainer = detail::default_container_t>
 using set = detail::table<Key, void, Hash, KeyEqual, AllocatorOrContainer, Bucket, BucketContainer, false>;
 
-ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
-                                        class Hash = hash<Key>,
-                                        class KeyEqual = std::equal_to<Key>,
-                                        class AllocatorOrContainer = std::allocator<Key>,
-                                        class Bucket = bucket_type::standard,
-                                        class BucketContainer = detail::default_container_t>
+template <class Key,
+          class Hash = hash<Key>,
+          class KeyEqual = std::equal_to<Key>,
+          class AllocatorOrContainer = std::allocator<Key>,
+          class Bucket = bucket_type::standard,
+          class BucketContainer = detail::default_container_t>
 using segmented_set = detail::table<Key, void, Hash, KeyEqual, AllocatorOrContainer, Bucket, BucketContainer, true>;
 
 #    if defined(ANKERL_UNORDERED_DENSE_PMR)
 
 namespace pmr {
 
-ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
-                                        class T,
-                                        class Hash = hash<Key>,
-                                        class KeyEqual = std::equal_to<Key>,
-                                        class Bucket = bucket_type::standard>
+template <class Key,
+          class T,
+          class Hash = hash<Key>,
+          class KeyEqual = std::equal_to<Key>,
+          class Bucket = bucket_type::standard>
 using map = detail::table<Key,
                           T,
                           Hash,
@@ -2146,11 +2152,11 @@ using map = detail::table<Key,
                           detail::default_container_t,
                           false>;
 
-ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
-                                        class T,
-                                        class Hash = hash<Key>,
-                                        class KeyEqual = std::equal_to<Key>,
-                                        class Bucket = bucket_type::standard>
+template <class Key,
+          class T,
+          class Hash = hash<Key>,
+          class KeyEqual = std::equal_to<Key>,
+          class Bucket = bucket_type::standard>
 using segmented_map = detail::table<Key,
                                     T,
                                     Hash,
@@ -2160,10 +2166,7 @@ using segmented_map = detail::table<Key,
                                     detail::default_container_t,
                                     true>;
 
-ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
-                                        class Hash = hash<Key>,
-                                        class KeyEqual = std::equal_to<Key>,
-                                        class Bucket = bucket_type::standard>
+template <class Key, class Hash = hash<Key>, class KeyEqual = std::equal_to<Key>, class Bucket = bucket_type::standard>
 using set = detail::table<Key,
                           void,
                           Hash,
@@ -2173,10 +2176,7 @@ using set = detail::table<Key,
                           detail::default_container_t,
                           false>;
 
-ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
-                                        class Hash = hash<Key>,
-                                        class KeyEqual = std::equal_to<Key>,
-                                        class Bucket = bucket_type::standard>
+template <class Key, class Hash = hash<Key>, class KeyEqual = std::equal_to<Key>, class Bucket = bucket_type::standard>
 using segmented_set = detail::table<Key,
                                     void,
                                     Hash,
@@ -2202,20 +2202,20 @@ using segmented_set = detail::table<Key,
 
 namespace std { // NOLINT(cert-dcl58-cpp)
 
-ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
-                                        class T,
-                                        class Hash,
-                                        class KeyEqual,
-                                        class AllocatorOrContainer,
-                                        class Bucket,
-                                        class Pred,
-                                        class BucketContainer,
-                                        bool IsSegmented>
+template <class Key,
+          class T,
+          class Hash,
+          class KeyEqual,
+          class AllocatorOrContainer,
+          class Bucket,
+          class Pred,
+          class BucketContainer,
+          bool IsSegmented>
 // NOLINTNEXTLINE(cert-dcl58-cpp)
 auto erase_if(
     ankerl::unordered_dense::detail::table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket, BucketContainer, IsSegmented>&
         map,
-    Pred pred) -> size_t {
+    Pred pred) -> std::size_t {
     using map_t = ankerl::unordered_dense::detail::
         table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket, BucketContainer, IsSegmented>;

--- a/gdbsupport/unordered-map.h
+++ b/gdbsupport/unordered-map.h

@@ -18,7 +18,7 @@
 #ifndef GDBSUPPORT_UNORDERED_MAP_H
 #define GDBSUPPORT_UNORDERED_MAP_H
 
-#include "unordered_dense.h"
+#include "unordered_dense/unordered_dense.h"
 
 namespace gdb
 {

--- a/gdbsupport/unordered-set.h
+++ b/gdbsupport/unordered-set.h

@@ -18,7 +18,7 @@
 #ifndef GDBSUPPORT_UNORDERED_SET_H
 #define GDBSUPPORT_UNORDERED_SET_H
 
-#include "unordered_dense.h"
+#include "unordered_dense/unordered_dense.h"
 
 namespace gdb
 {
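Nothing changes for users of the wrappers; a minimal sketch, assuming gdbsupport/unordered-map.h keeps defining the gdb::unordered_map alias as before (only its internal include path moved in this commit):

    #include "gdbsupport/unordered-map.h"

    // The alias forwards to ankerl::unordered_dense::map under the hood.
    static gdb::unordered_map<int, const char*> symbols = {{1, "main"}};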