// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#ifndef CARBON_COMMON_RAW_HASHTABLE_H_
#define CARBON_COMMON_RAW_HASHTABLE_H_

#include <algorithm>
#include <concepts>
#include <cstddef>
#include <cstring>
#include <iterator>
#include <new>
#include <type_traits>
#include <utility>

#include "common/check.h"
#include "common/hashing.h"
#include "common/raw_hashtable_metadata_group.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"

// A namespace collecting a set of low-level utilities for building hashtable
// data structures. These should only be used as implementation details of
// higher-level data-structure APIs.
//
// The utilities here use the `KeyContext` provided by `hashtable_key_context.h`
// to support the necessary hashtable operations on keys: hashing and
// comparison. This also serves as the customization point for hashtables built
// on this infrastructure for those operations. See that header file for
// details.
//
// These utilities support hashtables following a *specific* API design pattern,
// and using Small-Size Optimization, or "SSO", when desired. We expect there to
// be three layers to any hashtable design:
//
// - A *view* type: a read-only view of the hashtable contents. This type should
//   be a value type and is expected to be passed by-value in APIs. However, it
//   will have `const`-reference semantics, much like a `std::string_view`. Note
//   that only the *table* is read-only; the *entries* remain mutable.
//
// - A *base* type: a base class type of the actual hashtable, which allows
//   almost all mutable operations but erases any specific SSO buffer size.
//   Because this is a base of the actual hashtable, it is designed to be
//   passed as a non-`const` reference or pointer.
//
// - A *table* type: the actual hashtable which derives from the base type and
//   adds any desired SSO storage buffer. Beyond the physical storage, it also
//   allows resetting the table to its initial state & allocated size, as well
//   as copying and moving the table.
//
// For complete examples of the API design, see `set.h` for a hashtable-based
// set data structure, and `map.h` for a hashtable-based map data structure.
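//
// For illustration, a minimal sketch of that layering for a hypothetical
// `IntSet` type -- the names here are invented for this example, see `set.h`
// for a real instance of the pattern:
//
//   class IntSetView : ViewImpl<int, void, SomeKeyContext> { /*...*/ };
//   class IntSetBase : protected BaseImpl<int, void, SomeKeyContext> {
//     /*...*/
//   };
//   template <ssize_t SmallSize>
//   class IntSet : TableImpl<IntSetBase, SmallSize> { /*...*/ };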
//
// The hashtable design implemented here has several key invariants and design
// elements that are essential to all three of the types above and the
// functionality they provide.
//
// - The underlying hashtable uses [open addressing], a power-of-two table size,
//   and quadratic probing rather than closed addressing and chaining.
//
//   [open addressing]: https://en.wikipedia.org/wiki/Open_addressing
//
// - Each _slot_ in the table corresponds to a key, a value, and one byte of
//   metadata. Each _entry_ is a key and value. The key and value for an entry
//   are stored together.
//
// - The allocated storage is organized into an array of metadata bytes followed
//   by an array of entry storage.
//
// - The metadata byte corresponding to each entry marks that entry as either
//   empty, deleted, or present. When present, 7 bits of the metadata byte also
//   store a 7-bit tag extracted from the hash of the entry's key.
//
// - The storage for an entry is an internal type that should not be exposed to
//   users; only the underlying keys and values should be.
//
// - The hash addressing and probing occurs over *groups* of slots rather than
//   individual entries. When inserting a new entry, it can be added to the
//   group it hashes to as long as that group is not full, and can even replace
//   a slot with a tombstone indicating a previously deleted entry. Only when
//   the group is full will it look at the next group in the probe sequence. As
//   a result, there may be entries in a group where a different group is the
//   start of that entry's probe sequence. Also, when performing a lookup, every
//   group in the probe sequence must be inspected for the lookup key until it
//   is found or the group has an empty slot.
//
// - Groups are scanned rapidly using the one-byte metadata for each entry in
//   the group and CPU instructions that allow comparing all of the metadata for
//   a group in parallel. For more details on the metadata group encoding and
//   scanning, see `raw_hashtable_metadata_group.h`.
//
// - `GroupSize` is a platform-specific relatively small power of two that fits
//   in some hardware register. However, `MaxGroupSize` is provided as a
//   portable max that is also a power of two. The table storage, whether
//   provided by an SSO buffer or allocated, is required to be a multiple of
//   `MaxGroupSize` to keep the requirement portable but sufficient for all
//   platforms.
//
// - There is *always* an allocated table of some multiple of `MaxGroupSize`.
//   This allows accesses to be branchless. When heap allocated, we pro-actively
//   allocate at least a minimum heap size table. When there is a small-size
//   optimization (SSO) buffer, that provides the initial allocation.
//
// - The table performs a minimal amount of bookkeeping that limits the APIs it
//   can support:
//   - `alloc_size` is the size of the table *allocated* (not *used*), and is
//     always a power of 2 at least as big as `MinAllocatedSize`.
//   - `storage` is a pointer to the storage for the `alloc_size` slots of the
//     table, and never null.
//   - `small_alloc_size` is the maximum `alloc_size` where the table is stored
//     in the object itself instead of separately on the heap. In this case,
//     `storage` points to `small_storage_`.
//   - `growth_budget` is the number of entries that may be added before the
//     table allocation is doubled. It is always
//     `GrowthThresholdForAllocSize(alloc_size)` minus the number of
//     non-empty (filled or deleted) slots. If it ever falls to 0, the table
//     is grown to keep it greater than 0.
//
//   There is also the "moved-from" state where the table may only be
//   reinitialized or destroyed; there the `alloc_size` is 0 and `storage` is
//   null. Since the table doesn't track the exact number of filled entries, it
//   doesn't support a container-style `size` API.
//
// - There is no direct iterator support because of the complexity of embedding
//   the group-based metadata scanning into an iterator model. Instead, there is
//   just a for-each method that is passed a lambda to observe all entries. The
//   order of this observation is also not guaranteed.

namespace Carbon::RawHashtable {

// Which prefetch strategies to enable can be controlled via macros to enable
// doing experiments.
//
// Currently, benchmarking on both modern AMD and ARM CPUs seems to indicate
// that the entry group prefetching is more beneficial than metadata, but that
// benefit is degraded when enabling them both. This determined our current
// default of no metadata prefetch but enabled entry group prefetch.
//
// Override these by defining them as part of the build explicitly to either `0`
// or `1`. If left undefined, the defaults will be supplied.
#ifndef CARBON_ENABLE_PREFETCH_METADATA
#define CARBON_ENABLE_PREFETCH_METADATA 0
#endif
#ifndef CARBON_ENABLE_PREFETCH_ENTRY_GROUP
#define CARBON_ENABLE_PREFETCH_ENTRY_GROUP 1
#endif

// If allocating storage, allocate a minimum of one cacheline of group metadata
// or a minimum of one group, whichever is larger.
constexpr ssize_t MinAllocatedSize = std::max<ssize_t>(64, MaxGroupSize);

// An entry in the hashtable storage of a `KeyT` and `ValueT` object.
//
// Allows manual construction, destruction, and access to these values so we can
// create arrays of the entries prior to populating them with actual keys and
// values.
template <typename KeyT, typename ValueT>
struct StorageEntry {
  static constexpr bool IsTriviallyDestructible =
      std::is_trivially_destructible_v<KeyT> &&
      std::is_trivially_destructible_v<ValueT>;

  static constexpr bool IsTriviallyRelocatable =
      IsTriviallyDestructible && std::is_trivially_move_constructible_v<KeyT> &&
      std::is_trivially_move_constructible_v<ValueT>;

  auto key() const -> const KeyT& {
    // Ensure we don't need more alignment than available. Inside a method body
    // to apply to the complete type.
    static_assert(
        alignof(StorageEntry) <= MinAllocatedSize,
        "The minimum allocated size turns into the alignment of our array of "
        "storage entries as they follow the metadata byte array.");
    return *std::launder(reinterpret_cast<const KeyT*>(&key_storage));
  }
  auto key() -> KeyT& {
    return const_cast<KeyT&>(const_cast<const StorageEntry*>(this)->key());
  }

  auto value() const -> const ValueT& {
    return *std::launder(reinterpret_cast<const ValueT*>(&value_storage));
  }
  auto value() -> ValueT& {
    return const_cast<ValueT&>(const_cast<const StorageEntry*>(this)->value());
  }

  // We handle destruction and move manually as we only want to expose distinct
  // `KeyT` and `ValueT` subobjects to user code that may need to do in-place
  // construction. As a consequence, this struct only provides the storage and
  // we have to manually manage the construction, move, and destruction of the
  // objects.
  auto Destroy() -> void {
    static_assert(!IsTriviallyDestructible,
                  "Should never instantiate when trivial!");
    key().~KeyT();
    value().~ValueT();
  }

  auto CopyFrom(const StorageEntry& entry) -> void {
    if constexpr (IsTriviallyRelocatable) {
      memcpy(this, &entry, sizeof(StorageEntry));
    } else {
      new (&key_storage) KeyT(entry.key());
      new (&value_storage) ValueT(entry.value());
    }
  }

  // Move from an expiring entry and destroy that entry's key and value.
  // Optimizes to directly use `memcpy` when correct.
  auto MoveFrom(StorageEntry&& entry) -> void {
    if constexpr (IsTriviallyRelocatable) {
      memcpy(this, &entry, sizeof(StorageEntry));
    } else {
      new (&key_storage) KeyT(std::move(entry.key()));
      entry.key().~KeyT();
      new (&value_storage) ValueT(std::move(entry.value()));
      entry.value().~ValueT();
    }
  }

  alignas(KeyT) std::byte key_storage[sizeof(KeyT)];
  alignas(ValueT) std::byte value_storage[sizeof(ValueT)];
};
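
// For illustration, a minimal sketch of the manual entry lifecycle this type
// expects, with a hypothetical `std::string` -> `int` entry (not code that
// appears in this header):
//
//   StorageEntry<std::string, int> entry;
//   // In-place construction directly into the raw storage...
//   new (&entry.key_storage) std::string("example-key");
//   new (&entry.value_storage) int(42);
//   // ...and explicit destruction through the typed accessors.
//   entry.Destroy();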

// A specialization of the storage entry for sets without a distinct value type.
// Somewhat duplicative with the key-value version, but C++ specialization makes
// doing better difficult.
template <typename KeyT>
struct StorageEntry<KeyT, void> {
  static constexpr bool IsTriviallyDestructible =
      std::is_trivially_destructible_v<KeyT>;

  static constexpr bool IsTriviallyRelocatable =
      IsTriviallyDestructible && std::is_trivially_move_constructible_v<KeyT>;

  auto key() const -> const KeyT& {
    // Ensure we don't need more alignment than available.
    static_assert(
        alignof(StorageEntry) <= MinAllocatedSize,
        "The minimum allocated size turns into the alignment of our array of "
        "storage entries as they follow the metadata byte array.");
    return *std::launder(reinterpret_cast<const KeyT*>(&key_storage));
  }
  auto key() -> KeyT& {
    return const_cast<KeyT&>(const_cast<const StorageEntry*>(this)->key());
  }

  auto Destroy() -> void {
    static_assert(!IsTriviallyDestructible,
                  "Should never instantiate when trivial!");
    key().~KeyT();
  }

  auto CopyFrom(const StorageEntry& entry) -> void {
    if constexpr (IsTriviallyRelocatable) {
      memcpy(this, &entry, sizeof(StorageEntry));
    } else {
      new (&key_storage) KeyT(entry.key());
    }
  }

  auto MoveFrom(StorageEntry&& entry) -> void {
    if constexpr (IsTriviallyRelocatable) {
      memcpy(this, &entry, sizeof(StorageEntry));
    } else {
      new (&key_storage) KeyT(std::move(entry.key()));
      entry.key().~KeyT();
    }
  }

  alignas(KeyT) std::byte key_storage[sizeof(KeyT)];
};

// Metrics describing the current state of a table, for performance analysis.
struct Metrics {
  // How many keys are present in the table.
  ssize_t key_count = 0;

  // How many slots of the table are reserved due to deleted markers required to
  // preserve probe sequences.
  ssize_t deleted_count = 0;

  // How many bytes of allocated storage are used by the table. Note, this does
  // not include the table object or any small-size buffer.
  ssize_t storage_bytes = 0;

  // How many keys have required probing beyond the initial group. These are the
  // keys with a probe distance > 0.
  ssize_t probed_key_count = 0;

  // The probe distance averaged over every key. If every key is in its initial
  // group, this will be zero as no keys will have a larger probe distance. In
  // general, we want this to be as close to zero as possible.
  double probe_avg_distance = 0.0;

  // The maximum probe distance found for a single key in the table.
  ssize_t probe_max_distance = 0;

  // The average number of probing comparisons required to locate a specific key
  // in the table. This counts the comparisons required *before* the key is
  // located -- the *failed* comparisons. The one successful comparison at the
  // end isn't counted because that focuses this metric on the overhead the
  // table is introducing, and keeps a "perfect" table with an average of `0.0`
  // here similar to the perfect average of `0.0` probe distance.
  double probe_avg_compares = 0.0;

  // The maximum number of probing comparisons required to locate a specific
  // key in the table.
  ssize_t probe_max_compares = 0;
};

// A placeholder empty type used to model pointers to the allocated buffer of
// storage.
//
// The allocated storage doesn't have a meaningful static layout -- it consists
// of an array of metadata groups followed by an array of storage entries.
// However, we want to be able to mark pointers to this and so use pointers to
// this placeholder type as that signifier.
//
// This is a complete, empty type so that it can be used as a base class of a
// specific concrete storage type for compile-time sized storage.
struct Storage {};

// Forward declaration to support friending, see the definition below.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
class BaseImpl;

// Implementation helper for defining a read-only view type for a hashtable.
//
// A specific user-facing hashtable view type should derive privately from this
// type, and forward the implementation of its interface to functions in this
// type.
//
// The methods available to user-facing hashtable types are `protected`, and
// where they are expected to directly map to a public API, named with an
// `Impl` suffix. The suffix naming ensures types don't `using` in these
// low-level APIs but declare their own and implement them by forwarding to
// these APIs. We don't want users to have to read these implementation details
// to understand their container's API, so none of these methods should be
// `using`-ed into the user-facing types.
//
// Some of the types are just convenience aliases and aren't important to
// surface as part of the user-facing type API for readers, and so those are
// reasonable to add via a `using`.
//
// Some methods are used by other parts of the raw hashtable implementation.
// Those are kept `private`, and where necessary the other components of the raw
// hashtable implementation are friended to give access to them.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
class ViewImpl {
 protected:
  using KeyT = InputKeyT;
  using ValueT = InputValueT;
  using KeyContextT = InputKeyContextT;
  using EntryT = StorageEntry<KeyT, ValueT>;
  using MetricsT = Metrics;

  friend class BaseImpl<KeyT, ValueT, KeyContextT>;
  template <typename InputBaseT, ssize_t SmallSize>
  friend class TableImpl;

  // Make more-`const` types friends to enable conversions that add `const`.
  friend class ViewImpl<const KeyT, ValueT, KeyContextT>;
  friend class ViewImpl<KeyT, const ValueT, KeyContextT>;
  friend class ViewImpl<const KeyT, const ValueT, KeyContextT>;

  ViewImpl() = default;

  // Support adding `const` to either key or value type of some other view.
  template <typename OtherKeyT, typename OtherValueT>
  // NOLINTNEXTLINE(google-explicit-constructor)
  ViewImpl(ViewImpl<OtherKeyT, OtherValueT, KeyContextT> other_view)
    requires(std::same_as<KeyT, OtherKeyT> ||
             std::same_as<KeyT, const OtherKeyT>) &&
            (std::same_as<ValueT, OtherValueT> ||
             std::same_as<ValueT, const OtherValueT>)
      : alloc_size_(other_view.alloc_size_), storage_(other_view.storage_) {}

  // Looks up an entry in the hashtable and returns its address, or null if not
  // present.
  template <typename LookupKeyT>
  auto LookupEntry(LookupKeyT lookup_key, KeyContextT key_context) const
      -> EntryT*;

  // Calls `entry_callback` for each entry in the hashtable. All the entries
  // within a specific group are visited first, and then `group_callback` is
  // called on the group itself. The `group_callback` is typically only used by
  // the internals of the hashtable.
  template <typename EntryCallbackT, typename GroupCallbackT>
  auto ForEachEntry(EntryCallbackT entry_callback,
                    GroupCallbackT group_callback) const -> void;

  // Returns a collection of informative metrics on the current state of the
  // table, useful for performance analysis. These include relatively slow to
  // compute metrics requiring deep inspection of the table's state.
  auto ComputeMetricsImpl(KeyContextT key_context) const -> MetricsT;

 private:
  ViewImpl(ssize_t alloc_size, Storage* storage)
      : alloc_size_(alloc_size), storage_(storage) {}

  // Computes the offset from the metadata array to the entries array for a
  // given size. This is trivial, but we use this routine to enforce invariants
  // on the sizes.
  static constexpr auto EntriesOffset(ssize_t alloc_size) -> ssize_t {
    CARBON_DCHECK(llvm::isPowerOf2_64(alloc_size),
                  "Size must be a power of two for a hashed buffer!");
    // The size is always a power of two. We prevent any too-small sizes so it
    // being a power of two provides the needed alignment. As a result, the
    // offset is exactly the size. We validate this here to catch alignment bugs
    // early.
    CARBON_DCHECK(static_cast<uint64_t>(alloc_size) ==
                  llvm::alignTo<alignof(EntryT)>(alloc_size));
    return alloc_size;
  }

  // Compute the allocated table's byte size.
  static constexpr auto AllocByteSize(ssize_t alloc_size) -> ssize_t {
    return EntriesOffset(alloc_size) + sizeof(EntryT) * alloc_size;
  }

  auto metadata() const -> uint8_t* {
    return reinterpret_cast<uint8_t*>(storage_);
  }
  auto entries() const -> EntryT* {
    return reinterpret_cast<EntryT*>(reinterpret_cast<std::byte*>(storage_) +
                                     EntriesOffset(alloc_size_));
  }

  // Prefetch the metadata prior to probing. This is to overlap any of the
  // memory access latency we can with the hashing of a key or other
  // latency-bound operation prior to probing.
  auto PrefetchMetadata() const -> void {
    if constexpr (CARBON_ENABLE_PREFETCH_METADATA) {
      // Prefetch with a "low" temporal locality as we're primarily expecting a
      // brief use of the metadata and then to return to application code.
      __builtin_prefetch(metadata(), /*read*/ 0, /*low-locality*/ 1);
    }
  }

  // Prefetch an entry. This prefetches for read as it is primarily expected to
  // be used in the probing path, and writing afterwards isn't especially slowed
  // down. We don't want to synthesize writes unless we *know* we're going to
  // write.
  static auto PrefetchEntryGroup(const EntryT* entry_group) -> void {
    if constexpr (CARBON_ENABLE_PREFETCH_ENTRY_GROUP) {
      // Prefetch with a "low" temporal locality as we're primarily expecting a
      // brief use of the entries and then to return to application code.
      __builtin_prefetch(entry_group, /*read*/ 0, /*low-locality*/ 1);
    }
  }

  ssize_t alloc_size_;
  Storage* storage_;
};

// Implementation helper for defining a read-write base type for a hashtable
// that type-erases any SSO buffer.
//
// A specific user-facing hashtable base type should derive using *`protected`*
// inheritance from this type, and forward the implementation of its interface
// to functions in this type.
//
// Other than the use of `protected` inheritance, the patterns for this type,
// and how to build user-facing hashtable base types from it, mirror those of
// `ViewImpl`. See its documentation for more details.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
class BaseImpl {
 protected:
  using KeyT = InputKeyT;
  using ValueT = InputValueT;
  using KeyContextT = InputKeyContextT;
  using ViewImplT = ViewImpl<KeyT, ValueT, KeyContextT>;
  using EntryT = typename ViewImplT::EntryT;
  using MetricsT = typename ViewImplT::MetricsT;

  BaseImpl(int small_alloc_size, Storage* small_storage)
      : small_alloc_size_(small_alloc_size) {
    CARBON_CHECK(small_alloc_size >= 0);
    Construct(small_storage);
  }
  // Only used for copying and moving, and leaves storage uninitialized.
  BaseImpl(ssize_t alloc_size, int growth_budget, int small_alloc_size)
      : view_impl_(alloc_size, nullptr),
        growth_budget_(growth_budget),
        small_alloc_size_(small_alloc_size) {}

  // Destruction must be handled by the table where it can destroy entries in
  // any small buffer, so make the base destructor protected but defaulted here.
  ~BaseImpl() = default;

  // NOLINTNEXTLINE(google-explicit-constructor): Designed to implicitly decay.
  operator ViewImplT() const { return view_impl(); }

  auto view_impl() const -> ViewImplT { return view_impl_; }

  // Looks up the provided key in the hashtable. If found, returns a pointer to
  // that entry and `false`.
  //
  // If not found, will locate an empty entry for inserting into, set the
  // metadata for that entry, and return a pointer to the entry and `true`. When
  // necessary, this will grow the hashtable to cause there to be sufficient
  // empty entries.
  template <typename LookupKeyT>
  auto InsertImpl(LookupKeyT lookup_key, KeyContextT key_context)
      -> std::pair<EntryT*, bool>;

  // Grow the table to a specific allocation size.
  //
  // This will grow the table if necessary for it to have an allocation size
  // of `target_alloc_size`, which must be a power of two. Note that this will
  // not allow that many keys to be inserted into the hashtable, but a smaller
  // number based on the load factor. If a specific number of insertions needs
  // to be achieved without triggering growth, use the `GrowForInsertCountImpl`
  // method.
  auto GrowToAllocSizeImpl(ssize_t target_alloc_size, KeyContextT key_context)
      -> void;

  // Grow the table to allow inserting the specified number of keys.
  auto GrowForInsertCountImpl(ssize_t count, KeyContextT key_context) -> void;

  // Looks up the entry in the hashtable, and if found destroys the entry and
  // returns `true`. If not found, returns `false`.
  //
  // Does not release any memory, just leaves a tombstone behind so this entry
  // cannot be found and the slot can in theory be re-used.
  template <typename LookupKeyT>
  auto EraseImpl(LookupKeyT lookup_key, KeyContextT key_context) -> bool;

  // Erases all entries in the hashtable but leaves the allocated storage.
  auto ClearImpl() -> void;

 private:
  template <typename InputBaseT, ssize_t SmallSize>
  friend class TableImpl;

  static constexpr ssize_t Alignment = std::max<ssize_t>(
      alignof(MetadataGroup), alignof(StorageEntry<KeyT, ValueT>));

  // Implementation of inline small storage for the provided key type, value
  // type, and small size. Specialized for a zero small size to be an empty
  // struct.
  template <ssize_t SmallSize>
  struct SmallStorage : Storage {
    alignas(Alignment) uint8_t metadata[SmallSize];
    mutable StorageEntry<KeyT, ValueT> entries[SmallSize];
  };
  // Specialized storage with no inline buffer to avoid any extra alignment.
  template <>
  struct SmallStorage<0> {};

  static auto Allocate(ssize_t alloc_size) -> Storage*;
  static auto Deallocate(Storage* storage, ssize_t alloc_size) -> void;

  auto growth_budget() const -> ssize_t { return growth_budget_; }
  auto alloc_size() const -> ssize_t { return view_impl_.alloc_size_; }
  auto alloc_size() -> ssize_t& { return view_impl_.alloc_size_; }
  auto storage() const -> Storage* { return view_impl_.storage_; }
  auto storage() -> Storage*& { return view_impl_.storage_; }
  auto metadata() const -> uint8_t* { return view_impl_.metadata(); }
  auto entries() const -> EntryT* { return view_impl_.entries(); }

  auto small_alloc_size() const -> ssize_t {
    return static_cast<unsigned>(small_alloc_size_);
  }
  auto is_small() const -> bool {
    CARBON_DCHECK(alloc_size() >= small_alloc_size());
    return alloc_size() == small_alloc_size();
  }

  // Wrapper to call `ViewImplT::PrefetchMetadata`, see that method for details.
  auto PrefetchStorage() const -> void { view_impl_.PrefetchMetadata(); }

  auto Construct(Storage* small_storage) -> void;
  auto Destroy() -> void;
  auto CopySlotsFrom(const BaseImpl& arg) -> void;
  auto MoveFrom(BaseImpl&& arg, Storage* small_storage) -> void;

  auto InsertIntoEmpty(HashCode hash) -> EntryT*;

  static auto ComputeNextAllocSize(ssize_t old_alloc_size) -> ssize_t;
  static auto GrowthThresholdForAllocSize(ssize_t alloc_size) -> ssize_t;

  auto GrowToNextAllocSize(KeyContextT key_context) -> void;
  auto GrowAndInsert(HashCode hash, KeyContextT key_context) -> EntryT*;

  ViewImplT view_impl_;
  int growth_budget_;
  int small_alloc_size_;
};

// Implementation helper for defining a hashtable type with an SSO buffer.
//
// A specific user-facing hashtable should derive privately from this
// type, and forward the implementation of its interface to functions in this
// type. It should provide the corresponding user-facing hashtable base type as
// the `InputBaseT` type parameter (rather than a key/value pair), and this type
// will in turn derive from that provided base type. This allows derived-to-base
// conversion from the user-facing hashtable type to the user-facing hashtable
// base type. And it does so keeping the inheritance linear. The resulting
// linear inheritance hierarchy for a `Map<K, T>` type will look like:
//
//   Map<K, T>
//     ↓
//   TableImpl<MapBase<K, T>>
//     ↓
//   MapBase<K, T>
//     ↓
//   BaseImpl<K, T>
//
// Other than this inheritance technique, the patterns for this type, and how to
// build user-facing hashtable types from it, mirror those of `ViewImpl`. See
// its documentation for more details.
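//
// For illustration, a minimal sketch of the derivation that produces the
// diagram above, using hypothetical names (see `map.h` for the real pattern):
//
//   template <typename K, typename T, ssize_t SmallSize>
//   class Map : TableImpl<MapBase<K, T>, SmallSize> {
//     // Public API forwards to the `*Impl` methods inherited via
//     // `MapBase<K, T>` and, transitively, `BaseImpl`.
//   };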
template <typename InputBaseT, ssize_t SmallSize>
class TableImpl : public InputBaseT {
 protected:
  using BaseT = InputBaseT;

  TableImpl() : BaseT(SmallSize, small_storage()) {}
  TableImpl(const TableImpl& arg);
  TableImpl(TableImpl&& arg) noexcept;
  auto operator=(const TableImpl& arg) -> TableImpl&;
  auto operator=(TableImpl&& arg) noexcept -> TableImpl&;
  ~TableImpl();

  // Resets the hashtable to its initial state, clearing all entries and
  // releasing all memory. If the hashtable had an SSO buffer, that is restored
  // as the storage. Otherwise, a minimum sized table storage is allocated.
  auto ResetImpl() -> void;

 private:
  using KeyT = BaseT::KeyT;
  using ValueT = BaseT::ValueT;
  using EntryT = BaseT::EntryT;
  using SmallStorage = BaseT::template SmallStorage<SmallSize>;

  auto small_storage() const -> Storage*;
  auto SetUpStorage() -> void;

  [[no_unique_address]] mutable SmallStorage small_storage_;
};

////////////////////////////////////////////////////////////////////////////////
//
// Only implementation details below this point.
//
////////////////////////////////////////////////////////////////////////////////

// Computes a seed that provides a small amount of entropy from ASLR where
// available with minimal cost. The priority is speed: this computes the
// entropy in a way that doesn't require loading from memory, merely using
// entropy that is already available.
inline auto ComputeSeed() -> uint64_t {
  // A global variable whose address is used as a seed. This allows ASLR to
  // introduce some variation in hashtable ordering when enabled via the code
  // model for globals.
  extern volatile std::byte global_addr_seed;
  return reinterpret_cast<uint64_t>(&global_addr_seed);
}

inline auto ComputeProbeMaskFromSize(ssize_t size) -> size_t {
  CARBON_DCHECK(llvm::isPowerOf2_64(size),
                "Size must be a power of two for a hashed buffer!");
  // Since `size` is a power of two, we can make sure the probes are less
  // than `size` by making the mask `size - 1`. We also mask off the low
  // bits so the probes are a multiple of the size of the groups of entries.
  return (size - 1) & ~GroupMask;
}
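
// For example, assuming `GroupMask` is `GroupSize - 1` with 16-byte groups: a
// 64-slot table yields a mask of `(64 - 1) & ~15 == 48`, so masked probe
// positions can only be the group-aligned indices 0, 16, 32, and 48.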

// This class handles building a sequence of probe indices from a given
// starting point, including both the quadratic growth and masking the index
// to stay within the bucket array size. The starting point doesn't need to be
// clamped to the size ahead of time (or even be positive); we do that
// internally.
//
// For reference on quadratic probing:
// https://en.wikipedia.org/wiki/Quadratic_probing
//
// We compute the quadratic probe index incrementally, but we can also compute
// it mathematically and will check that the incremental result matches our
// mathematical expectation. We use the quadratic probing formula of:
//
//   p(start, step) = (start + (step + step^2) / 2)  (mod size / GroupSize)
//
// However, we compute it incrementally and scale all the variables by the group
// size so it can be used as an index without an additional multiplication.
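//
// For example, with `GroupSize == 16` and `size == 64` there are 4 groups.
// Scaled down by the group size, the formula visits the group offsets
// `start + {0, 1, 3, 6}` modulo 4, i.e. `start + {0, 1, 3, 2}` -- every group
// exactly once. Scaled back up, the incremental sequence below walks the byte
// indices `p`, `p + 16`, `p + 48`, and `p + 32` (mod 64).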
class ProbeSequence {
 public:
  ProbeSequence(ssize_t start, ssize_t size) {
    mask_ = ComputeProbeMaskFromSize(size);
    p_ = start & mask_;
#ifndef NDEBUG
    start_ = start & mask_;
    size_ = size;
#endif
  }

  void Next() {
    step_ += GroupSize;
    p_ = (p_ + step_) & mask_;
#ifndef NDEBUG
    // Verify against the quadratic formula we expect to be following by scaling
    // everything down by `GroupSize`.
    CARBON_DCHECK(
        (p_ / GroupSize) ==
            ((start_ / GroupSize +
              (step_ / GroupSize + (step_ / GroupSize) * (step_ / GroupSize)) /
                  2) %
             (size_ / GroupSize)),
        "Index in probe sequence does not match the expected formula.");
    CARBON_DCHECK(step_ < size_,
                  "We necessarily visit all groups, so we can't have more "
                  "probe steps than groups.");
#endif
  }

  auto index() const -> ssize_t { return p_; }

 private:
  ssize_t step_ = 0;
  size_t mask_;
  ssize_t p_;
#ifndef NDEBUG
  ssize_t start_;
  ssize_t size_;
#endif
};

// TODO: Evaluate keeping this outlined to see if macro benchmarks observe the
// same perf hit as micro benchmarks.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
template <typename LookupKeyT>
auto ViewImpl<InputKeyT, InputValueT, InputKeyContextT>::LookupEntry(
    LookupKeyT lookup_key, KeyContextT key_context) const -> EntryT* {
  PrefetchMetadata();

  ssize_t local_size = alloc_size_;
  CARBON_DCHECK(local_size > 0);
  uint8_t* local_metadata = metadata();
  HashCode hash = key_context.HashKey(lookup_key, ComputeSeed());
  auto [hash_index, tag] = hash.ExtractIndexAndTag<7>();

  EntryT* local_entries = entries();

  // Walk through groups of entries using a quadratic probe starting from
  // `hash_index`.
  ProbeSequence s(hash_index, local_size);
  do {
    ssize_t group_index = s.index();

    // Load the group's metadata and prefetch the entries for this group. The
    // prefetch here helps hide key access latency while we're matching the
    // metadata.
    MetadataGroup g = MetadataGroup::Load(local_metadata, group_index);
    EntryT* group_entries = &local_entries[group_index];
    PrefetchEntryGroup(group_entries);

    // For each group, match the tag against the metadata to extract the
    // potentially matching entries within the group.
    auto metadata_matched_range = g.Match(tag);
    if (LLVM_LIKELY(metadata_matched_range)) {
      // If any entries in this group potentially match based on their metadata,
      // walk each candidate and compare its key to see if we have definitively
      // found a match.
      auto byte_it = metadata_matched_range.begin();
      auto byte_end = metadata_matched_range.end();
      do {
        EntryT* entry = byte_it.index_ptr(group_entries);
        if (LLVM_LIKELY(key_context.KeyEq(lookup_key, entry->key()))) {
          __builtin_assume(entry != nullptr);
          return entry;
        }
        ++byte_it;
      } while (LLVM_UNLIKELY(byte_it != byte_end));
    }

    // We failed to find a matching entry in this bucket, so check if there are
    // empty slots as that indicates we're done probing -- no later probed index
    // could have a match.
    auto empty_byte_matched_range = g.MatchEmpty();
    if (LLVM_LIKELY(empty_byte_matched_range)) {
      return nullptr;
    }

    s.Next();

    // We use a weird construct of an "unlikely" condition of `true`. The goal
    // is to get the compiler to not prioritize the back edge of the loop for
    // code layout, and in at least some tests this seems to be an effective
    // construct for achieving this.
  } while (LLVM_UNLIKELY(true));
}

// Note that we force inlining here because we expect to be called with lambdas
// that will in turn be inlined to form the loop body. We don't want function
// boundaries within the loop for performance, and the degree of simplification
// from inlining these callbacks may be difficult for the compiler to recognize
// automatically.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
template <typename EntryCallbackT, typename GroupCallbackT>
[[clang::always_inline]] auto
ViewImpl<InputKeyT, InputValueT, InputKeyContextT>::ForEachEntry(
    EntryCallbackT entry_callback, GroupCallbackT group_callback) const
    -> void {
  uint8_t* local_metadata = metadata();
  EntryT* local_entries = entries();

  ssize_t local_size = alloc_size_;
  for (ssize_t group_index = 0; group_index < local_size;
       group_index += GroupSize) {
    auto g = MetadataGroup::Load(local_metadata, group_index);
    auto present_matched_range = g.MatchPresent();
    if (!present_matched_range) {
      continue;
    }
    for (ssize_t byte_index : present_matched_range) {
      entry_callback(local_entries[group_index + byte_index]);
    }

    group_callback(&local_metadata[group_index]);
  }
}
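
// For illustration, a minimal sketch of calling `ForEachEntry` from a
// user-facing type, visiting every key with a hypothetical `use` callback and
// ignoring the per-group callback (as `Destroy` further below also does):
//
//   view.ForEachEntry([&](EntryT& entry) { use(entry.key()); },
//                     [](auto...) {});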

template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto ViewImpl<InputKeyT, InputValueT, InputKeyContextT>::ComputeMetricsImpl(
    KeyContextT key_context) const -> Metrics {
  uint8_t* local_metadata = metadata();
  EntryT* local_entries = entries();
  ssize_t local_size = alloc_size_;

  Metrics metrics;

  // Compute the ones we can directly.
  metrics.deleted_count = llvm::count(
      llvm::ArrayRef(local_metadata, local_size), MetadataGroup::Deleted);
  metrics.storage_bytes = AllocByteSize(local_size);

  // We want to process present slots specially to collect metrics on their
  // probing behavior.
  for (ssize_t group_index = 0; group_index < local_size;
       group_index += GroupSize) {
    auto g = MetadataGroup::Load(local_metadata, group_index);
    auto present_matched_range = g.MatchPresent();
    for (ssize_t byte_index : present_matched_range) {
      ++metrics.key_count;
      ssize_t index = group_index + byte_index;
      HashCode hash =
          key_context.HashKey(local_entries[index].key(), ComputeSeed());
      auto [hash_index, tag] = hash.ExtractIndexAndTag<7>();
      ProbeSequence s(hash_index, local_size);
      metrics.probed_key_count +=
          static_cast<ssize_t>(s.index() != group_index);

      // For each probed key, go through the probe sequence to find both the
      // probe distance and how many comparisons are required.
      ssize_t distance = 0;
      ssize_t compares = 0;
      for (; s.index() != group_index; s.Next()) {
        auto probe_g = MetadataGroup::Load(local_metadata, s.index());
        auto probe_matched_range = probe_g.Match(tag);
        compares += std::distance(probe_matched_range.begin(),
                                  probe_matched_range.end());
        distance += 1;
      }
      auto probe_g = MetadataGroup::Load(local_metadata, s.index());
      auto probe_matched_range = probe_g.Match(tag);
      CARBON_CHECK(!probe_matched_range.empty());
      for (ssize_t match_index : probe_matched_range) {
        if (match_index >= byte_index) {
          // Note we only count the compares that will *fail* as part of
          // probing. The last successful compare isn't interesting, it is
          // always needed.
          break;
        }
        compares += 1;
      }
      metrics.probe_avg_distance += distance;
      metrics.probe_max_distance =
          std::max(metrics.probe_max_distance, distance);
      metrics.probe_avg_compares += compares;
      metrics.probe_max_compares =
          std::max(metrics.probe_max_compares, compares);
    }
  }
  if (metrics.key_count > 0) {
    metrics.probe_avg_compares /= metrics.key_count;
    metrics.probe_avg_distance /= metrics.key_count;
  }

  return metrics;
}

// TODO: Evaluate whether it is worth forcing this out-of-line given the
// reasonable ABI boundary it forms and the large volume of code necessary to
// implement it.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
template <typename LookupKeyT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::InsertImpl(
    LookupKeyT lookup_key, KeyContextT key_context)
    -> std::pair<EntryT*, bool> {
  CARBON_DCHECK(alloc_size() > 0);

  PrefetchStorage();

  uint8_t* local_metadata = metadata();

  HashCode hash = key_context.HashKey(lookup_key, ComputeSeed());
  auto [hash_index, tag] = hash.ExtractIndexAndTag<7>();

  // We re-purpose the empty control byte to signal no insert is needed to the
  // caller. This is guaranteed to not be a control byte we're inserting.
  // constexpr uint8_t NoInsertNeeded = Group::Empty;

  ssize_t group_with_deleted_index;
  MetadataGroup::MatchIndex deleted_match = {};

  EntryT* local_entries = entries();

  auto return_insert_at_index =
      [&](ssize_t index) -> std::pair<EntryT*, bool> {
    // We'll need to insert at this index so set the control group byte to the
    // proper value.
    local_metadata[index] = tag | MetadataGroup::PresentMask;
    return {&local_entries[index], true};
  };

  for (ProbeSequence s(hash_index, alloc_size());; s.Next()) {
    ssize_t group_index = s.index();

    // Load the group's metadata and prefetch the entries for this group. The
    // prefetch here helps hide key access latency while we're matching the
    // metadata.
    auto g = MetadataGroup::Load(local_metadata, group_index);
    EntryT* group_entries = &local_entries[group_index];
    ViewImplT::PrefetchEntryGroup(group_entries);

    auto control_byte_matched_range = g.Match(tag);
    if (control_byte_matched_range) {
      auto byte_it = control_byte_matched_range.begin();
      auto byte_end = control_byte_matched_range.end();
      do {
        EntryT* entry = byte_it.index_ptr(group_entries);
        if (LLVM_LIKELY(key_context.KeyEq(lookup_key, entry->key()))) {
          return {entry, false};
        }
        ++byte_it;
      } while (LLVM_UNLIKELY(byte_it != byte_end));
    }

    // Track the first group with a deleted entry that we could insert over.
    if (!deleted_match) {
      deleted_match = g.MatchDeleted();
      group_with_deleted_index = group_index;
    }

    // We failed to find a matching entry in this bucket, so check if there are
    // no empty slots. In that case, we'll continue probing.
    auto empty_match = g.MatchEmpty();
    if (!empty_match) {
      continue;
    }
    // Ok, we've finished probing without finding anything and need to insert
    // instead.

    // If we found a deleted slot, we don't need the probe sequence to insert
    // so just bail. We want to ensure building up a table is fast so we
    // de-prioritize this a bit. In practice this doesn't have too much of an
    // effect.
    if (LLVM_UNLIKELY(deleted_match)) {
      return return_insert_at_index(group_with_deleted_index +
                                    deleted_match.index());
    }

    // We're going to need to grow by inserting into an empty slot. Check that
    // we have the budget for that before we compute the exact index of the
    // empty slot. Without the growth budget we'll have to completely rehash and
    // so we can just bail here.
    if (LLVM_UNLIKELY(growth_budget_ == 0)) {
      return {GrowAndInsert(hash, key_context), true};
    }

    --growth_budget_;
    CARBON_DCHECK(growth_budget() >= 0,
                  "Growth budget shouldn't have gone negative!");
    return return_insert_at_index(group_index + empty_match.index());
  }

  CARBON_FATAL(
      "We should never finish probing without finding the entry or an empty "
      "slot.");
}

template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
[[clang::noinline]] auto
BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::GrowToAllocSizeImpl(
    ssize_t target_alloc_size, KeyContextT key_context) -> void {
  CARBON_CHECK(llvm::isPowerOf2_64(target_alloc_size));
  if (target_alloc_size <= alloc_size()) {
    return;
  }

  // If this is the next alloc size, we can use our optimized growth strategy.
  if (target_alloc_size == ComputeNextAllocSize(alloc_size())) {
    GrowToNextAllocSize(key_context);
    return;
  }

  // Create locals for the old state of the table.
  ssize_t old_size = alloc_size();
  CARBON_DCHECK(old_size > 0);
  bool old_small = is_small();
  Storage* old_storage = storage();
  uint8_t* old_metadata = metadata();
  EntryT* old_entries = entries();

  // Configure for the new size and allocate the new storage.
  alloc_size() = target_alloc_size;
  storage() = Allocate(target_alloc_size);
  std::memset(metadata(), 0, target_alloc_size);
  growth_budget_ = GrowthThresholdForAllocSize(target_alloc_size);

  // Just re-insert all the entries. As we're more than doubling the table size,
  // we don't bother with fancy optimizations here. Even using `memcpy` for the
  // entries seems unlikely to be a significant win given how sparse the
  // insertions will end up being.
  ssize_t count = 0;
  for (ssize_t group_index = 0; group_index < old_size;
       group_index += GroupSize) {
    auto g = MetadataGroup::Load(old_metadata, group_index);
    auto present_matched_range = g.MatchPresent();
    for (ssize_t byte_index : present_matched_range) {
      ++count;
      ssize_t index = group_index + byte_index;
      HashCode hash =
          key_context.HashKey(old_entries[index].key(), ComputeSeed());
      EntryT* new_entry = InsertIntoEmpty(hash);
      new_entry->MoveFrom(std::move(old_entries[index]));
    }
  }
  growth_budget_ -= count;

  if (!old_small) {
    // Old isn't a small buffer, so we need to deallocate it.
    Deallocate(old_storage, old_size);
  }
}

template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::GrowForInsertCountImpl(
    ssize_t count, KeyContextT key_context) -> void {
  if (count < growth_budget_) {
    // Already space for the needed growth.
    return;
  }

  // Currently, we don't account for any tombstones marking deleted elements,
  // and just conservatively ensure the growth will create adequate growth
  // budget for insertions. We could make this more precise by instead walking
  // the table and only counting present slots, as once we grow we'll be able to
  // reclaim all of the deleted slots. But this adds complexity and it isn't
  // clear this is necessary so we do the simpler conservative thing.
  ssize_t used_budget =
      GrowthThresholdForAllocSize(alloc_size()) - growth_budget_;
  ssize_t budget_needed = used_budget + count;
  ssize_t space_needed = budget_needed + (budget_needed / 7);
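  // For example, assuming `GrowthThresholdForAllocSize` is 7/8ths of the
  // allocation size, the `budget_needed / 7` term inverts that ratio: a table
  // of at least `budget_needed * 8 / 7` slots keeps its threshold above
  // `budget_needed`. Concretely, `budget_needed == 150` gives `space_needed ==
  // 150 + 150 / 7 == 171`, which `NextPowerOf2` rounds up to a 256-slot
  // allocation whose threshold of 224 comfortably exceeds the needed budget.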
  ssize_t target_alloc_size = llvm::NextPowerOf2(space_needed);
  CARBON_CHECK(GrowthThresholdForAllocSize(target_alloc_size) > budget_needed);

  GrowToAllocSizeImpl(target_alloc_size, key_context);
}

template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
template <typename LookupKeyT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::EraseImpl(
    LookupKeyT lookup_key, KeyContextT key_context) -> bool {
  EntryT* entry = view_impl_.LookupEntry(lookup_key, key_context);
  if (!entry) {
    return false;
  }

  // If there are empty slots in this group then nothing will probe past this
  // group looking for an entry so we can simply set this slot to empty as
  // well. However, if every slot in this group is full, it might be part of
  // a long probe chain that we can't disrupt. In that case we mark the slot's
  // metadata as deleted to keep probes continuing past it.
  //
  // If we mark the slot as empty, we'll also need to increase the growth
  // budget.
  uint8_t* local_metadata = metadata();
  EntryT* local_entries = entries();
  ssize_t index = entry - local_entries;
  ssize_t group_index = index & ~GroupMask;
  auto g = MetadataGroup::Load(local_metadata, group_index);
  auto empty_matched_range = g.MatchEmpty();
  if (empty_matched_range) {
    local_metadata[index] = MetadataGroup::Empty;
    ++growth_budget_;
  } else {
    local_metadata[index] = MetadataGroup::Deleted;
  }

  if constexpr (!EntryT::IsTriviallyDestructible) {
    entry->Destroy();
  }

  return true;
}

template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::ClearImpl() -> void {
  view_impl_.ForEachEntry(
      [](EntryT& entry) {
        if constexpr (!EntryT::IsTriviallyDestructible) {
          entry.Destroy();
        }
      },
      [](uint8_t* metadata_group) {
        // Clear the group.
        std::memset(metadata_group, 0, GroupSize);
      });
  growth_budget_ = GrowthThresholdForAllocSize(alloc_size());
}
  992. // Allocates the appropriate memory layout for a table of the given
  993. // `alloc_size`, with space both for the metadata array and entries.
  994. //
  995. // The returned pointer *must* be deallocated by calling the below `Deallocate`
  996. // function with the same `alloc_size` as used here.
  997. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  998. auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::Allocate(
  999. ssize_t alloc_size) -> Storage* {
  1000. return reinterpret_cast<Storage*>(__builtin_operator_new(
  1001. ViewImplT::AllocByteSize(alloc_size),
  1002. static_cast<std::align_val_t>(Alignment), std::nothrow_t()));
  1003. }
// Deallocates a table's storage that was allocated with the `Allocate`
// function.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::Deallocate(
    Storage* storage, ssize_t alloc_size) -> void {
  ssize_t allocated_size = ViewImplT::AllocByteSize(alloc_size);
  // We only use the size when sized deallocation is available, but compute it
  // unconditionally so this code always compiles.
  static_cast<void>(allocated_size);
  __builtin_operator_delete(storage,
#if __cpp_sized_deallocation
                            allocated_size,
#endif
                            static_cast<std::align_val_t>(Alignment));
}
// Construct a table using the provided small storage if `small_alloc_size_` is
// non-zero. If `small_alloc_size_` is zero, then `small_storage` won't be used
// and can be null. Regardless, after this the storage pointer is non-null and
// the size is non-zero so that we can directly begin inserting or querying the
// table.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::Construct(
    Storage* small_storage) -> void {
  if (small_alloc_size_ > 0) {
    alloc_size() = small_alloc_size_;
    storage() = small_storage;
  } else {
    // Directly allocate the initial buffer so that the hashtable is never in
    // an empty state.
    alloc_size() = MinAllocatedSize;
    storage() = Allocate(MinAllocatedSize);
  }
  std::memset(metadata(), 0, alloc_size());
  growth_budget_ = GrowthThresholdForAllocSize(alloc_size());
}
// Destroy the current table, releasing any memory used.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::Destroy() -> void {
  // Check for a moved-from state and don't do anything. Only a moved-from
  // table has a zero size.
  if (alloc_size() == 0) {
    return;
  }

  // Destroy all the entries.
  if constexpr (!EntryT::IsTriviallyDestructible) {
    view_impl_.ForEachEntry([](EntryT& entry) { entry.Destroy(); },
                            [](auto...) {});
  }

  // If small, nothing to deallocate.
  if (is_small()) {
    return;
  }

  // Just deallocate the storage without updating anything when destroying the
  // object.
  Deallocate(storage(), alloc_size());
}
// Copy all of the slots over from another table that has exactly the same
// allocation size.
//
// This requires the current table to already have storage allocated and set up
// but not initialized (or already cleared). It directly overwrites the storage
// allocation of the table to match the incoming argument.
//
// Despite being used in construction, this shouldn't be called for a moved-from
// `arg` -- in practice it is better for callers to handle this when setting up
// storage.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::CopySlotsFrom(
    const BaseImpl& arg) -> void {
  CARBON_DCHECK(alloc_size() == arg.alloc_size());
  ssize_t local_size = alloc_size();

  // Preserve which slot every entry is in, including tombstones in the
  // metadata, in order to copy into the new table's storage without rehashing
  // all of the keys. This is especially important as we don't have an easy way
  // to access the key context needed for rehashing here.
  uint8_t* local_metadata = metadata();
  EntryT* local_entries = entries();
  const uint8_t* local_arg_metadata = arg.metadata();
  const EntryT* local_arg_entries = arg.entries();
  memcpy(local_metadata, local_arg_metadata, local_size);
  for (ssize_t group_index = 0; group_index < local_size;
       group_index += GroupSize) {
    auto g = MetadataGroup::Load(local_arg_metadata, group_index);
    for (ssize_t byte_index : g.MatchPresent()) {
      local_entries[group_index + byte_index].CopyFrom(
          local_arg_entries[group_index + byte_index]);
    }
  }
}
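
// As a sketch of why the tombstones must be preserved (hypothetical state):
// if some key A originally probed past a full group into a later slot, and
// the entry that blocked it was later erased to a `Deleted` tombstone,
// dropping that tombstone during the copy would terminate A's probe chain
// early and make A unfindable in the copy. Copying the metadata bytes
// verbatim sidesteps this without rehashing any keys.
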
// Move from another table to this one.
//
// Note that the `small_storage` is *this* table's small storage pointer,
// provided from the `TableImpl` to this `BaseImpl` method as an argument.
//
// Requires the table to have size and growth already set up, but otherwise the
// table must not yet be initialized. Notably, storage should either not yet be
// constructed or already destroyed. This both sets up the storage and handles
// any moving of slots needed.
//
// Note that because this is used in construction it needs to handle a
// moved-from `arg`.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::MoveFrom(
    BaseImpl&& arg, Storage* small_storage) -> void {
  ssize_t local_size = alloc_size();
  CARBON_DCHECK(local_size == arg.alloc_size());
  // If `arg` is moved-from, skip the rest as the local size is all we need.
  if (local_size == 0) {
    return;
  }

  if (arg.is_small()) {
    CARBON_DCHECK(local_size == small_alloc_size_);
    this->storage() = small_storage;
    // For small tables, we have to move the entries as we can't move the
    // tables themselves. We do this preserving their slots and even tombstones
    // to avoid rehashing.
    uint8_t* local_metadata = this->metadata();
    EntryT* local_entries = this->entries();
    uint8_t* local_arg_metadata = arg.metadata();
    EntryT* local_arg_entries = arg.entries();
    memcpy(local_metadata, local_arg_metadata, local_size);
    if constexpr (EntryT::IsTriviallyRelocatable) {
      memcpy(local_entries, local_arg_entries, local_size * sizeof(EntryT));
    } else {
      for (ssize_t group_index = 0; group_index < local_size;
           group_index += GroupSize) {
        auto g = MetadataGroup::Load(local_arg_metadata, group_index);
        for (ssize_t byte_index : g.MatchPresent()) {
          local_entries[group_index + byte_index].MoveFrom(
              std::move(local_arg_entries[group_index + byte_index]));
        }
      }
    }
  } else {
    // Just point at the allocated storage.
    storage() = arg.storage();
  }

  // Finally, put the incoming table into a moved-from state.
  arg.alloc_size() = 0;
  // Replace the pointer with null to ease debugging.
  arg.storage() = nullptr;
}
// Optimized routine to insert a key into a table when that key *definitely*
// isn't present in the table and the table *definitely* has a viable empty slot
// (and growth space) to insert into before any deleted slots. When both of
// these are true, typically just after growth, we can dramatically simplify the
// insert position search.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::InsertIntoEmpty(
    HashCode hash) -> EntryT* {
  auto [hash_index, tag] = hash.ExtractIndexAndTag<7>();
  uint8_t* local_metadata = metadata();
  EntryT* local_entries = entries();

  for (ProbeSequence s(hash_index, alloc_size());; s.Next()) {
    ssize_t group_index = s.index();
    auto g = MetadataGroup::Load(local_metadata, group_index);

    if (auto empty_match = g.MatchEmpty()) {
      ssize_t index = group_index + empty_match.index();
      local_metadata[index] = tag | MetadataGroup::PresentMask;
      return &local_entries[index];
    }

    // Otherwise we continue probing.
  }
}
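
// A small sketch of the metadata write above, with an illustrative tag value
// and assuming the present marker occupies the high bit (consistent with the
// 7-bit tag extraction): given a tag of 0b0101101, the stored byte becomes
// `tag | PresentMask`, i.e. 0b10101101. The high bit marks the slot present,
// and the low seven bits let lookups compare tags without touching the entry
// itself.
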
// Apply our doubling growth strategy and (re-)check invariants around table
// size.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::ComputeNextAllocSize(
    ssize_t old_alloc_size) -> ssize_t {
  CARBON_DCHECK(llvm::isPowerOf2_64(old_alloc_size),
                "Expected a power of two!");
  ssize_t new_alloc_size;
  bool overflow = __builtin_mul_overflow(old_alloc_size, 2, &new_alloc_size);
  CARBON_CHECK(!overflow, "Computing the new size overflowed `ssize_t`!");
  return new_alloc_size;
}
// Compute the growth threshold for a given size.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT,
              InputKeyContextT>::GrowthThresholdForAllocSize(ssize_t alloc_size)
    -> ssize_t {
  // We use a 7/8ths load factor to trigger growth.
  return alloc_size - alloc_size / 8;
}
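
// For example (simple arithmetic, not additional API):
//   GrowthThresholdForAllocSize(64) == 64 - 64/8 == 56
// so at most 56 of the 64 slots in that allocation are filled before growth
// is triggered.
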
// Optimized routine for growing to the next alloc size.
//
// A particularly common and important-to-optimize path is growing to the next
// alloc size, which will always be a doubling of the allocated size. This
// allows an important optimization -- we're adding exactly one more high bit to
// the hash-computed index for each entry. This in turn means we can classify
// every entry in the table into three cases:
//
// 1) The new high bit is zero, so the entry is at the same index in the new
//    table as in the old.
//
// 2) The new high bit is one, so the entry is at the old index plus the old
//    size.
//
// 3) The entry's current index doesn't match the initial hash index because
//    it required some amount of probing to find an empty slot.
//
// The design of the hash table tries to minimize how many entries fall into
// case (3), so we expect the vast majority of entries to be in (1) or (2). This
// lets us model growth notionally as copying the hashtable twice, into the
// lower and higher halves of the new allocation, clearing out the now-empty
// slots (from both deleted entries and entries in the other half of the table
// after growth), and re-inserting the probed elements. That model in turn is
// much more efficient than re-inserting all of the elements, as it avoids the
// unnecessary parts of insertion and avoids interleaving random accesses for
// the probed elements. But most importantly, for trivially relocatable types it
// allows us to use `memcpy` rather than moving the elements individually.
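//
// As a worked illustration of cases (1) and (2), with purely hypothetical
// numbers: suppose the old size is 64 and an entry's hash-computed index is
// 0b000101 (5). After doubling, the index gains one high bit and becomes
// either 0b0000101 (5, case 1) or 0b1000101 (69 == 5 | 64, case 2). Which one
// applies is determined by re-extracting the index from the hash with the
// new, wider probe mask below.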
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::GrowToNextAllocSize(
    KeyContextT key_context) -> void {
  // We collect the probed elements in a small vector for re-insertion. It is
  // tempting to reuse the already allocated storage, but doing so appears to
  // be a (very slight) performance regression. These are relatively rare and
  // storing them into the existing storage creates stores to the same regions
  // of memory we're reading. Moreover, it requires moving both the key and the
  // value twice, and doing the `memcpy` widening for relocatable types before
  // the group walk rather than after the group walk. In practice, between the
  // statistical rareness and using a large small-size buffer here on the
  // stack, we can handle this most efficiently with temporary, additional
  // storage.
  llvm::SmallVector<std::pair<ssize_t, HashCode>, 128> probed_indices;

  // Create locals for the old state of the table.
  ssize_t old_size = alloc_size();
  CARBON_DCHECK(old_size > 0);
  bool old_small = is_small();
  Storage* old_storage = storage();
  uint8_t* old_metadata = metadata();
  EntryT* old_entries = entries();

#ifndef NDEBUG
  // Count how many of the old table slots will end up being empty after we
  // grow the table. This includes both the currently empty slots and the
  // deleted slots, because we clear deleted slots to empty and re-insert
  // everything that had any probing.
  ssize_t debug_empty_count =
      llvm::count(llvm::ArrayRef(old_metadata, old_size), MetadataGroup::Empty);
  ssize_t debug_deleted_count = llvm::count(
      llvm::ArrayRef(old_metadata, old_size), MetadataGroup::Deleted);
  CARBON_DCHECK(
      debug_empty_count >= (old_size - GrowthThresholdForAllocSize(old_size)),
      "debug_empty_count: {0}, debug_deleted_count: {1}, size: {2}",
      debug_empty_count, debug_deleted_count, old_size);
#endif

  // Configure for the new size and allocate the new storage.
  ssize_t new_size = ComputeNextAllocSize(old_size);
  alloc_size() = new_size;
  storage() = Allocate(new_size);
  growth_budget_ = GrowthThresholdForAllocSize(new_size);

  // Now extract the new components of the table.
  uint8_t* new_metadata = metadata();
  EntryT* new_entries = entries();

  // Walk the metadata groups, clearing deleted to empty, duplicating the
  // metadata for the low and high halves, and updating it based on where each
  // entry will go in the new table. The updated metadata group is written to
  // the new table, and for non-trivially relocatable entry types, the entry is
  // also moved to its new location.
  ssize_t count = 0;
  for (ssize_t group_index = 0; group_index < old_size;
       group_index += GroupSize) {
    auto low_g = MetadataGroup::Load(old_metadata, group_index);
    // Make sure to match present elements first to enable pipelining with
    // clearing.
    auto present_matched_range = low_g.MatchPresent();
    low_g.ClearDeleted();
    MetadataGroup high_g;
    if constexpr (MetadataGroup::FastByteClear) {
      // When we have a fast byte clear, we can update the metadata for the
      // growth in-register and store at the end.
      high_g = low_g;
    } else {
      // If we don't have a fast byte clear, we can store the metadata group
      // eagerly here and overwrite bytes with a byte store below instead of
      // clearing the byte in-register.
      low_g.Store(new_metadata, group_index);
      low_g.Store(new_metadata, group_index | old_size);
    }

    for (ssize_t byte_index : present_matched_range) {
      ++count;
      ssize_t old_index = group_index + byte_index;
      if constexpr (!MetadataGroup::FastByteClear) {
        CARBON_DCHECK(new_metadata[old_index] == old_metadata[old_index]);
        CARBON_DCHECK(new_metadata[old_index | old_size] ==
                      old_metadata[old_index]);
      }
      HashCode hash =
          key_context.HashKey(old_entries[old_index].key(), ComputeSeed());
      ssize_t old_hash_index = hash.ExtractIndexAndTag<7>().first &
                               ComputeProbeMaskFromSize(old_size);
      if (LLVM_UNLIKELY(old_hash_index != group_index)) {
        probed_indices.push_back({old_index, hash});
        if constexpr (MetadataGroup::FastByteClear) {
          low_g.ClearByte(byte_index);
          high_g.ClearByte(byte_index);
        } else {
          new_metadata[old_index] = MetadataGroup::Empty;
          new_metadata[old_index | old_size] = MetadataGroup::Empty;
        }
        continue;
      }
      ssize_t new_index = hash.ExtractIndexAndTag<7>().first &
                          ComputeProbeMaskFromSize(new_size);
      CARBON_DCHECK(new_index == old_hash_index ||
                    new_index == (old_hash_index | old_size));
      // Toggle the newly added bit of the index to get to the other possible
      // target index, and mark that other slot's metadata byte as empty.
      if constexpr (MetadataGroup::FastByteClear) {
        (new_index == old_hash_index ? high_g : low_g).ClearByte(byte_index);
        new_index += byte_index;
      } else {
        new_index += byte_index;
        new_metadata[new_index ^ old_size] = MetadataGroup::Empty;
      }

      // If we need to explicitly move (and destroy) the key or value, do so
      // here where we already know its target.
      if constexpr (!EntryT::IsTriviallyRelocatable) {
        new_entries[new_index].MoveFrom(std::move(old_entries[old_index]));
      }
    }
    if constexpr (MetadataGroup::FastByteClear) {
      low_g.Store(new_metadata, group_index);
      high_g.Store(new_metadata, group_index | old_size);
    }
  }
  CARBON_DCHECK((count - static_cast<ssize_t>(probed_indices.size())) ==
                (new_size - llvm::count(llvm::ArrayRef(new_metadata, new_size),
                                        MetadataGroup::Empty)));
#ifndef NDEBUG
  CARBON_DCHECK((debug_empty_count + debug_deleted_count) ==
                (old_size - count));
  CARBON_DCHECK(llvm::count(llvm::ArrayRef(new_metadata, new_size),
                            MetadataGroup::Empty) ==
                debug_empty_count + debug_deleted_count +
                    static_cast<ssize_t>(probed_indices.size()) + old_size);
#endif

  // If the keys or values are trivially relocatable, we do a bulk memcpy of
  // them into place. This will copy them into both possible locations, which
  // is fine. One will be empty and clobbered if reused or ignored. The other
  // will be the one used. This might seem to require that creating two copies
  // of each object be valid, but it doesn't: it produces exactly the same
  // storage as copying the bytes into the wrong location first and then again
  // into the correct location. Only one copy is live and only one is
  // destroyed.
  if constexpr (EntryT::IsTriviallyRelocatable) {
    memcpy(new_entries, old_entries, old_size * sizeof(EntryT));
    memcpy(new_entries + old_size, old_entries, old_size * sizeof(EntryT));
  }
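
  // As a sketch of why the double copy is sound (hypothetical layout,
  // matching the earlier example): if an entry lives at old index 5 and its
  // new home is 69 (5 | 64 with an old size of 64), the two bulk copies write
  // its bytes to both new index 5 and new index 69. The metadata written
  // above marks index 69 present and index 5 empty, so only the copy at 69 is
  // ever treated as a live object; the bytes at 5 are dead storage that a
  // later insertion may clobber.
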
  // We then need to do a normal insertion for anything that was probed before
  // growth, but we know we'll find an empty slot, so leverage that.
  for (auto [old_index, hash] : probed_indices) {
    EntryT* new_entry = InsertIntoEmpty(hash);
    new_entry->MoveFrom(std::move(old_entries[old_index]));
  }
  CARBON_DCHECK(count ==
                (new_size - llvm::count(llvm::ArrayRef(new_metadata, new_size),
                                        MetadataGroup::Empty)));
  growth_budget_ -= count;
  CARBON_DCHECK(growth_budget_ ==
                (GrowthThresholdForAllocSize(new_size) -
                 (new_size - llvm::count(llvm::ArrayRef(new_metadata, new_size),
                                         MetadataGroup::Empty))));
  CARBON_DCHECK(growth_budget_ > 0,
                "Must still have a growth budget after rehash!");

  if (!old_small) {
    // The old storage isn't a small buffer, so we need to deallocate it.
    Deallocate(old_storage, old_size);
  }
}
// Grow the hashtable to create space and then insert into it. Returns the
// selected insertion entry. Never returns null. In addition to growing and
// selecting the insertion entry, this routine updates the metadata array so
// that this function can be directly called and the result returned from
// `InsertImpl`.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
[[clang::noinline]] auto
BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::GrowAndInsert(
    HashCode hash, KeyContextT key_context) -> EntryT* {
  GrowToNextAllocSize(key_context);

  // And insert the key into an entry in the newly grown table, returning that
  // entry for use.
  --growth_budget_;
  return InsertIntoEmpty(hash);
}
template <typename InputBaseT, ssize_t SmallSize>
TableImpl<InputBaseT, SmallSize>::TableImpl(const TableImpl& arg)
    : BaseT(arg.alloc_size(), arg.growth_budget_, SmallSize) {
  // Check for completely broken objects. These invariants should be true even
  // in a moved-from state.
  CARBON_DCHECK(arg.alloc_size() == 0 || !arg.is_small() ||
                arg.alloc_size() == SmallSize);
  CARBON_DCHECK(arg.small_alloc_size_ == SmallSize);
  CARBON_DCHECK(this->small_alloc_size_ == SmallSize);

  if (this->alloc_size() != 0) {
    SetUpStorage();
    this->CopySlotsFrom(arg);
  }
}
template <typename InputBaseT, ssize_t SmallSize>
auto TableImpl<InputBaseT, SmallSize>::operator=(const TableImpl& arg)
    -> TableImpl& {
  // Check for completely broken objects. These invariants should be true even
  // in a moved-from state.
  CARBON_DCHECK(arg.alloc_size() == 0 || !arg.is_small() ||
                arg.alloc_size() == SmallSize);
  CARBON_DCHECK(arg.small_alloc_size_ == SmallSize);
  CARBON_DCHECK(this->small_alloc_size_ == SmallSize);

  // We have to end up with an allocation size exactly equivalent to the
  // incoming argument to avoid re-hashing every entry in the table, which
  // isn't possible without key context.
  if (arg.alloc_size() == this->alloc_size()) {
    // There is no effective way for self-assignment to fall out of an
    // efficient implementation, so detect and bypass it here. Similarly, if
    // both are in a moved-from state, there is nothing to do.
    if (&arg == this || this->alloc_size() == 0) {
      return *this;
    }
    CARBON_DCHECK(arg.storage() != this->storage());
    if constexpr (!EntryT::IsTriviallyDestructible) {
      this->view_impl_.ForEachEntry([](EntryT& entry) { entry.Destroy(); },
                                    [](auto...) {});
    }
  } else {
    // The sizes don't match, so destroy everything and re-initialize the
    // table storage.
    this->Destroy();
    this->alloc_size() = arg.alloc_size();
    // If `arg` is moved-from, we've cleared out our elements and put ourselves
    // into a moved-from state. We're done.
    if (this->alloc_size() == 0) {
      return *this;
    }
    SetUpStorage();
  }
  this->growth_budget_ = arg.growth_budget_;
  this->CopySlotsFrom(arg);
  return *this;
}
// Puts the incoming table into a moved-from state that can be destroyed or
// re-initialized but must not be used otherwise.
template <typename InputBaseT, ssize_t SmallSize>
TableImpl<InputBaseT, SmallSize>::TableImpl(TableImpl&& arg) noexcept
    : BaseT(arg.alloc_size(), arg.growth_budget_, SmallSize) {
  // Check for completely broken objects. These invariants should be true even
  // in a moved-from state.
  CARBON_DCHECK(arg.alloc_size() == 0 || !arg.is_small() ||
                arg.alloc_size() == SmallSize);
  CARBON_DCHECK(arg.small_alloc_size_ == SmallSize);
  CARBON_DCHECK(this->small_alloc_size_ == SmallSize);

  this->MoveFrom(std::move(arg), small_storage());
}
template <typename InputBaseT, ssize_t SmallSize>
auto TableImpl<InputBaseT, SmallSize>::operator=(TableImpl&& arg) noexcept
    -> TableImpl& {
  // Check for completely broken objects. These invariants should be true even
  // in a moved-from state.
  CARBON_DCHECK(arg.alloc_size() == 0 || !arg.is_small() ||
                arg.alloc_size() == SmallSize);
  CARBON_DCHECK(arg.small_alloc_size_ == SmallSize);
  CARBON_DCHECK(this->small_alloc_size_ == SmallSize);

  // Destroy and deallocate our table.
  this->Destroy();

  // Defend against self-move by zeroing the size here before we start moving
  // out of `arg`.
  this->alloc_size() = 0;

  // Set up to match the argument and then finish the move.
  this->alloc_size() = arg.alloc_size();
  this->growth_budget_ = arg.growth_budget_;
  this->MoveFrom(std::move(arg), small_storage());
  return *this;
}
template <typename InputBaseT, ssize_t SmallSize>
TableImpl<InputBaseT, SmallSize>::~TableImpl() {
  this->Destroy();
}
// Reset a table to its original state, including releasing any allocated
// memory.
template <typename InputBaseT, ssize_t SmallSize>
auto TableImpl<InputBaseT, SmallSize>::ResetImpl() -> void {
  this->Destroy();

  // Re-initialize the whole thing.
  CARBON_DCHECK(this->small_alloc_size() == SmallSize);
  this->Construct(small_storage());
}
template <typename InputBaseT, ssize_t SmallSize>
auto TableImpl<InputBaseT, SmallSize>::small_storage() const -> Storage* {
  if constexpr (SmallSize > 0) {
    // Do a bunch of validation of the small size to establish our invariants
    // when we know we have a non-zero small size.
    static_assert(llvm::isPowerOf2_64(SmallSize),
                  "SmallSize must be a power of two for a hashed buffer!");
    static_assert(SmallSize >= MaxGroupSize,
                  "We require all small sizes to be multiples of the largest "
                  "group size supported to ensure it can be used portably.");
    static_assert(
        (SmallSize % MaxGroupSize) == 0,
        "Small size must be a multiple of the max group size supported "
        "so that we can allocate a whole number of groups.");
    // Implied by the `MaxGroupSize` asserts above.
    static_assert(SmallSize >= GroupSize);
    static_assert((SmallSize % GroupSize) == 0);

    static_assert(SmallSize >= alignof(StorageEntry<KeyT, ValueT>),
                  "Requested a small size that would require padding between "
                  "metadata bytes and correctly aligned key and value types. "
                  "Either a larger small size or a zero small size and heap "
                  "allocation are required for this key and value type.");
    static_assert(offsetof(SmallStorage, entries) == SmallSize,
                  "Offset to entries in small size storage doesn't match "
                  "computed offset!");
    return &small_storage_;
  } else {
    static_assert(
        sizeof(TableImpl) == sizeof(BaseT),
        "Empty small storage caused a size difference and wasted space!");
    return nullptr;
  }
}
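
// For illustration only (hypothetical values): if `MaxGroupSize` were 16 and
// the entry alignment were 8, the asserts above would admit small sizes of
// 16, 32, 64, and so on -- powers of two that are whole multiples of the
// group size -- while rejecting sizes like 8 or 24 at compile time.
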
// Helper to set up the storage of a table when a specific size has already
// been set up. If possible, uses any small storage, otherwise allocates.
template <typename InputBaseT, ssize_t SmallSize>
auto TableImpl<InputBaseT, SmallSize>::SetUpStorage() -> void {
  CARBON_DCHECK(this->small_alloc_size() == SmallSize);
  ssize_t local_size = this->alloc_size();
  CARBON_DCHECK(local_size != 0);
  if (local_size == SmallSize) {
    this->storage() = small_storage();
  } else {
    this->storage() = BaseT::Allocate(local_size);
  }
}
}  // namespace Carbon::RawHashtable

#endif  // CARBON_COMMON_RAW_HASHTABLE_H_