raw_hashtable.h

  1. // Part of the Carbon Language project, under the Apache License v2.0 with LLVM
  2. // Exceptions. See /LICENSE for license information.
  3. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. #ifndef CARBON_COMMON_RAW_HASHTABLE_H_
  5. #define CARBON_COMMON_RAW_HASHTABLE_H_
  6. #include <algorithm>
  7. #include <concepts>
  8. #include <cstddef>
  9. #include <cstring>
  10. #include <iterator>
  11. #include <new>
  12. #include <type_traits>
  13. #include <utility>
  14. #include "common/check.h"
  15. #include "common/hashing.h"
  16. #include "common/raw_hashtable_metadata_group.h"
  17. #include "llvm/Support/Compiler.h"
  18. #include "llvm/Support/MathExtras.h"
  19. // A namespace collecting a set of low-level utilities for building hashtable
  20. // data structures. These should only be used as implementation details of
  21. // higher-level data-structure APIs.
  22. //
  23. // The utilities here use the `hashtable_key_context.h` provided `KeyContext` to
  24. // support the necessary hashtable operations on keys: hashing and comparison.
  25. // This also serves as the customization point for hashtables built on this
  26. // infrastructure for those operations. See that header file for details.
  27. //
  28. // These utilities support hashtables following a *specific* API design pattern,
  29. // and using Small-Size Optimization, or "SSO", when desired. We expect there to
  30. // be three layers to any hashtable design:
  31. //
  32. // - A *view* type: a read-only view of the hashtable contents. This type should
  33. // be a value type and is expected to be passed by-value in APIs. However, it
  34. // will have `const`-reference semantics, much like a `std::string_view`. Note
  35. // that the *entries* remain mutable; it is only the *table* itself that
  36. // is read-only.
  37. //
  38. // - A *base* type: a base class type of the actual hashtable, which allows
  39. // almost all mutable operations but erases any specific SSO buffer size.
  40. // Because this is a base of the actual hash table, it is designed to be
  41. // passed as a non-`const` reference or pointer.
  42. //
  43. // - A *table* type: the actual hashtable which derives from the base type and
  44. // adds any desired SSO storage buffer. Beyond the physical storage, it also
  45. // allows resetting the table to its initial state & allocated size, as well
  46. // as copying and moving the table.
  47. //
  48. // For complete examples of the API design, see `set.h` for a hashtable-based
  49. // set data structure, and `map.h` for a hashtable-based map data structure.
  50. //
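// As a rough sketch of how these three layers are intended to be combined (the
// names here are illustrative only -- see `set.h` and `map.h` for the real
// types, and `KeyContextT` stands in for a key context type from
// `hashtable_key_context.h`):
//
//   template <typename K, typename V>
//   class MyMapView : RawHashtable::ViewImpl<K, V, KeyContextT> { ... };
//
//   template <typename K, typename V>
//   class MyMapBase : protected RawHashtable::BaseImpl<K, V, KeyContextT> {
//     ...
//   };
//
//   template <typename K, typename V, ssize_t SmallSize>
//   class MyMap : RawHashtable::TableImpl<MyMapBase<K, V>, SmallSize> {
//     ...
//   };
//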
  51. // The hashtable design implemented here has several key invariants and design
  52. // elements that are essential to all three of the types above and the
  53. // functionality they provide.
  54. //
  55. // - The underlying hashtable uses [open addressing], a power-of-two table size,
  56. // and quadratic probing rather than closed addressing and chaining.
  57. //
  58. // [open addressing]: https://en.wikipedia.org/wiki/Open_addressing
  59. //
  60. // - Each _slot_ in the table corresponds to a key, a value, and one byte of
  61. // metadata. Each _entry_ is a key and value. The key and value for an entry
  62. // are stored together.
  63. //
  64. // - The allocated storage is organized into an array of metadata bytes followed
  65. // by an array of entry storage.
  66. //
  67. // - The metadata byte corresponding to each entry marks whether that entry is
  68. // empty, deleted, or present. When present, it also stores a 7-bit tag taken from
  69. // the hash of the entry's key; see the encoding sketch at the end of this comment.
  70. //
  71. // - The storage for an entry is an internal type that should not be exposed to
  72. // users; only the underlying keys and values should be exposed.
  73. //
  74. // - The hash addressing and probing occurs over *groups* of slots rather than
  75. // individual entries. When inserting a new entry, it can be added to the
  76. // group it hashes to as long as it is not full, and can even replace a slot with
  77. // a tombstone indicating a previously deleted entry. Only when the group is
  78. // full will it look at the next group in the probe sequence. As a result,
  79. // there may be entries in a group where a different group is the start of
  80. // that entry's probe sequence. Also, when performing a lookup, every group in
  81. // the probe sequence must be inspected for the lookup key until it is found
  82. // or the group has an empty slot.
  83. //
  84. // - Groups are scanned rapidly using the one-byte metadata for each entry in
  85. // the group and CPU instructions that allow comparing all of the metadata for
  86. // a group in parallel. For more details on the metadata group encoding and
  87. // scanning, see `raw_hashtable_metadata_group.h`.
  88. //
  89. // - `GroupSize` is a platform-specific relatively small power of two that fits
  90. // in some hardware register. However, `MaxGroupSize` is provided as a
  91. // portable max that is also a power of two. The table storage, whether
  92. // provided by an SSO buffer or allocated, is required to be a multiple of
  93. // `MaxGroupSize` to keep the requirement portable but sufficient for all
  94. // platforms.
  95. //
  96. // - There is *always* an allocated table of some multiple of `MaxGroupSize`.
  97. // This allows accesses to be branchless. When heap allocated, we pro-actively
  98. // allocate at least a minimum heap size table. When there is a small-size
  99. // optimization (SSO) buffer, that provides the initial allocation.
  100. //
  101. // - The table performs a minimal amount of bookkeeping that limits the APIs it
  102. // can support:
  103. // - `alloc_size` is the size of the table *allocated* (not *used*), and is
  104. // always a power of 2 at least as big as `MinAllocatedSize`.
  105. // - `storage` is a pointer to the storage for the `alloc_size` slots of the
  106. // table, and never null.
  107. // - `small_alloc_size` is the maximum `alloc_size` where the table is stored
  108. // in the object itself instead of separately on the heap. In this case,
  109. // `storage` points to `small_storage_`.
  110. // - `growth_budget` is the number of entries that may be added before the
  111. // table allocation is doubled. It is always
  112. // `GrowthThresholdForAllocSize(alloc_size)` minus the number of
  113. // non-empty (filled or deleted) slots. If it ever falls to 0, the table
  114. // is grown to keep it greater than 0.
  115. // There is also a "moved-from" state, in which `alloc_size` is 0 and `storage`
  116. // is null, where the table may only be reinitialized or destroyed. Since the
  117. // table doesn't track the exact number of filled entries, it doesn't support a
  118. // container-style `size` API.
  119. //
  120. // - There is no direct iterator support because of the complexity of embedding
  121. // the group-based metadata scanning into an iterator model. Instead, there is
  122. // just a for-each method that is passed a lambda to observe all entries. The
  123. // order of this observation is also not guaranteed.
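//
// As a brief sketch of the metadata encoding referred to above (the exact
// constant values live in `raw_hashtable_metadata_group.h`): an empty slot's
// metadata byte is zero, a deleted slot's byte is a distinct reserved value,
// and a present slot's byte is the entry's 7-bit hash tag with the present bit
// set, roughly:
//
//   metadata[index] = tag | MetadataGroup::PresentMask;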
  124. namespace Carbon::RawHashtable {
  125. // Which prefetch strategies are enabled can be controlled via macros to
  126. // support experimentation.
  127. //
  128. // Currently, benchmarking on both modern AMD and ARM CPUs seems to indicate
  129. // that the entry group prefetching is more beneficial than metadata, but that
  130. // benefit is degraded when enabling them both. This determined our current
  131. // default of no metadata prefetch but enabled entry group prefetch.
  132. //
  133. // Override these by defining them as part of the build explicitly to either `0`
  134. // or `1`. If left undefined, the defaults will be supplied.
  135. #ifndef CARBON_ENABLE_PREFETCH_METADATA
  136. #define CARBON_ENABLE_PREFETCH_METADATA 0
  137. #endif
  138. #ifndef CARBON_ENABLE_PREFETCH_ENTRY_GROUP
  139. #define CARBON_ENABLE_PREFETCH_ENTRY_GROUP 1
  140. #endif
  141. // If allocating storage, allocate a minimum of one cacheline of group metadata
  142. // or a minimum of one group, whichever is larger.
  143. constexpr ssize_t MinAllocatedSize = std::max<ssize_t>(64, MaxGroupSize);
  144. // An entry in the hashtable storage of a `KeyT` and `ValueT` object.
  145. //
  146. // Allows manual construction, destruction, and access to these values so we can
  147. // create arrays of the entries prior to populating them with actual keys and
  148. // values.
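//
// A minimal sketch of the intended manual lifecycle (illustrative only -- the
// real callers are the table implementations below, and `std::string` is just
// an example key type here):
//
//   StorageEntry<std::string, int> entry;         // Raw storage, nothing constructed.
//   new (&entry.key_storage) std::string("key");  // Construct the key in place.
//   new (&entry.value_storage) int(42);           // Construct the value in place.
//   CARBON_CHECK(entry.key() == "key" && entry.value() == 42);
//   entry.Destroy();                              // Manual destruction is required.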
  149. template <typename KeyT, typename ValueT>
  150. struct StorageEntry {
  151. static constexpr bool IsTriviallyDestructible =
  152. std::is_trivially_destructible_v<KeyT> &&
  153. std::is_trivially_destructible_v<ValueT>;
  154. static constexpr bool IsTriviallyRelocatable =
  155. IsTriviallyDestructible && std::is_trivially_move_constructible_v<KeyT> &&
  156. std::is_trivially_move_constructible_v<ValueT>;
  157. static constexpr bool IsCopyable =
  158. IsTriviallyRelocatable || (std::is_copy_constructible_v<KeyT> &&
  159. std::is_copy_constructible_v<ValueT>);
  160. auto key() const -> const KeyT& {
  161. // Ensure we don't need more alignment than available. Inside a method body
  162. // to apply to the complete type.
  163. static_assert(
  164. alignof(StorageEntry) <= MinAllocatedSize,
  165. "The minimum allocated size turns into the alignment of our array of "
  166. "storage entries as they follow the metadata byte array.");
  167. return *std::launder(reinterpret_cast<const KeyT*>(&key_storage));
  168. }
  169. auto key() -> KeyT& {
  170. return const_cast<KeyT&>(const_cast<const StorageEntry*>(this)->key());
  171. }
  172. auto value() const -> const ValueT& {
  173. return *std::launder(reinterpret_cast<const ValueT*>(&value_storage));
  174. }
  175. auto value() -> ValueT& {
  176. return const_cast<ValueT&>(const_cast<const StorageEntry*>(this)->value());
  177. }
  178. // We handle destruction and move manually as we only want to expose distinct
  179. // `KeyT` and `ValueT` subobjects to user code that may need to do in-place
  180. // construction. As a consequence, this struct only provides the storage and
  181. // we have to manually manage the construction, move, and destruction of the
  182. // objects.
  183. auto Destroy() -> void {
  184. static_assert(!IsTriviallyDestructible,
  185. "Should never instantiate when trivial!");
  186. key().~KeyT();
  187. value().~ValueT();
  188. }
  189. auto CopyFrom(const StorageEntry& entry) -> void {
  190. if constexpr (IsTriviallyRelocatable) {
  191. memcpy(this, &entry, sizeof(StorageEntry));
  192. } else {
  193. new (&key_storage) KeyT(entry.key());
  194. new (&value_storage) ValueT(entry.value());
  195. }
  196. }
  197. // Move from an expiring entry and destroy that entry's key and value.
  198. // Optimizes to directly use `memcpy` when correct.
  199. auto MoveFrom(StorageEntry&& entry) -> void {
  200. if constexpr (IsTriviallyRelocatable) {
  201. memcpy(this, &entry, sizeof(StorageEntry));
  202. } else {
  203. new (&key_storage) KeyT(std::move(entry.key()));
  204. entry.key().~KeyT();
  205. new (&value_storage) ValueT(std::move(entry.value()));
  206. entry.value().~ValueT();
  207. }
  208. }
  209. alignas(KeyT) std::byte key_storage[sizeof(KeyT)];
  210. alignas(ValueT) std::byte value_storage[sizeof(ValueT)];
  211. };
  212. // A specialization of the storage entry for sets without a distinct value type.
  213. // Somewhat duplicative with the key-value version, but C++ specialization makes
  214. // doing better difficult.
  215. template <typename KeyT>
  216. struct StorageEntry<KeyT, void> {
  217. static constexpr bool IsTriviallyDestructible =
  218. std::is_trivially_destructible_v<KeyT>;
  219. static constexpr bool IsTriviallyRelocatable =
  220. IsTriviallyDestructible && std::is_trivially_move_constructible_v<KeyT>;
  221. static constexpr bool IsCopyable =
  222. IsTriviallyRelocatable || std::is_copy_constructible_v<KeyT>;
  223. auto key() const -> const KeyT& {
  224. // Ensure we don't need more alignment than available.
  225. static_assert(
  226. alignof(StorageEntry) <= MinAllocatedSize,
  227. "The minimum allocated size turns into the alignment of our array of "
  228. "storage entries as they follow the metadata byte array.");
  229. return *std::launder(reinterpret_cast<const KeyT*>(&key_storage));
  230. }
  231. auto key() -> KeyT& {
  232. return const_cast<KeyT&>(const_cast<const StorageEntry*>(this)->key());
  233. }
  234. auto Destroy() -> void {
  235. static_assert(!IsTriviallyDestructible,
  236. "Should never instantiate when trivial!");
  237. key().~KeyT();
  238. }
  239. auto CopyFrom(const StorageEntry& entry) -> void
  240. requires(IsCopyable)
  241. {
  242. if constexpr (IsTriviallyRelocatable) {
  243. memcpy(this, &entry, sizeof(StorageEntry));
  244. } else {
  245. new (&key_storage) KeyT(entry.key());
  246. }
  247. }
  248. auto MoveFrom(StorageEntry&& entry) -> void {
  249. if constexpr (IsTriviallyRelocatable) {
  250. memcpy(this, &entry, sizeof(StorageEntry));
  251. } else {
  252. new (&key_storage) KeyT(std::move(entry.key()));
  253. entry.key().~KeyT();
  254. }
  255. }
  256. alignas(KeyT) std::byte key_storage[sizeof(KeyT)];
  257. };
  258. struct Metrics {
  259. // How many keys are present in the table.
  260. ssize_t key_count = 0;
  261. // How many slots of the table are reserved due to deleted markers required to
  262. // preserve probe sequences.
  263. ssize_t deleted_count = 0;
  264. // How many bytes of allocated storage are used by the table. Note, does not
  265. // include the table object or any small-size buffer.
  266. ssize_t storage_bytes = 0;
  267. // How many keys have required probing beyond the initial group. These are the
  268. // keys with a probe distance > 0.
  269. ssize_t probed_key_count = 0;
  270. // The probe distance averaged over every key. If every key is in its initial
  271. // group, this will be zero as no keys will have a larger probe distance. In
  272. // general, we want this to be as close to zero as possible.
  273. double probe_avg_distance = 0.0;
  274. // The maximum probe distance found for a single key in the table.
  275. ssize_t probe_max_distance = 0;
  276. // The average number of probing comparisons required to locate a specific key
  277. // in the table. This is how many comparisons are required *before* the key is
  278. // located, or the *failed* comparisons. We always have to do one successful
  279. // comparison at the end. This successful comparison isn't counted because
  280. // that focuses this metric on the overhead the table is introducing, and
  281. // keeps a "perfect" table with an average of `0.0` here similar to the
  282. // perfect average of `0.0` average probe distance.
  283. double probe_avg_compares = 0.0;
  284. // The maximum number of probing comparisons required to locate a specific
  285. // key in the table.
  286. ssize_t probe_max_compares = 0;
  287. };
  288. // A placeholder empty type used to model pointers to the allocated buffer of
  289. // storage.
  290. //
  291. // The allocated storage doesn't have a meaningful static layout -- it consists
  292. // of an array of metadata groups followed by an array of storage entries.
  293. // However, we want to be able to distinguish pointers to this storage, and so we
  294. // use pointers to this placeholder type as that signifier.
  295. //
  296. // This is a complete, empty type so that it can be used as a base class of a
  297. // specific concrete storage type for compile-time sized storage.
  298. struct Storage {};
  299. // Forward declaration to support friending, see the definition below.
  300. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  301. class BaseImpl;
  302. // Implementation helper for defining a read-only view type for a hashtable.
  303. //
  304. // A specific user-facing hashtable view type should derive privately from this
  305. // type, and forward the implementation of its interface to functions in this
  306. // type.
  307. //
  308. // The methods available to user-facing hashtable types are `protected`, and
  309. // where they are expected to directly map to a public API, named with an
  310. // `Impl` suffix. This naming ensures types don't `using` in these low-level APIs
  311. // but declare their own and implement them by forwarding to these APIs. We
  312. // don't want users to have to read these implementation details to understand
  313. // their container's API, so none of these methods should be `using`-ed into the
  314. // user facing types.
  315. //
  316. // Some of the types are just convenience aliases that aren't important to
  317. // surface as part of the user-facing type API, so those are reasonable to add
  318. // via a `using`.
  319. //
  320. // Some methods are used by other parts of the raw hashtable implementation.
  321. // Those are kept `private` and where necessary the other components of the raw
  322. // hashtable implementation are friended to give access to them.
  323. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  324. class ViewImpl {
  325. protected:
  326. using KeyT = InputKeyT;
  327. using ValueT = InputValueT;
  328. using KeyContextT = InputKeyContextT;
  329. using EntryT = StorageEntry<KeyT, ValueT>;
  330. using MetricsT = Metrics;
  331. friend class BaseImpl<KeyT, ValueT, KeyContextT>;
  332. template <typename InputBaseT, ssize_t SmallSize>
  333. friend class TableImpl;
  334. // Make more-`const` types friends to enable conversions that add `const`.
  335. friend class ViewImpl<const KeyT, ValueT, KeyContextT>;
  336. friend class ViewImpl<KeyT, const ValueT, KeyContextT>;
  337. friend class ViewImpl<const KeyT, const ValueT, KeyContextT>;
  338. ViewImpl() = default;
  339. // Support adding `const` to either key or value type of some other view.
  340. template <typename OtherKeyT, typename OtherValueT>
  341. // NOLINTNEXTLINE(google-explicit-constructor)
  342. ViewImpl(ViewImpl<OtherKeyT, OtherValueT, KeyContextT> other_view)
  343. requires(std::same_as<KeyT, OtherKeyT> ||
  344. std::same_as<KeyT, const OtherKeyT>) &&
  345. (std::same_as<ValueT, OtherValueT> ||
  346. std::same_as<ValueT, const OtherValueT>)
  347. : alloc_size_(other_view.alloc_size_), storage_(other_view.storage_) {}
  348. // Looks up an entry in the hashtable and returns its address or null if not
  349. // present.
  350. template <typename LookupKeyT>
  351. auto LookupEntry(LookupKeyT lookup_key, KeyContextT key_context) const
  352. -> EntryT*;
  353. // Calls `entry_callback` for each entry in the hashtable. All the entries
  354. // within a specific group are visited first, and then `group_callback` is
  355. // called on the group itself. The `group_callback` is typically only used by
  356. // the internals of the hashtable.
  357. template <typename EntryCallbackT, typename GroupCallbackT>
  358. auto ForEachEntry(EntryCallbackT entry_callback,
  359. GroupCallbackT group_callback) const -> void;
  360. // Returns a collection of informative metrics on the current state of the
  361. // table, useful for performance analysis. These include relatively slow to
  362. // compute metrics requiring deep inspection of the table's state.
  363. auto ComputeMetricsImpl(KeyContextT key_context) const -> MetricsT;
  364. private:
  365. ViewImpl(ssize_t alloc_size, Storage* storage)
  366. : alloc_size_(alloc_size), storage_(storage) {}
  367. // Computes the offset from the metadata array to the entries array for a
  368. // given size. This is trivial, but we use this routine to enforce invariants
  369. // on the sizes.
  370. static constexpr auto EntriesOffset(ssize_t alloc_size) -> ssize_t {
  371. CARBON_DCHECK(llvm::isPowerOf2_64(alloc_size),
  372. "Size must be a power of two for a hashed buffer!");
  373. // The size is always a power of two, and we prevent any too-small sizes, so
  374. // being a power of two provides the needed alignment. As a result, the
  375. // offset is exactly the size. We validate this here to catch alignment bugs
  376. // early.
  377. CARBON_DCHECK(static_cast<uint64_t>(alloc_size) ==
  378. llvm::alignTo<alignof(EntryT)>(alloc_size));
  379. return alloc_size;
  380. }
  381. // Compute the allocated table's byte size.
  382. static constexpr auto AllocByteSize(ssize_t alloc_size) -> ssize_t {
  383. return EntriesOffset(alloc_size) + sizeof(EntryT) * alloc_size;
  384. }
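// As a concrete illustration of this layout (the numbers are purely
// illustrative): with `alloc_size == 128` and `sizeof(EntryT) == 16`, the
// metadata bytes occupy offsets `[0, 128)` of the allocation, the entry array
// occupies `[128, 128 + 128 * 16)`, and `AllocByteSize` returns 2176.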
  385. auto metadata() const -> uint8_t* {
  386. return reinterpret_cast<uint8_t*>(storage_);
  387. }
  388. auto entries() const -> EntryT* {
  389. return reinterpret_cast<EntryT*>(reinterpret_cast<std::byte*>(storage_) +
  390. EntriesOffset(alloc_size_));
  391. }
  392. // Prefetch the metadata prior to probing. This overlaps as much of the memory
  393. // access latency as we can with the hashing of a key or other latency-bound
  394. // operation prior to probing.
  395. auto PrefetchMetadata() const -> void {
  396. if constexpr (CARBON_ENABLE_PREFETCH_METADATA) {
  397. // Prefetch with a "low" temporal locality as we're primarily expecting a
  398. // brief use of the metadata and then to return to application code.
  399. __builtin_prefetch(metadata(), /*read*/ 0, /*low-locality*/ 1);
  400. }
  401. }
  402. // Prefetch an entry. This prefetches for read as it is primarily expected to
  403. // be used in the probing path, and writing afterwards isn't especially slowed
  404. // down. We don't want to synthesize writes unless we *know* we're going to
  405. // write.
  406. static auto PrefetchEntryGroup(const EntryT* entry_group) -> void {
  407. if constexpr (CARBON_ENABLE_PREFETCH_ENTRY_GROUP) {
  408. // Prefetch with a "low" temporal locality as we're primarily expecting a
  409. // brief use of the entries and then to return to application code.
  410. __builtin_prefetch(entry_group, /*read*/ 0, /*low-locality*/ 1);
  411. }
  412. }
  413. ssize_t alloc_size_;
  414. Storage* storage_;
  415. };
  416. // Implementation helper for defining a read-write base type for a hashtable
  417. // that type-erases any SSO buffer.
  418. //
  419. // A specific user-facing hashtable base type should derive using *`protected`*
  420. // inheritance from this type, and forward the implementation of its interface
  421. // to functions in this type.
  422. //
  423. // Other than the use of `protected` inheritance, the patterns for this type,
  424. // and how to build user-facing hashtable base types from it, mirror those of
  425. // `ViewImpl`. See its documentation for more details.
  426. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  427. class BaseImpl {
  428. protected:
  429. using KeyT = InputKeyT;
  430. using ValueT = InputValueT;
  431. using KeyContextT = InputKeyContextT;
  432. using ViewImplT = ViewImpl<KeyT, ValueT, KeyContextT>;
  433. using EntryT = typename ViewImplT::EntryT;
  434. using MetricsT = typename ViewImplT::MetricsT;
  435. BaseImpl(int small_alloc_size, Storage* small_storage)
  436. : small_alloc_size_(small_alloc_size) {
  437. CARBON_CHECK(small_alloc_size >= 0);
  438. Construct(small_storage);
  439. }
  440. // Only used for copying and moving, and leaves storage uninitialized.
  441. BaseImpl(ssize_t alloc_size, int growth_budget, int small_alloc_size)
  442. : view_impl_(alloc_size, nullptr),
  443. growth_budget_(growth_budget),
  444. small_alloc_size_(small_alloc_size) {}
  445. // Destruction must be handled by the table where it can destroy entries in
  446. // any small buffer, so make the base destructor protected but defaulted here.
  447. ~BaseImpl() = default;
  448. // NOLINTNEXTLINE(google-explicit-constructor): Designed to implicitly decay.
  449. operator ViewImplT() const { return view_impl(); }
  450. auto view_impl() const -> ViewImplT { return view_impl_; }
  451. // Looks up the provided key in the hashtable. If found, returns a pointer to
  452. // that entry and `false`.
  453. //
  454. // If not found, will locate an empty entry for inserting into, set the
  455. // metadata for that entry, and return a pointer to the entry and `true`. When
  456. // necessary, this will grow the hashtable to cause there to be sufficient
  457. // empty entries.
  458. template <typename LookupKeyT>
  459. auto InsertImpl(LookupKeyT lookup_key, KeyContextT key_context)
  460. -> std::pair<EntryT*, bool>;
  461. // Grow the table to a specific allocation size.
  462. //
  463. // This will grow the table if necessary for it to have an allocation size
  464. // of `target_alloc_size` which must be a power of two. Note that this will
  465. // not allow that many keys to be inserted into the hashtable, but a smaller
  466. // number based on the load factor. If a specific number of insertions need to
  467. // be achieved without triggering growth, use the `GrowForInsertCountImpl`
  468. // method.
  469. auto GrowToAllocSizeImpl(ssize_t target_alloc_size, KeyContextT key_context)
  470. -> void;
  471. // Grow the table to allow inserting the specified number of keys.
  472. auto GrowForInsertCountImpl(ssize_t count, KeyContextT key_context) -> void;
  473. // Looks up the entry in the hashtable, and if found destroys the entry and
  474. // returns `true`. If not found, returns `false`.
  475. //
  476. // Does not release any memory, just leaves a tombstone behind so this entry
  477. // cannot be found and the slot can in theory be reused.
  478. template <typename LookupKeyT>
  479. auto EraseImpl(LookupKeyT lookup_key, KeyContextT key_context) -> bool;
  480. // Erases all entries in the hashtable but leaves the allocated storage.
  481. auto ClearImpl() -> void;
  482. private:
  483. template <typename InputBaseT, ssize_t SmallSize>
  484. friend class TableImpl;
  485. static constexpr ssize_t Alignment = std::max<ssize_t>(
  486. alignof(MetadataGroup), alignof(StorageEntry<KeyT, ValueT>));
  487. // Implementation of inline small storage for the provided key type, value
  488. // type, and small size. Specialized for a zero small size to be an empty
  489. // struct.
  490. template <ssize_t SmallSize>
  491. struct SmallStorage : Storage {
  492. alignas(Alignment) uint8_t metadata[SmallSize];
  493. mutable StorageEntry<KeyT, ValueT> entries[SmallSize];
  494. };
  495. // Specialized storage with no inline buffer to avoid any extra alignment.
  496. template <>
  497. struct SmallStorage<0> {};
  498. static auto Allocate(ssize_t alloc_size) -> Storage*;
  499. static auto Deallocate(Storage* storage, ssize_t alloc_size) -> void;
  500. auto growth_budget() const -> ssize_t { return growth_budget_; }
  501. auto alloc_size() const -> ssize_t { return view_impl_.alloc_size_; }
  502. auto alloc_size() -> ssize_t& { return view_impl_.alloc_size_; }
  503. auto storage() const -> Storage* { return view_impl_.storage_; }
  504. auto storage() -> Storage*& { return view_impl_.storage_; }
  505. auto metadata() const -> uint8_t* { return view_impl_.metadata(); }
  506. auto entries() const -> EntryT* { return view_impl_.entries(); }
  507. auto small_alloc_size() const -> ssize_t {
  508. return static_cast<unsigned>(small_alloc_size_);
  509. }
  510. auto is_small() const -> bool {
  511. CARBON_DCHECK(alloc_size() >= small_alloc_size());
  512. return alloc_size() == small_alloc_size();
  513. }
  514. // Wrapper to call `ViewImplT::PrefetchMetadata`; see that method for details.
  515. auto PrefetchStorage() const -> void { view_impl_.PrefetchMetadata(); }
  516. auto Construct(Storage* small_storage) -> void;
  517. auto Destroy() -> void;
  518. auto CopySlotsFrom(const BaseImpl& arg) -> void
  519. requires(EntryT::IsCopyable);
  520. auto MoveFrom(BaseImpl&& arg, Storage* small_storage) -> void;
  521. auto InsertIntoEmpty(HashCode hash) -> EntryT*;
  522. static auto ComputeNextAllocSize(ssize_t old_alloc_size) -> ssize_t;
  523. static auto GrowthThresholdForAllocSize(ssize_t alloc_size) -> ssize_t;
  524. auto GrowToNextAllocSize(KeyContextT key_context) -> void;
  525. auto GrowAndInsert(HashCode hash, KeyContextT key_context) -> EntryT*;
  526. ViewImplT view_impl_;
  527. int growth_budget_;
  528. int small_alloc_size_;
  529. };
  530. // Implementation helper for defining a hashtable type with an SSO buffer.
  531. //
  532. // A specific user-facing hashtable should derive privately from this
  533. // type, and forward the implementation of its interface to functions in this
  534. // type. It should provide the corresponding user-facing hashtable base type as
  535. // the `InputBaseT` type parameter (rather than a key/value pair), and this type
  536. // will in turn derive from that provided base type. This allows derived-to-base
  537. // conversion from the user-facing hashtable type to the user-facing hashtable
  538. // base type. And it does so keeping the inheritance linear. The resulting
  539. // linear inheritance hierarchy for a `Map<K, T>` type will look like:
  540. //
  541. // Map<K, T>
  542. // ↓
  543. // TableImpl<MapBase<K, T>>
  544. // ↓
  545. // MapBase<K, T>
  546. // ↓
  547. // BaseImpl<K, T>
  548. //
  549. // Other than this inheritance technique, the patterns for this type, and how to
  550. // build user-facing hashtable types from it, mirror those of `ViewImpl`. See
  551. // its documentation for more details.
  552. template <typename InputBaseT, ssize_t SmallSize>
  553. class TableImpl : public InputBaseT {
  554. protected:
  555. using BaseT = InputBaseT;
  556. TableImpl() : BaseT(SmallSize, small_storage()) {}
  557. TableImpl(const TableImpl& arg)
  558. requires(BaseT::EntryT::IsCopyable);
  559. TableImpl(TableImpl&& arg) noexcept;
  560. auto operator=(const TableImpl& arg) -> TableImpl&
  561. requires(BaseT::EntryT::IsCopyable);
  562. auto operator=(TableImpl&& arg) noexcept -> TableImpl&;
  563. ~TableImpl();
  564. // Resets the hashtable to its initial state, clearing all entries and
  565. // releasing all memory. If the hashtable had an SSO buffer, that is restored
  566. // as the storage. Otherwise, a minimum sized table storage is allocated.
  567. auto ResetImpl() -> void;
  568. private:
  569. using KeyT = BaseT::KeyT;
  570. using ValueT = BaseT::ValueT;
  571. using EntryT = BaseT::EntryT;
  572. using SmallStorage = BaseT::template SmallStorage<SmallSize>;
  573. auto small_storage() const -> Storage*;
  574. auto SetUpStorage() -> void;
  575. [[no_unique_address]] mutable SmallStorage small_storage_;
  576. };
  577. ////////////////////////////////////////////////////////////////////////////////
  578. //
  579. // Only implementation details below this point.
  580. //
  581. ////////////////////////////////////////////////////////////////////////////////
  582. // Computes a seed that provides a small amount of entropy from ASLR where
  583. // available with minimal cost. The priority is speed, and this computes the
  584. // entropy in a way that doesn't require loading from memory -- it merely uses
  585. // the address of a global, which is already available without a load.
  586. inline auto ComputeSeed() -> uint64_t {
  587. // A global variable whose address is used as a seed. This allows ASLR to
  588. // introduce some variation in hashtable ordering when enabled via the code
  589. // model for globals.
  590. extern volatile std::byte global_addr_seed;
  591. return reinterpret_cast<uint64_t>(&global_addr_seed);
  592. }
  593. inline auto ComputeProbeMaskFromSize(ssize_t size) -> size_t {
  594. CARBON_DCHECK(llvm::isPowerOf2_64(size),
  595. "Size must be a power of two for a hashed buffer!");
  596. // Since `size` is a power of two, we can make sure the probes are less
  597. // than `size` by making the mask `size - 1`. We also mask off the low
  598. // bits so the probes are a multiple of the size of the groups of entries.
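// For example (assuming `GroupMask == GroupSize - 1`): with `size == 64` and
// `GroupSize == 16`, the mask is `48`, so masked probe indices are restricted
// to the group starts {0, 16, 32, 48}.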
  599. return (size - 1) & ~GroupMask;
  600. }
  601. // This class handles building a sequence of probe indices from a given
  602. // starting point, including both the quadratic growth and masking the index
  603. // to stay within the bucket array size. The starting point doesn't need to be
  604. // clamped to the size ahead of time (or even be positive); we do that
  605. // internally.
  606. //
  607. // For reference on quadratic probing:
  608. // https://en.wikipedia.org/wiki/Quadratic_probing
  609. //
  610. // We compute the quadratic probe index incrementally, but we can also compute
  611. // it mathematically and will check that the incremental result matches our
  612. // mathematical expectation. We use the quadratic probing formula of:
  613. //
  614. // p(start, step) = (start + (step + step^2) / 2) (mod size / GroupSize)
  615. //
  616. // However, we compute it incrementally and scale all the variables by the group
  617. // size so it can be used as an index without an additional multiplication.
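//
// As a worked example (with illustrative numbers): for `GroupSize == 16` and
// `size == 64`, the mask is `48` and a starting index of `37` is first masked
// to the group start `32`. Successive steps then add 16, 32, and 48 before
// masking, visiting the group starts 32, 48, 16, 0. In group units this is
// 2, 3, 1, 0, which matches p(2, step) = (2 + (step + step^2) / 2) mod 4 for
// step = 0, 1, 2, 3.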
  618. class ProbeSequence {
  619. public:
  620. ProbeSequence(ssize_t start, ssize_t size) {
  621. mask_ = ComputeProbeMaskFromSize(size);
  622. p_ = start & mask_;
  623. #ifndef NDEBUG
  624. start_ = start & mask_;
  625. size_ = size;
  626. #endif
  627. }
  628. auto Next() -> void {
  629. step_ += GroupSize;
  630. p_ = (p_ + step_) & mask_;
  631. #ifndef NDEBUG
  632. // Verify against the quadratic formula we expect to be following by scaling
  633. // everything down by `GroupSize`.
  634. CARBON_DCHECK(
  635. (p_ / GroupSize) ==
  636. ((start_ / GroupSize +
  637. (step_ / GroupSize + (step_ / GroupSize) * (step_ / GroupSize)) /
  638. 2) %
  639. (size_ / GroupSize)),
  640. "Index in probe sequence does not match the expected formula.");
  641. CARBON_DCHECK(step_ < size_,
  642. "We necessarily visit all groups, so we can't have more "
  643. "probe steps than groups.");
  644. #endif
  645. }
  646. auto index() const -> ssize_t { return p_; }
  647. private:
  648. ssize_t step_ = 0;
  649. size_t mask_;
  650. ssize_t p_;
  651. #ifndef NDEBUG
  652. ssize_t start_;
  653. ssize_t size_;
  654. #endif
  655. };
  656. // TODO: Evaluate keeping this outlined to see if macro benchmarks observe the
  657. // same perf hit as micro benchmarks.
  658. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  659. template <typename LookupKeyT>
  660. auto ViewImpl<InputKeyT, InputValueT, InputKeyContextT>::LookupEntry(
  661. LookupKeyT lookup_key, KeyContextT key_context) const -> EntryT* {
  662. PrefetchMetadata();
  663. ssize_t local_size = alloc_size_;
  664. CARBON_DCHECK(local_size > 0);
  665. uint8_t* local_metadata = metadata();
  666. HashCode hash = key_context.HashKey(lookup_key, ComputeSeed());
  667. auto [hash_index, tag] = hash.ExtractIndexAndTag<7>();
  668. EntryT* local_entries = entries();
  669. // Walk through groups of entries using a quadratic probe starting from
  670. // `hash_index`.
  671. ProbeSequence s(hash_index, local_size);
  672. do {
  673. ssize_t group_index = s.index();
  674. // Load the group's metadata and prefetch the entries for this group. The
  675. // prefetch here helps hide key access latency while we're matching the
  676. // metadata.
  677. MetadataGroup g = MetadataGroup::Load(local_metadata, group_index);
  678. EntryT* group_entries = &local_entries[group_index];
  679. PrefetchEntryGroup(group_entries);
  680. // For each group, match the tag against the metadata to extract the
  681. // potentially matching entries within the group.
  682. auto metadata_matched_range = g.Match(tag);
  683. if (LLVM_LIKELY(metadata_matched_range)) {
  684. // If any entries in this group potentially match based on their metadata,
  685. // walk each candidate and compare its key to see if we have definitively
  686. // found a match.
  687. auto byte_it = metadata_matched_range.begin();
  688. auto byte_end = metadata_matched_range.end();
  689. do {
  690. EntryT* entry = byte_it.index_ptr(group_entries);
  691. if (LLVM_LIKELY(key_context.KeyEq(lookup_key, entry->key()))) {
  692. __builtin_assume(entry != nullptr);
  693. return entry;
  694. }
  695. ++byte_it;
  696. } while (LLVM_UNLIKELY(byte_it != byte_end));
  697. }
  698. // We failed to find a matching entry in this bucket, so check if there are
  699. // empty slots as that indicates we're done probing -- no later probed index
  700. // could have a match.
  701. auto empty_byte_matched_range = g.MatchEmpty();
  702. if (LLVM_LIKELY(empty_byte_matched_range)) {
  703. return nullptr;
  704. }
  705. s.Next();
  706. // We use a weird construct of an "unlikely" condition of `true`. The goal
  707. // is to get the compiler to not prioritize the back edge of the loop for
  708. // code layout, and in at least some tests this seems to be an effective
  709. // construct for achieving this.
  710. } while (LLVM_UNLIKELY(true));
  711. }
  712. // Note that we force inlining here because we expect to be called with lambdas
  713. // that will in turn be inlined to form the loop body. We don't want function
  714. // boundaries within the loop for performance, and the degree of simplification
  715. // from inlining these callbacks may be difficult for the compiler to recognize
  716. // automatically.
  717. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  718. template <typename EntryCallbackT, typename GroupCallbackT>
  719. [[clang::always_inline]] auto
  720. ViewImpl<InputKeyT, InputValueT, InputKeyContextT>::ForEachEntry(
  721. EntryCallbackT entry_callback, GroupCallbackT group_callback) const
  722. -> void {
  723. uint8_t* local_metadata = metadata();
  724. EntryT* local_entries = entries();
  725. ssize_t local_size = alloc_size_;
  726. for (ssize_t group_index = 0; group_index < local_size;
  727. group_index += GroupSize) {
  728. auto g = MetadataGroup::Load(local_metadata, group_index);
  729. auto present_matched_range = g.MatchPresent();
  730. if (!present_matched_range) {
  731. continue;
  732. }
  733. for (ssize_t byte_index : present_matched_range) {
  734. entry_callback(local_entries[group_index + byte_index]);
  735. }
  736. group_callback(&local_metadata[group_index]);
  737. }
  738. }
  739. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  740. auto ViewImpl<InputKeyT, InputValueT, InputKeyContextT>::ComputeMetricsImpl(
  741. KeyContextT key_context) const -> Metrics {
  742. uint8_t* local_metadata = metadata();
  743. EntryT* local_entries = entries();
  744. ssize_t local_size = alloc_size_;
  745. Metrics metrics;
  746. // Compute the ones we can directly.
  747. metrics.deleted_count = llvm::count(
  748. llvm::ArrayRef(local_metadata, local_size), MetadataGroup::Deleted);
  749. metrics.storage_bytes = AllocByteSize(local_size);
  750. // We want to process present slots specially to collect metrics on their
  751. // probing behavior.
  752. for (ssize_t group_index = 0; group_index < local_size;
  753. group_index += GroupSize) {
  754. auto g = MetadataGroup::Load(local_metadata, group_index);
  755. auto present_matched_range = g.MatchPresent();
  756. for (ssize_t byte_index : present_matched_range) {
  757. ++metrics.key_count;
  758. ssize_t index = group_index + byte_index;
  759. HashCode hash =
  760. key_context.HashKey(local_entries[index].key(), ComputeSeed());
  761. auto [hash_index, tag] = hash.ExtractIndexAndTag<7>();
  762. ProbeSequence s(hash_index, local_size);
  763. metrics.probed_key_count +=
  764. static_cast<ssize_t>(s.index() != group_index);
  765. // For each probed key, go through the probe sequence to find both the
  766. // probe distance and how many comparisons are required.
  767. ssize_t distance = 0;
  768. ssize_t compares = 0;
  769. for (; s.index() != group_index; s.Next()) {
  770. auto probe_g = MetadataGroup::Load(local_metadata, s.index());
  771. auto probe_matched_range = probe_g.Match(tag);
  772. compares += std::distance(probe_matched_range.begin(),
  773. probe_matched_range.end());
  774. distance += 1;
  775. }
  776. auto probe_g = MetadataGroup::Load(local_metadata, s.index());
  777. auto probe_matched_range = probe_g.Match(tag);
  778. CARBON_CHECK(!probe_matched_range.empty());
  779. for (ssize_t match_index : probe_matched_range) {
  780. if (match_index >= byte_index) {
  781. // Note we only count the compares that will *fail* as part of
  782. // probing. The last successful compare isn't interesting, it is
  783. // always needed.
  784. break;
  785. }
  786. compares += 1;
  787. }
  788. metrics.probe_avg_distance += distance;
  789. metrics.probe_max_distance =
  790. std::max(metrics.probe_max_distance, distance);
  791. metrics.probe_avg_compares += compares;
  792. metrics.probe_max_compares =
  793. std::max(metrics.probe_max_compares, compares);
  794. }
  795. }
  796. if (metrics.key_count > 0) {
  797. metrics.probe_avg_compares /= metrics.key_count;
  798. metrics.probe_avg_distance /= metrics.key_count;
  799. }
  800. return metrics;
  801. }
  802. // TODO: Evaluate whether it is worth forcing this out-of-line given the
  803. // reasonable ABI boundary it forms and large volume of code necessary to
  804. // implement it.
  805. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  806. template <typename LookupKeyT>
  807. auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::InsertImpl(
  808. LookupKeyT lookup_key, KeyContextT key_context)
  809. -> std::pair<EntryT*, bool> {
  810. CARBON_DCHECK(alloc_size() > 0);
  811. PrefetchStorage();
  812. uint8_t* local_metadata = metadata();
  813. HashCode hash = key_context.HashKey(lookup_key, ComputeSeed());
  814. auto [hash_index, tag] = hash.ExtractIndexAndTag<7>();
  815. // We re-purpose the empty control byte to signal no insert is needed to the
  816. // caller. This is guaranteed to not be a control byte we're inserting.
  817. // constexpr uint8_t NoInsertNeeded = Group::Empty;
  818. ssize_t group_with_deleted_index;
  819. MetadataGroup::MatchIndex deleted_match = {};
  820. EntryT* local_entries = entries();
  821. auto return_insert_at_index = [&](ssize_t index) -> std::pair<EntryT*, bool> {
  822. // We'll need to insert at this index so set the control group byte to the
  823. // proper value.
  824. local_metadata[index] = tag | MetadataGroup::PresentMask;
  825. return {&local_entries[index], true};
  826. };
  827. for (ProbeSequence s(hash_index, alloc_size());; s.Next()) {
  828. ssize_t group_index = s.index();
  829. // Load the group's metadata and prefetch the entries for this group. The
  830. // prefetch here helps hide key access latency while we're matching the
  831. // metadata.
  832. auto g = MetadataGroup::Load(local_metadata, group_index);
  833. EntryT* group_entries = &local_entries[group_index];
  834. ViewImplT::PrefetchEntryGroup(group_entries);
  835. auto control_byte_matched_range = g.Match(tag);
  836. if (control_byte_matched_range) {
  837. auto byte_it = control_byte_matched_range.begin();
  838. auto byte_end = control_byte_matched_range.end();
  839. do {
  840. EntryT* entry = byte_it.index_ptr(group_entries);
  841. if (LLVM_LIKELY(key_context.KeyEq(lookup_key, entry->key()))) {
  842. return {entry, false};
  843. }
  844. ++byte_it;
  845. } while (LLVM_UNLIKELY(byte_it != byte_end));
  846. }
  847. // Track the first group with a deleted entry that we could insert over.
  848. if (!deleted_match) {
  849. deleted_match = g.MatchDeleted();
  850. group_with_deleted_index = group_index;
  851. }
  852. // We failed to find a matching entry in this bucket, so check if there are
  853. // no empty slots. In that case, we'll continue probing.
  854. auto empty_match = g.MatchEmpty();
  855. if (!empty_match) {
  856. continue;
  857. }
  858. // Ok, we've finished probing without finding anything and need to insert
  859. // instead.
  860. // If we found a deleted slot, we don't need the probe sequence to insert
  861. // so just bail. We want to ensure building up a table is fast so we
  862. // de-prioritize this a bit. In practice this doesn't have too much of an
  863. // effect.
  864. if (LLVM_UNLIKELY(deleted_match)) {
  865. return return_insert_at_index(group_with_deleted_index +
  866. deleted_match.index());
  867. }
  868. // We're going to need to grow by inserting into an empty slot. Check that
  869. // we have the budget for that before we compute the exact index of the
  870. // empty slot. Without the growth budget we'll have to completely rehash and
  871. // so we can just bail here.
  872. if (LLVM_UNLIKELY(growth_budget_ == 0)) {
  873. return {GrowAndInsert(hash, key_context), true};
  874. }
  875. --growth_budget_;
  876. CARBON_DCHECK(growth_budget() >= 0,
  877. "Growth budget shouldn't have gone negative!");
  878. return return_insert_at_index(group_index + empty_match.index());
  879. }
  880. CARBON_FATAL(
  881. "We should never finish probing without finding the entry or an empty "
  882. "slot.");
  883. }
  884. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  885. [[clang::noinline]] auto
  886. BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::GrowToAllocSizeImpl(
  887. ssize_t target_alloc_size, KeyContextT key_context) -> void {
  888. CARBON_CHECK(llvm::isPowerOf2_64(target_alloc_size));
  889. if (target_alloc_size <= alloc_size()) {
  890. return;
  891. }
  892. // If this is the next alloc size, we can use our optimized growth strategy.
  893. if (target_alloc_size == ComputeNextAllocSize(alloc_size())) {
  894. GrowToNextAllocSize(key_context);
  895. return;
  896. }
  897. // Create locals for the old state of the table.
  898. ssize_t old_size = alloc_size();
  899. CARBON_DCHECK(old_size > 0);
  900. bool old_small = is_small();
  901. Storage* old_storage = storage();
  902. uint8_t* old_metadata = metadata();
  903. EntryT* old_entries = entries();
  904. // Configure for the new size and allocate the new storage.
  905. alloc_size() = target_alloc_size;
  906. storage() = Allocate(target_alloc_size);
  907. std::memset(metadata(), 0, target_alloc_size);
  908. growth_budget_ = GrowthThresholdForAllocSize(target_alloc_size);
  909. // Just re-insert all the entries. As we're more than doubling the table size,
  910. // we don't bother with fancy optimizations here. Even using `memcpy` for the
  911. // entries seems unlikely to be a significant win given how sparse the
  912. // insertions will end up being.
  913. ssize_t count = 0;
  914. for (ssize_t group_index = 0; group_index < old_size;
  915. group_index += GroupSize) {
  916. auto g = MetadataGroup::Load(old_metadata, group_index);
  917. auto present_matched_range = g.MatchPresent();
  918. for (ssize_t byte_index : present_matched_range) {
  919. ++count;
  920. ssize_t index = group_index + byte_index;
  921. HashCode hash =
  922. key_context.HashKey(old_entries[index].key(), ComputeSeed());
  923. EntryT* new_entry = InsertIntoEmpty(hash);
  924. new_entry->MoveFrom(std::move(old_entries[index]));
  925. }
  926. }
  927. growth_budget_ -= count;
  928. if (!old_small) {
  929. // Old isn't a small buffer, so we need to deallocate it.
  930. Deallocate(old_storage, old_size);
  931. }
  932. }
  933. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  934. auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::GrowForInsertCountImpl(
  935. ssize_t count, KeyContextT key_context) -> void {
  936. if (count < growth_budget_) {
  937. // Already space for the needed growth.
  938. return;
  939. }
  940. // Currently, we don't account for any tombstones marking deleted elements,
  941. // and just conservatively ensure the growth will create adequate growth
  942. // budget for insertions. We could make this more precise by instead walking
  943. // the table and only counting present slots, as once we grow we'll be able to
  944. // reclaim all of the deleted slots. But this adds complexity and it isn't
  945. // clear this is necessary so we do the simpler conservative thing.
  946. ssize_t used_budget =
  947. GrowthThresholdForAllocSize(alloc_size()) - growth_budget_;
  948. ssize_t budget_needed = used_budget + count;
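// Convert the needed budget into an allocation size. Assuming the growth
// threshold is roughly 7/8 of the allocation size (see
// `GrowthThresholdForAllocSize`), the allocation must be at least 8/7 of the
// needed budget. For example, a needed budget of 100 gives a `space_needed` of
// 114, which rounds up to a 128-slot allocation whose threshold is comfortably
// above 100.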
  949. ssize_t space_needed = budget_needed + (budget_needed / 7);
  950. ssize_t target_alloc_size = llvm::NextPowerOf2(space_needed);
  951. CARBON_CHECK(GrowthThresholdForAllocSize(target_alloc_size) >
  952. (budget_needed));
  953. GrowToAllocSizeImpl(target_alloc_size, key_context);
  954. }
  955. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  956. template <typename LookupKeyT>
  957. auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::EraseImpl(
  958. LookupKeyT lookup_key, KeyContextT key_context) -> bool {
  959. EntryT* entry = view_impl_.LookupEntry(lookup_key, key_context);
  960. if (!entry) {
  961. return false;
  962. }
  963. // If there are empty slots in this group then nothing will probe past this
  964. // group looking for an entry so we can simply set this slot to empty as
  965. // well. However, if every slot in this group is full, it might be part of
  966. // a long probe chain that we can't disrupt. In that case we mark the slot's
  967. // metadata as deleted to keep probes continuing past it.
  968. //
  969. // If we mark the slot as empty, we'll also need to increase the growth
  970. // budget.
  971. uint8_t* local_metadata = metadata();
  972. EntryT* local_entries = entries();
  973. ssize_t index = entry - local_entries;
  974. ssize_t group_index = index & ~GroupMask;
  975. auto g = MetadataGroup::Load(local_metadata, group_index);
  976. auto empty_matched_range = g.MatchEmpty();
  977. if (empty_matched_range) {
  978. local_metadata[index] = MetadataGroup::Empty;
  979. ++growth_budget_;
  980. } else {
  981. local_metadata[index] = MetadataGroup::Deleted;
  982. }
  983. if constexpr (!EntryT::IsTriviallyDestructible) {
  984. entry->Destroy();
  985. }
  986. return true;
  987. }
  988. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  989. auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::ClearImpl() -> void {
  990. view_impl_.ForEachEntry(
  991. [](EntryT& entry) {
  992. if constexpr (!EntryT::IsTriviallyDestructible) {
  993. entry.Destroy();
  994. }
  995. },
  996. [](uint8_t* metadata_group) {
  997. // Clear the group.
  998. std::memset(metadata_group, 0, GroupSize);
  999. });
  1000. growth_budget_ = GrowthThresholdForAllocSize(alloc_size());
  1001. }

// Allocates the appropriate memory layout for a table of the given
// `alloc_size`, with space both for the metadata array and entries.
//
// The returned pointer *must* be deallocated by calling the below `Deallocate`
// function with the same `alloc_size` as used here.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::Allocate(
    ssize_t alloc_size) -> Storage* {
  return reinterpret_cast<Storage*>(__builtin_operator_new(
      ViewImplT::AllocByteSize(alloc_size),
      static_cast<std::align_val_t>(Alignment), std::nothrow_t()));
}

// Deallocates a table's storage that was allocated with the `Allocate`
// function.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::Deallocate(
    Storage* storage, ssize_t alloc_size) -> void {
  ssize_t allocated_size = ViewImplT::AllocByteSize(alloc_size);
  // The size is only passed along when sized deallocation is available, so
  // cast it to void to ensure the variable is always used and this compiles in
  // both configurations.
  static_cast<void>(allocated_size);
  __builtin_operator_delete(storage,
#if __cpp_sized_deallocation
                            allocated_size,
#endif
                            static_cast<std::align_val_t>(Alignment));
}

// Construct a table using the provided small storage if `small_alloc_size_` is
// non-zero. If `small_alloc_size_` is zero, then `small_storage` won't be used
// and can be null. Regardless, after this the storage pointer is non-null and
// the size is non-zero so that we can directly begin inserting or querying the
// table.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::Construct(
    Storage* small_storage) -> void {
  if (small_alloc_size_ > 0) {
    alloc_size() = small_alloc_size_;
    storage() = small_storage;
  } else {
    // Directly allocate the initial buffer so that the hashtable is never in
    // an empty state.
    alloc_size() = MinAllocatedSize;
    storage() = Allocate(MinAllocatedSize);
  }
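  // Zero the metadata array so every slot starts in the empty state, and reset
  // the growth budget to the full threshold for this allocation size.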
  std::memset(metadata(), 0, alloc_size());
  growth_budget_ = GrowthThresholdForAllocSize(alloc_size());
}

// Destroy the current table, releasing any memory used.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::Destroy() -> void {
  // Check for a moved-from state and don't do anything. Only a moved-from
  // table has a zero size.
  if (alloc_size() == 0) {
    return;
  }

  // Destroy all the entries.
  if constexpr (!EntryT::IsTriviallyDestructible) {
    view_impl_.ForEachEntry([](EntryT& entry) { entry.Destroy(); },
                            [](auto...) {});
  }

  // If small, nothing to deallocate.
  if (is_small()) {
    return;
  }

  // Just deallocate the storage without updating anything when destroying the
  // object.
  Deallocate(storage(), alloc_size());
}

// Copy all of the slots over from another table that is exactly the same
// allocation size.
//
// This requires the current table to already have storage allocated and set up
// but not initialized (or already cleared). It directly overwrites the storage
// allocation of the table to match the incoming argument.
//
// Despite being used in construction, this shouldn't be called for a
// moved-from `arg` -- in practice it is better for callers to handle this when
// setting up storage.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::CopySlotsFrom(
    const BaseImpl& arg) -> void
  requires(EntryT::IsCopyable)
{
  CARBON_DCHECK(alloc_size() == arg.alloc_size());
  ssize_t local_size = alloc_size();

  // Preserve which slot every entry is in, including tombstones in the
  // metadata, in order to copy into the new table's storage without rehashing
  // all of the keys. This is especially important as we don't have an easy way
  // to access the key context needed for rehashing here.
  uint8_t* local_metadata = metadata();
  EntryT* local_entries = entries();
  const uint8_t* local_arg_metadata = arg.metadata();
  const EntryT* local_arg_entries = arg.entries();
  memcpy(local_metadata, local_arg_metadata, local_size);
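  // Walk the metadata group by group and copy only the present entries. Empty
  // slots and tombstones need no entry copy; their state was already captured
  // by the metadata copy above.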
  for (ssize_t group_index = 0; group_index < local_size;
       group_index += GroupSize) {
    auto g = MetadataGroup::Load(local_arg_metadata, group_index);
    for (ssize_t byte_index : g.MatchPresent()) {
      local_entries[group_index + byte_index].CopyFrom(
          local_arg_entries[group_index + byte_index]);
    }
  }
}

// Move from another table to this one.
//
// Note that the `small_storage` is *this* table's small storage pointer,
// provided from the `TableImpl` to this `BaseImpl` method as an argument.
//
// Requires the table to have its size and growth budget already set up, but
// otherwise the table must not yet be initialized. Notably, storage should
// either not yet be constructed or already destroyed. This both sets up the
// storage and handles moving any slots as needed.
//
// Note that because this is used in construction it needs to handle a
// moved-from `arg`.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::MoveFrom(
    BaseImpl&& arg, Storage* small_storage) -> void {
  ssize_t local_size = alloc_size();
  CARBON_DCHECK(local_size == arg.alloc_size());

  // If `arg` is moved-from, skip the rest as the local size is all we need.
  if (local_size == 0) {
    return;
  }

  if (arg.is_small()) {
    CARBON_DCHECK(local_size == small_alloc_size_);
    this->storage() = small_storage;
    // For small tables, we have to move the entries as we can't move the
    // tables themselves. We do this preserving their slots and even tombstones
    // to avoid rehashing.
    uint8_t* local_metadata = this->metadata();
    EntryT* local_entries = this->entries();
    uint8_t* local_arg_metadata = arg.metadata();
    EntryT* local_arg_entries = arg.entries();
    memcpy(local_metadata, local_arg_metadata, local_size);
    if (EntryT::IsTriviallyRelocatable) {
      memcpy(local_entries, local_arg_entries, local_size * sizeof(EntryT));
    } else {
      for (ssize_t group_index = 0; group_index < local_size;
           group_index += GroupSize) {
        auto g = MetadataGroup::Load(local_arg_metadata, group_index);
        for (ssize_t byte_index : g.MatchPresent()) {
          local_entries[group_index + byte_index].MoveFrom(
              std::move(local_arg_entries[group_index + byte_index]));
        }
      }
    }
  } else {
    // Just point to the allocated storage.
    storage() = arg.storage();
  }

  // Finally, put the incoming table into a moved-from state.
  arg.alloc_size() = 0;
  // Replace the pointer with null to ease debugging.
  arg.storage() = nullptr;
}

// Optimized routine to insert a key into a table when that key *definitely*
// isn't present in the table and the table *definitely* has a viable empty
// slot (and growth space) to insert into before any deleted slots. When both
// of these are true, typically just after growth, we can dramatically simplify
// the insert position search.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::InsertIntoEmpty(
    HashCode hash) -> EntryT* {
  auto [hash_index, tag] = hash.ExtractIndexAndTag<7>();
  uint8_t* local_metadata = metadata();
  EntryT* local_entries = entries();

  for (ProbeSequence s(hash_index, alloc_size());; s.Next()) {
    ssize_t group_index = s.index();
    auto g = MetadataGroup::Load(local_metadata, group_index);

    if (auto empty_match = g.MatchEmpty()) {
      ssize_t index = group_index + empty_match.index();
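      // Mark the chosen slot as present; the metadata byte stores the 7-bit
      // tag extracted from the hash alongside the present bit.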
      local_metadata[index] = tag | MetadataGroup::PresentMask;
      return &local_entries[index];
    }

    // Otherwise we continue probing.
  }
}

// Apply our doubling growth strategy and (re-)check invariants around table
// size.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::ComputeNextAllocSize(
    ssize_t old_alloc_size) -> ssize_t {
  CARBON_DCHECK(llvm::isPowerOf2_64(old_alloc_size),
                "Expected a power of two!");
  ssize_t new_alloc_size;
  bool overflow = __builtin_mul_overflow(old_alloc_size, 2, &new_alloc_size);
  CARBON_CHECK(!overflow, "Computing the new size overflowed `ssize_t`!");
  return new_alloc_size;
}

// Compute the growth threshold for a given size.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT,
              InputKeyContextT>::GrowthThresholdForAllocSize(ssize_t alloc_size)
    -> ssize_t {
  // We use a 7/8ths load factor to trigger growth.
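  // For example, an alloc size of 64 gives a growth threshold of 64 - 8 = 56
  // entries, and 1024 gives 1024 - 128 = 896.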
  return alloc_size - alloc_size / 8;
}

// Optimized routine for growing to the next alloc size.
//
// A particularly common and important-to-optimize path is growing to the next
// alloc size, which will always be a doubling of the allocated size. This
// allows an important optimization -- we're adding exactly one more high bit
// to the hash-computed index for each entry. This in turn means we can
// classify every entry in the table into three cases:
//
// 1) The new high bit is zero, the entry is at the same index in the new
//    table as the old.
//
// 2) The new high bit is one, the entry is at the old index plus the old
//    size.
//
// 3) The entry's current index doesn't match the initial hash index because
//    it required some amount of probing to find an empty slot.
//
// The design of the hash table tries to minimize how many entries fall into
// case (3), so we expect the vast majority of entries to be in (1) or (2).
// This lets us model growth notionally as copying the hashtable twice into the
// lower and higher halves of the new allocation, clearing out the now-empty
// slots (from both deleted entries and entries in the other half of the table
// after growth), and inserting any probed elements. That model in turn is much
// more efficient than re-inserting all of the elements as it avoids the
// unnecessary parts of insertion and avoids interleaving random accesses for
// the probed elements. But most importantly, for trivially relocatable types
// it allows us to use `memcpy` rather than moving the elements individually.
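//
// As an illustrative example (the numbers are hypothetical): growing from 16
// to 32 slots, an entry whose hash index is 5 stays at index 5 if the newly
// added hash bit is zero (case 1), or moves to index 5 + 16 = 21 if that bit
// is one (case 2). An entry stored at index 7 whose hash index was 5 had to
// probe, so it falls into case (3) and is re-inserted after the main walk.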
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::GrowToNextAllocSize(
    KeyContextT key_context) -> void {
  // We collect the probed elements in a small vector for re-insertion. It is
  // tempting to reuse the already allocated storage, but doing so appears to
  // be a (very slight) performance regression. These are relatively rare and
  // storing them into the existing storage creates stores to the same regions
  // of memory we're reading. Moreover, it requires moving both the key and the
  // value twice, and doing the `memcpy` widening for relocatable types before
  // the group walk rather than after the group walk. In practice, between the
  // statistical rareness and using a large small-size buffer here on the
  // stack, we can handle this most efficiently with temporary, additional
  // storage.
  llvm::SmallVector<std::pair<ssize_t, HashCode>, 128> probed_indices;

  // Create locals for the old state of the table.
  ssize_t old_size = alloc_size();
  CARBON_DCHECK(old_size > 0);
  bool old_small = is_small();
  Storage* old_storage = storage();
  uint8_t* old_metadata = metadata();
  EntryT* old_entries = entries();

#ifndef NDEBUG
  // Count how many of the old table slots will end up being empty after we
  // grow the table. This is both the currently empty slots, but also the
  // deleted slots because we clear them to empty and re-insert everything that
  // had any probing.
  ssize_t debug_empty_count =
      llvm::count(llvm::ArrayRef(old_metadata, old_size), MetadataGroup::Empty);
  ssize_t debug_deleted_count = llvm::count(
      llvm::ArrayRef(old_metadata, old_size), MetadataGroup::Deleted);
  CARBON_DCHECK(
      debug_empty_count >= (old_size - GrowthThresholdForAllocSize(old_size)),
      "debug_empty_count: {0}, debug_deleted_count: {1}, size: {2}",
      debug_empty_count, debug_deleted_count, old_size);
#endif

  // Configure for the new size and allocate the new storage.
  ssize_t new_size = ComputeNextAllocSize(old_size);
  alloc_size() = new_size;
  storage() = Allocate(new_size);
  growth_budget_ = GrowthThresholdForAllocSize(new_size);

  // Now extract the new components of the table.
  uint8_t* new_metadata = metadata();
  EntryT* new_entries = entries();

  // Walk the metadata groups, clearing deleted to empty, duplicating the
  // metadata for the low and high halves, and updating it based on where each
  // entry will go in the new table. The updated metadata group is written to
  // the new table, and for non-trivially relocatable entry types, the entry is
  // also moved to its new location.
  ssize_t count = 0;
  for (ssize_t group_index = 0; group_index < old_size;
       group_index += GroupSize) {
    auto low_g = MetadataGroup::Load(old_metadata, group_index);
    // Make sure to match present elements first to enable pipelining with
    // clearing.
    auto present_matched_range = low_g.MatchPresent();
    low_g.ClearDeleted();
    MetadataGroup high_g;
    if constexpr (MetadataGroup::FastByteClear) {
      // When we have a fast byte clear, we can update the metadata for the
      // growth in-register and store at the end.
      high_g = low_g;
    } else {
      // If we don't have a fast byte clear, we can store the metadata group
      // eagerly here and overwrite bytes with a byte store below instead of
      // clearing the byte in-register.
      low_g.Store(new_metadata, group_index);
      low_g.Store(new_metadata, group_index | old_size);
    }
    for (ssize_t byte_index : present_matched_range) {
      ++count;
      ssize_t old_index = group_index + byte_index;
      if constexpr (!MetadataGroup::FastByteClear) {
        CARBON_DCHECK(new_metadata[old_index] == old_metadata[old_index]);
        CARBON_DCHECK(new_metadata[old_index | old_size] ==
                      old_metadata[old_index]);
      }
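      // Recompute where this entry's hash would begin probing in the old
      // table. If that doesn't match the group we found it in, the entry was
      // displaced by probing (case 3 above) and must be re-inserted after the
      // main walk.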
      HashCode hash =
          key_context.HashKey(old_entries[old_index].key(), ComputeSeed());
      ssize_t old_hash_index = hash.ExtractIndexAndTag<7>().first &
                               ComputeProbeMaskFromSize(old_size);
      if (LLVM_UNLIKELY(old_hash_index != group_index)) {
        probed_indices.push_back({old_index, hash});
        if constexpr (MetadataGroup::FastByteClear) {
          low_g.ClearByte(byte_index);
          high_g.ClearByte(byte_index);
        } else {
          new_metadata[old_index] = MetadataGroup::Empty;
          new_metadata[old_index | old_size] = MetadataGroup::Empty;
        }
        continue;
      }
      ssize_t new_index = hash.ExtractIndexAndTag<7>().first &
                          ComputeProbeMaskFromSize(new_size);
      CARBON_DCHECK(new_index == old_hash_index ||
                    new_index == (old_hash_index | old_size));
      // Toggle the newly added bit of the index to get to the other possible
      // target index.
      if constexpr (MetadataGroup::FastByteClear) {
        (new_index == old_hash_index ? high_g : low_g).ClearByte(byte_index);
        new_index += byte_index;
      } else {
        new_index += byte_index;
        new_metadata[new_index ^ old_size] = MetadataGroup::Empty;
      }

      // If we need to explicitly move (and destroy) the key or value, do so
      // here where we already know its target.
      if constexpr (!EntryT::IsTriviallyRelocatable) {
        new_entries[new_index].MoveFrom(std::move(old_entries[old_index]));
      }
    }
    if constexpr (MetadataGroup::FastByteClear) {
      low_g.Store(new_metadata, group_index);
      high_g.Store(new_metadata, (group_index | old_size));
    }
  }
  CARBON_DCHECK((count - static_cast<ssize_t>(probed_indices.size())) ==
                (new_size - llvm::count(llvm::ArrayRef(new_metadata, new_size),
                                        MetadataGroup::Empty)));
#ifndef NDEBUG
  CARBON_DCHECK((debug_empty_count + debug_deleted_count) ==
                (old_size - count));
  CARBON_DCHECK(llvm::count(llvm::ArrayRef(new_metadata, new_size),
                            MetadataGroup::Empty) ==
                debug_empty_count + debug_deleted_count +
                    static_cast<ssize_t>(probed_indices.size()) + old_size);
#endif

  // If the keys or values are trivially relocatable, we do a bulk memcpy of
  // them into place. This will copy them into both possible locations, which
  // is fine. One will be empty and clobbered if reused or ignored. The other
  // will be the one used. It might seem like making two copies of an entry
  // needs to be valid for this to work, but it doesn't: this produces exactly
  // the same storage as copying into the wrong location first and then again
  // into the correct location. Only one copy is live and only one is
  // destroyed.
  if constexpr (EntryT::IsTriviallyRelocatable) {
    memcpy(new_entries, old_entries, old_size * sizeof(EntryT));
    memcpy(new_entries + old_size, old_entries, old_size * sizeof(EntryT));
  }

  // We then need to do a normal insertion for anything that was probed before
  // growth, but we know we'll find an empty slot, so leverage that.
  for (auto [old_index, hash] : probed_indices) {
    EntryT* new_entry = InsertIntoEmpty(hash);
    new_entry->MoveFrom(std::move(old_entries[old_index]));
  }
  CARBON_DCHECK(count ==
                (new_size - llvm::count(llvm::ArrayRef(new_metadata, new_size),
                                        MetadataGroup::Empty)));
  growth_budget_ -= count;
  CARBON_DCHECK(growth_budget_ ==
                (GrowthThresholdForAllocSize(new_size) -
                 (new_size - llvm::count(llvm::ArrayRef(new_metadata, new_size),
                                         MetadataGroup::Empty))));
  CARBON_DCHECK(growth_budget_ > 0 &&
                "Must still have a growth budget after rehash!");

  if (!old_small) {
    // Old isn't a small buffer, so we need to deallocate it.
    Deallocate(old_storage, old_size);
  }
}

// Grow the hashtable to create space and then insert into it. Returns the
// selected insertion entry. Never returns null. In addition to growing and
// selecting the insertion entry, this routine updates the metadata array so
// that this function can be directly called and the result returned from
// `InsertImpl`.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
[[clang::noinline]] auto
BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::GrowAndInsert(
    HashCode hash, KeyContextT key_context) -> EntryT* {
  GrowToNextAllocSize(key_context);

  // And insert the lookup_key into an index in the newly grown map and return
  // that index for use.
  --growth_budget_;
  return InsertIntoEmpty(hash);
}

template <typename InputBaseT, ssize_t SmallSize>
TableImpl<InputBaseT, SmallSize>::TableImpl(const TableImpl& arg)
  requires(BaseT::EntryT::IsCopyable)
    : BaseT(arg.alloc_size(), arg.growth_budget_, SmallSize) {
  // Check for completely broken objects. These invariants should be true even
  // in a moved-from state.
  CARBON_DCHECK(arg.alloc_size() == 0 || !arg.is_small() ||
                arg.alloc_size() == SmallSize);
  CARBON_DCHECK(arg.small_alloc_size_ == SmallSize);
  CARBON_DCHECK(this->small_alloc_size_ == SmallSize);

  if (this->alloc_size() != 0) {
    SetUpStorage();
    this->CopySlotsFrom(arg);
  }
}

template <typename InputBaseT, ssize_t SmallSize>
auto TableImpl<InputBaseT, SmallSize>::operator=(const TableImpl& arg)
    -> TableImpl&
  requires(BaseT::EntryT::IsCopyable)
{
  // Check for completely broken objects. These invariants should be true even
  // in a moved-from state.
  CARBON_DCHECK(arg.alloc_size() == 0 || !arg.is_small() ||
                arg.alloc_size() == SmallSize);
  CARBON_DCHECK(arg.small_alloc_size_ == SmallSize);
  CARBON_DCHECK(this->small_alloc_size_ == SmallSize);

  // We have to end up with an allocation size exactly equivalent to the
  // incoming argument to avoid re-hashing every entry in the table, which
  // isn't possible without key context.
  if (arg.alloc_size() == this->alloc_size()) {
    // There is no effective way for self-assignment handling to fall out of an
    // efficient implementation, so detect and bypass it here. Similarly, if
    // both tables are in a moved-from state, there is nothing to do.
    if (&arg == this || this->alloc_size() == 0) {
      return *this;
    }

    CARBON_DCHECK(arg.storage() != this->storage());
    if constexpr (!EntryT::IsTriviallyDestructible) {
      this->view_impl_.ForEachEntry([](EntryT& entry) { entry.Destroy(); },
                                    [](auto...) {});
    }
  } else {
    // The sizes don't match so destroy everything and re-setup the table
    // storage.
    this->Destroy();
    this->alloc_size() = arg.alloc_size();
    // If `arg` is moved-from, we've cleared out our elements and put ourselves
    // into a moved-from state. We're done.
    if (this->alloc_size() == 0) {
      return *this;
    }
    SetUpStorage();
  }

  this->growth_budget_ = arg.growth_budget_;
  this->CopySlotsFrom(arg);
  return *this;
}

// Puts the incoming table into a moved-from state that can be destroyed or
// re-initialized but must not be used otherwise.
template <typename InputBaseT, ssize_t SmallSize>
TableImpl<InputBaseT, SmallSize>::TableImpl(TableImpl&& arg) noexcept
    : BaseT(arg.alloc_size(), arg.growth_budget_, SmallSize) {
  // Check for completely broken objects. These invariants should be true even
  // in a moved-from state.
  CARBON_DCHECK(arg.alloc_size() == 0 || !arg.is_small() ||
                arg.alloc_size() == SmallSize);
  CARBON_DCHECK(arg.small_alloc_size_ == SmallSize);
  CARBON_DCHECK(this->small_alloc_size_ == SmallSize);

  this->MoveFrom(std::move(arg), small_storage());
}

template <typename InputBaseT, ssize_t SmallSize>
auto TableImpl<InputBaseT, SmallSize>::operator=(TableImpl&& arg) noexcept
    -> TableImpl& {
  // Check for completely broken objects. These invariants should be true even
  // in a moved-from state.
  CARBON_DCHECK(arg.alloc_size() == 0 || !arg.is_small() ||
                arg.alloc_size() == SmallSize);
  CARBON_DCHECK(arg.small_alloc_size_ == SmallSize);
  CARBON_DCHECK(this->small_alloc_size_ == SmallSize);

  // Destroy and deallocate our table.
  this->Destroy();
  // Defend against self-move by zeroing the size here before we start moving
  // out of `arg`.
  this->alloc_size() = 0;

  // Set up to match the argument and then finish the move.
  this->alloc_size() = arg.alloc_size();
  this->growth_budget_ = arg.growth_budget_;
  this->MoveFrom(std::move(arg), small_storage());
  return *this;
}

template <typename InputBaseT, ssize_t SmallSize>
TableImpl<InputBaseT, SmallSize>::~TableImpl() {
  this->Destroy();
}

// Reset a table to its original state, including releasing any allocated
// memory.
template <typename InputBaseT, ssize_t SmallSize>
auto TableImpl<InputBaseT, SmallSize>::ResetImpl() -> void {
  this->Destroy();

  // Re-initialize the whole thing.
  CARBON_DCHECK(this->small_alloc_size() == SmallSize);
  this->Construct(small_storage());
}

template <typename InputBaseT, ssize_t SmallSize>
auto TableImpl<InputBaseT, SmallSize>::small_storage() const -> Storage* {
  if constexpr (SmallSize > 0) {
    // Do a bunch of validation of the small size to establish our invariants
    // when we know we have a non-zero small size.
    static_assert(llvm::isPowerOf2_64(SmallSize),
                  "SmallSize must be a power of two for a hashed buffer!");
    static_assert(
        SmallSize >= MaxGroupSize,
        "We require all small sizes to be multiples of the largest group "
        "size supported to ensure it can be used portably.");
    static_assert(
        (SmallSize % MaxGroupSize) == 0,
        "Small size must be a multiple of the max group size supported "
        "so that we can allocate a whole number of groups.");
    // Implied by the max asserts above.
    static_assert(SmallSize >= GroupSize);
    static_assert((SmallSize % GroupSize) == 0);

    static_assert(SmallSize >= alignof(StorageEntry<KeyT, ValueT>),
                  "Requested a small size that would require padding between "
                  "metadata bytes and correctly aligned key and value types. "
                  "Either a larger small size or a zero small size and heap "
                  "allocation are required for this key and value type.");
    static_assert(offsetof(SmallStorage, entries) == SmallSize,
                  "Offset to entries in small size storage doesn't match "
                  "computed offset!");

    return &small_storage_;
  } else {
    static_assert(
        sizeof(TableImpl) == sizeof(BaseT),
        "Empty small storage caused a size difference and wasted space!");
    return nullptr;
  }
}

// Helper to set up the storage of a table when a specific size has already
// been set up. If possible, uses any small storage, otherwise allocates.
template <typename InputBaseT, ssize_t SmallSize>
auto TableImpl<InputBaseT, SmallSize>::SetUpStorage() -> void {
  CARBON_DCHECK(this->small_alloc_size() == SmallSize);
  ssize_t local_size = this->alloc_size();
  CARBON_DCHECK(local_size != 0);
  if (local_size == SmallSize) {
    this->storage() = small_storage();
  } else {
    this->storage() = BaseT::Allocate(local_size);
  }
}

}  // namespace Carbon::RawHashtable

#endif  // CARBON_COMMON_RAW_HASHTABLE_H_