raw_hashtable_benchmark_helpers.cpp 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384
  1. // Part of the Carbon Language project, under the Apache License v2.0 with LLVM
  2. // Exceptions. See /LICENSE for license information.
  3. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. #include "common/raw_hashtable_benchmark_helpers.h"
  5. #include <cstddef>
  6. #include <forward_list>
  7. namespace Carbon::RawHashtable {
  8. // A local shuffle implementation built on Abseil to improve performance in
  9. // debug builds.
  10. template <typename T>
  11. static auto Shuffle(llvm::MutableArrayRef<T> data, absl::BitGen& gen) {
  12. for (ssize_t i : llvm::seq<ssize_t>(0, data.size() - 1)) {
  13. ssize_t j = absl::Uniform<ssize_t>(gen, 0, data.size() - i);
  14. if (j != 0) {
  15. std::swap(data[i], data[i + j]);
  16. }
  17. }
  18. }
// The number of distinct characters benchmark keys are drawn from. Kept a
// power of two so character indices can be selected with masks and shifts.
constexpr ssize_t NumChars = 64;
static_assert(llvm::isPowerOf2_64(NumChars));
  21. // For benchmarking, we use short strings in a fixed distribution with common
  22. // characters. Real-world strings aren't uniform across ASCII or Unicode, etc.
  23. // And for *micro*-benchmarking we want to focus on the map overhead with short,
  24. // fast keys.
  25. static auto MakeChars() -> llvm::OwningArrayRef<char> {
  26. llvm::OwningArrayRef<char> characters(NumChars);
  27. // Start with `-` and `_`, and then add `a` - `z`, `A` - `Z`, and `0` - `9`.
  28. characters[0] = '-';
  29. characters[1] = '_';
  30. ssize_t i = 2;
  31. for (auto range :
  32. {llvm::seq_inclusive('a', 'z'), llvm::seq_inclusive('A', 'Z'),
  33. llvm::seq_inclusive('0', '9')}) {
  34. for (char c : range) {
  35. characters[i] = c;
  36. ++i;
  37. }
  38. }
  39. CARBON_CHECK(i == NumChars,
  40. "Expected exactly {0} characters, got {1} instead!", NumChars,
  41. i);
  42. return characters;
  43. }
// The number of distinct 4-character strings: one for every combination of
// four characters. A power of two (since `NumChars` is), so sequence positions
// can wrap with a simple mask.
constexpr ssize_t NumFourCharStrs = NumChars * NumChars * NumChars * NumChars;
static_assert(llvm::isPowerOf2_64(NumFourCharStrs));
  46. // Compute every 4-character string in a shuffled array. This is a little memory
  47. // intense -- 64 MiB -- but ends up being much cheaper by letting us reliably
  48. // select a unique 4-character sequence to avoid collisions.
  49. static auto MakeFourCharStrs(llvm::ArrayRef<char> characters, absl::BitGen& gen)
  50. -> llvm::OwningArrayRef<std::array<char, 4>> {
  51. constexpr ssize_t NumCharsMask = NumChars - 1;
  52. constexpr ssize_t NumCharsShift = llvm::CTLog2<NumChars>();
  53. llvm::OwningArrayRef<std::array<char, 4>> four_char_strs(NumFourCharStrs);
  54. for (auto [i, str] : llvm::enumerate(four_char_strs)) {
  55. str[0] = characters[i & NumCharsMask];
  56. i >>= NumCharsShift;
  57. str[1] = characters[i & NumCharsMask];
  58. i >>= NumCharsShift;
  59. str[2] = characters[i & NumCharsMask];
  60. i >>= NumCharsShift;
  61. CARBON_CHECK((i & ~NumCharsMask) == 0);
  62. str[3] = characters[i];
  63. }
  64. Shuffle(four_char_strs, gen);
  65. return four_char_strs;
  66. }
  67. constexpr ssize_t NumRandomChars = static_cast<ssize_t>(64) * 1024;
  68. // Create a pool of random characters to sample from rather than computing this
  69. // for every string which is very slow in debug builds. We also pad this pool
  70. // with the max length so we can pull the full length from the end to simplify
  71. // the logic when wrapping around the pool.
  72. static auto MakeRandomChars(llvm::ArrayRef<char> characters, int max_length,
  73. absl::BitGen& gen) -> llvm::OwningArrayRef<char> {
  74. llvm::OwningArrayRef<char> random_chars(NumRandomChars + max_length);
  75. for (char& c : random_chars) {
  76. c = characters[absl::Uniform<ssize_t>(gen, 0, NumChars)];
  77. }
  78. return random_chars;
  79. }
// Make a small vector of pointers into a single allocation of raw strings. The
// allocated memory is expected to leak and must be transitively referenced by a
// global. Each string has `length` size (which must be >= 4), and there are
// `key_count` keys in the result. Each key is filled from the `random_chars`
// until the last 4 characters. The last four characters of each string will be
// taken sequentially from `four_char_strs` from some random start position to
// ensure no duplicate keys are produced.
static auto MakeRawStrKeys(ssize_t length, ssize_t key_count,
                           llvm::ArrayRef<std::array<char, 4>> four_char_strs,
                           llvm::ArrayRef<char> random_chars, absl::BitGen& gen)
    -> llvm::SmallVector<const char*> {
  llvm::SmallVector<const char*> raw_keys;
  CARBON_CHECK(length >= 4);
  ssize_t prefix_length = length - 4;
  // Select a random start for indexing our four character strings.
  ssize_t four_char_index = absl::Uniform<ssize_t>(gen, 0, NumFourCharStrs);
  // Select a random start for the prefix random characters.
  ssize_t random_chars_index = absl::Uniform<ssize_t>(gen, 0, NumRandomChars);
  // Do a single memory allocation for all the keys of this length to
  // avoid an excessive number of small and fragmented allocations. This
  // memory is intentionally leaked as the keys are global and will
  // themselves point into it.
  char* key_text = new char[key_count * length];
  // Reserve all the key space since we know how many we'll need.
  raw_keys.reserve(key_count);
  for ([[gnu::unused]] ssize_t i : llvm::seq<ssize_t>(0, key_count)) {
    // Copy the (possibly empty) random-character prefix. The pool is padded
    // by the max key length, so reading `prefix_length` bytes starting just
    // below the wrap point stays in bounds.
    memcpy(key_text, random_chars.data() + random_chars_index, prefix_length);
    random_chars_index += prefix_length;
    // Wrap with a mask; relies on `NumRandomChars` being a power of two.
    random_chars_index &= NumRandomChars - 1;
    // Set the last four characters with this entry in the shuffled
    // sequence.
    memcpy(key_text + prefix_length, four_char_strs[four_char_index].data(), 4);
    // Step through the shuffled sequence. We start at a random position,
    // so we need to wrap around the end.
    ++four_char_index;
    four_char_index &= NumFourCharStrs - 1;
    // And finally save the start pointer as one of our raw keys.
    raw_keys.push_back(key_text);
    key_text += length;
  }
  return raw_keys;
}
// Build up a large collection of random and unique string keys. This is
// actually a relatively expensive operation due to needing to build all the
// random string text. As a consequence, the initializer of this global is
// somewhat performance tuned to ensure benchmarks don't take an excessive
// amount of time to run or use an excessive amount of memory.
static absl::NoDestructor<llvm::OwningArrayRef<llvm::StringRef>> raw_str_keys{
    [] {
      llvm::OwningArrayRef<llvm::StringRef> keys(MaxNumKeys);
      absl::BitGen gen;

      // The distribution of key lengths, round-robined through the key array
      // below. Must be sorted and must evenly divide `MaxNumKeys`.
      std::array length_buckets = {
          4, 4, 4, 4, 5, 5, 5, 5, 7, 7, 10, 10, 15, 25, 40, 80,
      };
      static_assert((MaxNumKeys % length_buckets.size()) == 0);
      CARBON_CHECK(llvm::is_sorted(length_buckets));

      // For each distinct length bucket, we build a vector of raw keys. The
      // `forward_list` keeps each vector's address stable as more are added,
      // since we store pointers to them below.
      std::forward_list<llvm::SmallVector<const char*>> raw_keys_storage;
      // And a parallel array to the length buckets with the raw keys of that
      // length.
      std::array<llvm::SmallVector<const char*>*, length_buckets.size()>
          raw_keys_buckets;

      llvm::OwningArrayRef<char> characters = MakeChars();
      llvm::OwningArrayRef<std::array<char, 4>> four_char_strs =
          MakeFourCharStrs(characters, gen);
      llvm::OwningArrayRef<char> random_chars = MakeRandomChars(
          characters, /*max_length=*/length_buckets.back(), gen);

      ssize_t prev_length = -1;
      for (auto [length_index, length] : llvm::enumerate(length_buckets)) {
        // We can detect repetitions in length as they are sorted. Repeated
        // lengths share the raw keys built for the first occurrence.
        if (length == prev_length) {
          raw_keys_buckets[length_index] = raw_keys_buckets[length_index - 1];
          continue;
        }
        prev_length = length;
        // We want to compute all the keys of this length that we'll need,
        // scaled by how many buckets have this length.
        ssize_t key_count = (MaxNumKeys / length_buckets.size()) *
                            llvm::count(length_buckets, length);
        raw_keys_buckets[length_index] =
            &raw_keys_storage.emplace_front(MakeRawStrKeys(
                length, key_count, four_char_strs, random_chars, gen));
      }

      // Now build the actual key array from our intermediate storage by
      // round-robin extracting from the length buckets.
      for (auto [index, key] : llvm::enumerate(keys)) {
        ssize_t bucket = index % length_buckets.size();
        ssize_t length = length_buckets[bucket];
        // We pop a raw key from the list of them associated with this bucket.
        const char* raw_key = raw_keys_buckets[bucket]->pop_back_val();
        // And build our key from that.
        key = llvm::StringRef(raw_key, length);
      }

      // Check that in fact we popped every raw key into our main keys.
      for (const auto& raw_keys : raw_keys_storage) {
        CARBON_CHECK(raw_keys.empty());
      }
      return keys;
    }()};
  178. static absl::NoDestructor<llvm::OwningArrayRef<int*>> raw_ptr_keys{[] {
  179. llvm::OwningArrayRef<int*> keys(MaxNumKeys);
  180. for (auto [index, key] : llvm::enumerate(keys)) {
  181. // We leak these pointers -- this is a static initializer executed once.
  182. key = new int(static_cast<int>(index));
  183. }
  184. return keys;
  185. }()};
  186. static absl::NoDestructor<llvm::OwningArrayRef<int>> raw_int_keys{[] {
  187. llvm::OwningArrayRef<int> keys(MaxNumKeys);
  188. for (auto [index, key] : llvm::enumerate(keys)) {
  189. key = index + 1;
  190. }
  191. return keys;
  192. }()};
  193. namespace {
// Allow generically dispatching over the specific key types we build and
// support.
//
// Returns the full `MaxNumKeys`-sized array of pre-built raw keys for `T`:
// string keys for `llvm::StringRef`, pointer keys for pointer types, and
// integer keys otherwise.
template <typename T>
auto GetRawKeys() -> llvm::ArrayRef<T> {
  if constexpr (std::is_same_v<T, llvm::StringRef>) {
    return *raw_str_keys;
  } else if constexpr (std::is_pointer_v<T>) {
    return *raw_ptr_keys;
  } else {
    return *raw_int_keys;
  }
}
// Memoization cache used by `GetShuffledLookupKeys`, keyed by the
// (table keys size, lookup keys size) pair. Wrapped in `NoDestructor` so it is
// never destroyed and the returned array refs stay valid for the process
// lifetime.
template <typename T>
static absl::NoDestructor<
    std::map<std::pair<ssize_t, ssize_t>, llvm::OwningArrayRef<T>>>
    lookup_keys_storage;
// Given a particular table keys size and lookup keys size, provide an array ref
// to a shuffled set of lookup keys.
//
// Because different table sizes pull from different sub-ranges of our raw keys,
// we need to compute a distinct set of random keys in the table to use for
// lookups depending on the table size. And we also want to have an even
// distribution of key *sizes* throughout the lookup keys, and so we can't
// compute a single lookup keys array of the maximum size. Instead we need to
// compute a distinct special set of lookup keys for each pair of table and
// lookup size, and then shuffle that specific set into a random sequence that
// is returned. This function memoizes this sequence for each pair of sizes.
template <typename T>
auto GetShuffledLookupKeys(ssize_t table_keys_size, ssize_t lookup_keys_size)
    -> llvm::ArrayRef<T> {
  // The raw keys aren't shuffled and round-robin through the sizes. We want to
  // keep the total size of lookup keys used exactly the same across runs. So
  // for a given size we always take the leading sequence from the raw keys for
  // that size, duplicating as needed to get the desired lookup sequence size,
  // and then shuffle the keys in that sequence to end up with a random sequence
  // of keys. We store each of these shuffled sequences in a map to avoid
  // repeatedly computing them.
  llvm::OwningArrayRef<T>& lookup_keys =
      (*lookup_keys_storage<T>)[{table_keys_size, lookup_keys_size}];
  // A default-constructed (empty) entry means this is the first request for
  // this pair of sizes; build and memoize the sequence.
  if (lookup_keys.empty()) {
    lookup_keys = llvm::OwningArrayRef<T>(lookup_keys_size);
    auto raw_keys = GetRawKeys<T>();
    for (auto [index, key] : llvm::enumerate(lookup_keys)) {
      // Round-robin through the leading `table_keys_size` raw keys,
      // duplicating them as needed to fill the lookup sequence.
      key = raw_keys[index % table_keys_size];
    }
    absl::BitGen gen;
    Shuffle(lookup_keys, gen);
  }
  CARBON_CHECK(static_cast<ssize_t>(lookup_keys.size()) == lookup_keys_size);
  return lookup_keys;
}
  245. } // namespace
  246. template <typename T>
  247. auto GetKeysAndMissKeys(ssize_t table_keys_size)
  248. -> std::pair<llvm::ArrayRef<T>, llvm::ArrayRef<T>> {
  249. CARBON_CHECK(table_keys_size <= MaxNumKeys);
  250. // The raw keys aren't shuffled and round-robin through the sizes. Take the
  251. // tail of this sequence and shuffle it to form a random set of miss keys with
  252. // a consistent total size.
  253. static absl::NoDestructor<llvm::OwningArrayRef<T>> miss_keys{[] {
  254. llvm::OwningArrayRef<T> keys;
  255. keys = GetRawKeys<T>().take_back(NumOtherKeys);
  256. CARBON_CHECK(keys.size() == NumOtherKeys);
  257. absl::BitGen gen;
  258. Shuffle(keys, gen);
  259. return keys;
  260. }()};
  261. return {GetRawKeys<T>().slice(0, table_keys_size), *miss_keys};
  262. }
// Explicit instantiations for each supported key type.
template auto GetKeysAndMissKeys<int>(ssize_t size)
    -> std::pair<llvm::ArrayRef<int>, llvm::ArrayRef<int>>;
template auto GetKeysAndMissKeys<int*>(ssize_t size)
    -> std::pair<llvm::ArrayRef<int*>, llvm::ArrayRef<int*>>;
template auto GetKeysAndMissKeys<llvm::StringRef>(ssize_t size)
    -> std::pair<llvm::ArrayRef<llvm::StringRef>,
                 llvm::ArrayRef<llvm::StringRef>>;
  270. template <typename T>
  271. auto GetKeysAndHitKeys(ssize_t table_keys_size, ssize_t lookup_keys_size)
  272. -> std::pair<llvm::ArrayRef<T>, llvm::ArrayRef<T>> {
  273. CARBON_CHECK(table_keys_size <= MaxNumKeys);
  274. CARBON_CHECK(lookup_keys_size <= MaxNumKeys);
  275. return {GetRawKeys<T>().slice(0, table_keys_size),
  276. GetShuffledLookupKeys<T>(table_keys_size, lookup_keys_size)};
  277. }
// Explicit instantiations for each supported key type.
template auto GetKeysAndHitKeys<int>(ssize_t size, ssize_t lookup_keys_size)
    -> std::pair<llvm::ArrayRef<int>, llvm::ArrayRef<int>>;
template auto GetKeysAndHitKeys<int*>(ssize_t size, ssize_t lookup_keys_size)
    -> std::pair<llvm::ArrayRef<int*>, llvm::ArrayRef<int*>>;
template auto GetKeysAndHitKeys<llvm::StringRef>(ssize_t size,
                                                 ssize_t lookup_keys_size)
    -> std::pair<llvm::ArrayRef<llvm::StringRef>,
                 llvm::ArrayRef<llvm::StringRef>>;
  286. template <typename T>
  287. auto DumpHashStatistics(llvm::ArrayRef<T> keys) -> void {
  288. if (keys.size() < GroupSize) {
  289. return;
  290. }
  291. // The hash table load factor is 7/8ths, so we want to add 1/7th of our
  292. // current size, subtract one, and pick the next power of two to get the power
  293. // of two where 7/8ths is greater than or equal to the incoming key size.
  294. ssize_t expected_size =
  295. llvm::NextPowerOf2(keys.size() + (keys.size() / 7) - 1);
  296. constexpr int GroupShift = llvm::CTLog2<GroupSize>();
  297. size_t mask = ComputeProbeMaskFromSize(expected_size);
  298. uint64_t salt = ComputeSeed();
  299. auto get_hash_index = [mask, salt](auto x) -> ssize_t {
  300. auto [hash_index, _] = HashValue(x, salt).template ExtractIndexAndTag<7>();
  301. return (hash_index & mask) >> GroupShift;
  302. };
  303. std::vector<std::vector<int>> grouped_key_indices(expected_size >>
  304. GroupShift);
  305. for (auto [i, k] : llvm::enumerate(keys)) {
  306. ssize_t hash_index = get_hash_index(k);
  307. CARBON_CHECK(hash_index < (expected_size >> GroupShift), "{0}", hash_index);
  308. grouped_key_indices[hash_index].push_back(i);
  309. }
  310. ssize_t max_group_index =
  311. llvm::max_element(grouped_key_indices,
  312. [](const auto& lhs, const auto& rhs) {
  313. return lhs.size() < rhs.size();
  314. }) -
  315. grouped_key_indices.begin();
  316. // If the max number of collisions on the index is less than or equal to the
  317. // group size, there shouldn't be any necessary probing (outside of deletion)
  318. // and so this isn't interesting, skip printing.
  319. if (grouped_key_indices[max_group_index].size() <= GroupSize) {
  320. return;
  321. }
  322. llvm::errs() << "keys: " << keys.size()
  323. << " groups: " << grouped_key_indices.size() << "\n"
  324. << "max group index: " << llvm::formatv("{0x8}", max_group_index)
  325. << " collisions: "
  326. << grouped_key_indices[max_group_index].size() << "\n";
  327. for (auto i : llvm::ArrayRef(grouped_key_indices[max_group_index])
  328. .take_front(2 * GroupSize)) {
  329. auto k = keys[i];
  330. auto hash = static_cast<uint64_t>(HashValue(k, salt));
  331. llvm::errs() << " key: " << k
  332. << " salt: " << llvm::formatv("{0:x16}", salt)
  333. << " hash: " << llvm::formatv("{0:x16}", hash) << "\n";
  334. }
  335. }
// Explicit instantiations for each supported key type.
template auto DumpHashStatistics(llvm::ArrayRef<int> keys) -> void;
template auto DumpHashStatistics(llvm::ArrayRef<int*> keys) -> void;
template auto DumpHashStatistics(llvm::ArrayRef<llvm::StringRef> keys) -> void;
  339. } // namespace Carbon::RawHashtable