raw_hashtable_benchmark_helpers.cpp 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383
// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#include "common/raw_hashtable_benchmark_helpers.h"

#include <cstddef>
#include <forward_list>

namespace Carbon::RawHashtable {
  8. // A local shuffle implementation built on Abseil to improve performance in
  9. // debug builds.
  10. template <typename T>
  11. static auto Shuffle(llvm::MutableArrayRef<T> data, absl::BitGen& gen) {
  12. for (ssize_t i : llvm::seq<ssize_t>(0, data.size() - 1)) {
  13. ssize_t j = absl::Uniform<ssize_t>(gen, 0, data.size() - i);
  14. if (j != 0) {
  15. std::swap(data[i], data[i + j]);
  16. }
  17. }
  18. }
  19. constexpr ssize_t NumChars = 64;
  20. static_assert(llvm::isPowerOf2_64(NumChars));
  21. // For benchmarking, we use short strings in a fixed distribution with common
  22. // characters. Real-world strings aren't uniform across ASCII or Unicode, etc.
  23. // And for *micro*-benchmarking we want to focus on the map overhead with short,
  24. // fast keys.
  25. static auto MakeChars() -> llvm::OwningArrayRef<char> {
  26. llvm::OwningArrayRef<char> characters(NumChars);
  27. // Start with `-` and `_`, and then add `a` - `z`, `A` - `Z`, and `0` - `9`.
  28. characters[0] = '-';
  29. characters[1] = '_';
  30. ssize_t i = 2;
  31. for (auto range :
  32. {llvm::seq_inclusive('a', 'z'), llvm::seq_inclusive('A', 'Z'),
  33. llvm::seq_inclusive('0', '9')}) {
  34. for (char c : range) {
  35. characters[i] = c;
  36. ++i;
  37. }
  38. }
  39. CARBON_CHECK(i == NumChars) << "Expected exactly " << NumChars
  40. << " characters, got " << i << " instead!";
  41. return characters;
  42. }
  43. constexpr ssize_t NumFourCharStrs = NumChars * NumChars * NumChars * NumChars;
  44. static_assert(llvm::isPowerOf2_64(NumFourCharStrs));
  45. // Compute every 4-character string in a shuffled array. This is a little memory
  46. // intense -- 64 MiB -- but ends up being much cheaper by letting us reliably
  47. // select a unique 4-character sequence to avoid collisions.
  48. static auto MakeFourCharStrs(llvm::ArrayRef<char> characters, absl::BitGen& gen)
  49. -> llvm::OwningArrayRef<std::array<char, 4>> {
  50. constexpr ssize_t NumCharsMask = NumChars - 1;
  51. constexpr ssize_t NumCharsShift = llvm::CTLog2<NumChars>();
  52. llvm::OwningArrayRef<std::array<char, 4>> four_char_strs(NumFourCharStrs);
  53. for (auto [i, str] : llvm::enumerate(four_char_strs)) {
  54. str[0] = characters[i & NumCharsMask];
  55. i >>= NumCharsShift;
  56. str[1] = characters[i & NumCharsMask];
  57. i >>= NumCharsShift;
  58. str[2] = characters[i & NumCharsMask];
  59. i >>= NumCharsShift;
  60. CARBON_CHECK((i & ~NumCharsMask) == 0);
  61. str[3] = characters[i];
  62. }
  63. Shuffle(four_char_strs, gen);
  64. return four_char_strs;
  65. }
  66. constexpr ssize_t NumRandomChars = static_cast<ssize_t>(64) * 1024;
  67. // Create a pool of random characters to sample from rather than computing this
  68. // for every string which is very slow in debug builds. We also pad this pool
  69. // with the max length so we can pull the full length from the end to simplify
  70. // the logic when wrapping around the pool.
  71. static auto MakeRandomChars(llvm::ArrayRef<char> characters, int max_length,
  72. absl::BitGen& gen) -> llvm::OwningArrayRef<char> {
  73. llvm::OwningArrayRef<char> random_chars(NumRandomChars + max_length);
  74. for (char& c : random_chars) {
  75. c = characters[absl::Uniform<ssize_t>(gen, 0, NumChars)];
  76. }
  77. return random_chars;
  78. }
  79. // Make a small vector of pointers into a single allocation of raw strings. The
  80. // allocated memory is expected to leak and must be transitively referenced by a
  81. // global. Each string has `length` size (which must be >= 4), and there are
  82. // `key_count` keys in the result. Each key is filled from the `random_chars`
  83. // until the last 4 characters. The last four characters of each string will be
  84. // taken sequentially from `four_char_strs` from some random start position to
  85. // ensure no duplicate keys are produced.
  86. static auto MakeRawStrKeys(ssize_t length, ssize_t key_count,
  87. llvm::ArrayRef<std::array<char, 4>> four_char_strs,
  88. llvm::ArrayRef<char> random_chars, absl::BitGen& gen)
  89. -> llvm::SmallVector<const char*> {
  90. llvm::SmallVector<const char*> raw_keys;
  91. CARBON_CHECK(length >= 4);
  92. ssize_t prefix_length = length - 4;
  93. // Select a random start for indexing our four character strings.
  94. ssize_t four_char_index = absl::Uniform<ssize_t>(gen, 0, NumFourCharStrs);
  95. // Select a random start for the prefix random characters.
  96. ssize_t random_chars_index = absl::Uniform<ssize_t>(gen, 0, NumRandomChars);
  97. // Do a single memory allocation for all the keys of this length to
  98. // avoid an excessive number of small and fragmented allocations. This
  99. // memory is intentionally leaked as the keys are global and will
  100. // themselves will point into it.
  101. char* key_text = new char[key_count * length];
  102. // Reserve all the key space since we know how many we'll need.
  103. raw_keys.reserve(key_count);
  104. for ([[gnu::unused]] ssize_t i : llvm::seq<ssize_t>(0, key_count)) {
  105. memcpy(key_text, random_chars.data() + random_chars_index, prefix_length);
  106. random_chars_index += prefix_length;
  107. random_chars_index &= NumRandomChars - 1;
  108. // Set the last four characters with this entry in the shuffled
  109. // sequence.
  110. memcpy(key_text + prefix_length, four_char_strs[four_char_index].data(), 4);
  111. // Step through the shuffled sequence. We start at a random position,
  112. // so we need to wrap around the end.
  113. ++four_char_index;
  114. four_char_index &= NumFourCharStrs - 1;
  115. // And finally save the start pointer as one of our raw keys.
  116. raw_keys.push_back(key_text);
  117. key_text += length;
  118. }
  119. return raw_keys;
  120. }
// Build up a large collection of random and unique string keys. This is
// actually a relatively expensive operation due to needing to build all the
// random string text. As a consequence, the initializer of this global is
// somewhat performance tuned to ensure benchmarks don't take an excessive
// amount of time to run or use an excessive amount of memory.
static absl::NoDestructor<llvm::OwningArrayRef<llvm::StringRef>> raw_str_keys{
    [] {
      llvm::OwningArrayRef<llvm::StringRef> keys(MaxNumKeys);
      absl::BitGen gen;
      // The distribution of key lengths: mostly short keys, with a tail of
      // longer ones. Must be sorted (checked below) so repeated lengths are
      // adjacent, and must evenly divide `MaxNumKeys`.
      std::array length_buckets = {
          4, 4, 4, 4, 5, 5, 5, 5, 7, 7, 10, 10, 15, 25, 40, 80,
      };
      static_assert((MaxNumKeys % length_buckets.size()) == 0);
      CARBON_CHECK(llvm::is_sorted(length_buckets));
      // For each distinct length bucket, we build a vector of raw keys.
      std::forward_list<llvm::SmallVector<const char*>> raw_keys_storage;
      // And a parallel array to the length buckets with the raw keys of that
      // length.
      std::array<llvm::SmallVector<const char*>*, length_buckets.size()>
          raw_keys_buckets;
      // Shared inputs for the key builders. The same `gen` is threaded
      // through all of them so randomness is drawn from one stream.
      llvm::OwningArrayRef<char> characters = MakeChars();
      llvm::OwningArrayRef<std::array<char, 4>> four_char_strs =
          MakeFourCharStrs(characters, gen);
      llvm::OwningArrayRef<char> random_chars = MakeRandomChars(
          characters, /*max_length=*/length_buckets.back(), gen);
      ssize_t prev_length = -1;
      for (auto [length_index, length] : llvm::enumerate(length_buckets)) {
        // We can detect repetitions in length as they are sorted: a repeat
        // shares the previous bucket's raw keys rather than building anew.
        if (length == prev_length) {
          raw_keys_buckets[length_index] = raw_keys_buckets[length_index - 1];
          continue;
        }
        prev_length = length;
        // We want to compute all the keys of this length that we'll need.
        ssize_t key_count = (MaxNumKeys / length_buckets.size()) *
                            llvm::count(length_buckets, length);
        raw_keys_buckets[length_index] =
            &raw_keys_storage.emplace_front(MakeRawStrKeys(
                length, key_count, four_char_strs, random_chars, gen));
      }
      // Now build the actual key array from our intermediate storage by
      // round-robin extracting from the length buckets.
      for (auto [index, key] : llvm::enumerate(keys)) {
        ssize_t bucket = index % length_buckets.size();
        ssize_t length = length_buckets[bucket];
        // We pop a raw key from the list of them associated with this bucket.
        const char* raw_key = raw_keys_buckets[bucket]->pop_back_val();
        // And build our key from that.
        key = llvm::StringRef(raw_key, length);
      }
      // Check that in fact we popped every raw key into our main keys.
      for (const auto& raw_keys : raw_keys_storage) {
        CARBON_CHECK(raw_keys.empty());
      }
      return keys;
    }()};
  177. static absl::NoDestructor<llvm::OwningArrayRef<int*>> raw_ptr_keys{[] {
  178. llvm::OwningArrayRef<int*> keys(MaxNumKeys);
  179. for (auto [index, key] : llvm::enumerate(keys)) {
  180. // We leak these pointers -- this is a static initializer executed once.
  181. key = new int(static_cast<int>(index));
  182. }
  183. return keys;
  184. }()};
  185. static absl::NoDestructor<llvm::OwningArrayRef<int>> raw_int_keys{[] {
  186. llvm::OwningArrayRef<int> keys(MaxNumKeys);
  187. for (auto [index, key] : llvm::enumerate(keys)) {
  188. key = index + 1;
  189. }
  190. return keys;
  191. }()};
  192. namespace {
  193. // Allow generically dispatching over the specific key types we build and
  194. // support.
  195. template <typename T>
  196. auto GetRawKeys() -> llvm::ArrayRef<T> {
  197. if constexpr (std::is_same_v<T, llvm::StringRef>) {
  198. return *raw_str_keys;
  199. } else if constexpr (std::is_pointer_v<T>) {
  200. return *raw_ptr_keys;
  201. } else {
  202. return *raw_int_keys;
  203. }
  204. }
  205. template <typename T>
  206. static absl::NoDestructor<
  207. std::map<std::pair<ssize_t, ssize_t>, llvm::OwningArrayRef<T>>>
  208. lookup_keys_storage;
  209. // Given a particular table keys size and lookup keys size, provide an array ref
  210. // to a shuffled set of lookup keys.
  211. //
  212. // Because different table sizes pull from different sub-ranges of our raw keys,
  213. // we need to compute a distinct set of random keys in the table to use for
  214. // lookups depending on the table size. And we also want to have an even
  215. // distribution of key *sizes* throughout the lookup keys, and so we can't
  216. // compute a single lookup keys array of the maximum size. Instead we need to
  217. // compute a distinct special set of lookup keys for each pair of table and
  218. // lookup size, and then shuffle that specific set into a random sequence that
  219. // is returned. This function memoizes this sequence for each pair of sizes.
  220. template <typename T>
  221. auto GetShuffledLookupKeys(ssize_t table_keys_size, ssize_t lookup_keys_size)
  222. -> llvm::ArrayRef<T> {
  223. // The raw keys aren't shuffled and round-robin through the sizes. We want to
  224. // keep the total size of lookup keys used exactly the same across runs. So
  225. // for a given size we always take the leading sequence from the raw keys for
  226. // that size, duplicating as needed to get the desired lookup sequence size,
  227. // and then shuffle the keys in that sequence to end up with a random sequence
  228. // of keys. We store each of these shuffled sequences in a map to avoid
  229. // repeatedly computing them.
  230. llvm::OwningArrayRef<T>& lookup_keys =
  231. (*lookup_keys_storage<T>)[{table_keys_size, lookup_keys_size}];
  232. if (lookup_keys.empty()) {
  233. lookup_keys = llvm::OwningArrayRef<T>(lookup_keys_size);
  234. auto raw_keys = GetRawKeys<T>();
  235. for (auto [index, key] : llvm::enumerate(lookup_keys)) {
  236. key = raw_keys[index % table_keys_size];
  237. }
  238. absl::BitGen gen;
  239. Shuffle(lookup_keys, gen);
  240. }
  241. CARBON_CHECK(static_cast<ssize_t>(lookup_keys.size()) == lookup_keys_size);
  242. return lookup_keys;
  243. }
  244. } // namespace
  245. template <typename T>
  246. auto GetKeysAndMissKeys(ssize_t table_keys_size)
  247. -> std::pair<llvm::ArrayRef<T>, llvm::ArrayRef<T>> {
  248. CARBON_CHECK(table_keys_size <= MaxNumKeys);
  249. // The raw keys aren't shuffled and round-robin through the sizes. Take the
  250. // tail of this sequence and shuffle it to form a random set of miss keys with
  251. // a consistent total size.
  252. static absl::NoDestructor<llvm::OwningArrayRef<T>> miss_keys{[] {
  253. llvm::OwningArrayRef<T> keys;
  254. keys = GetRawKeys<T>().take_back(NumOtherKeys);
  255. CARBON_CHECK(keys.size() == NumOtherKeys);
  256. absl::BitGen gen;
  257. Shuffle(keys, gen);
  258. return keys;
  259. }()};
  260. return {GetRawKeys<T>().slice(0, table_keys_size), *miss_keys};
  261. }
  262. template auto GetKeysAndMissKeys<int>(ssize_t size)
  263. -> std::pair<llvm::ArrayRef<int>, llvm::ArrayRef<int>>;
  264. template auto GetKeysAndMissKeys<int*>(ssize_t size)
  265. -> std::pair<llvm::ArrayRef<int*>, llvm::ArrayRef<int*>>;
  266. template auto GetKeysAndMissKeys<llvm::StringRef>(ssize_t size)
  267. -> std::pair<llvm::ArrayRef<llvm::StringRef>,
  268. llvm::ArrayRef<llvm::StringRef>>;
  269. template <typename T>
  270. auto GetKeysAndHitKeys(ssize_t table_keys_size, ssize_t lookup_keys_size)
  271. -> std::pair<llvm::ArrayRef<T>, llvm::ArrayRef<T>> {
  272. CARBON_CHECK(table_keys_size <= MaxNumKeys);
  273. CARBON_CHECK(lookup_keys_size <= MaxNumKeys);
  274. return {GetRawKeys<T>().slice(0, table_keys_size),
  275. GetShuffledLookupKeys<T>(table_keys_size, lookup_keys_size)};
  276. }
  277. template auto GetKeysAndHitKeys<int>(ssize_t size, ssize_t lookup_keys_size)
  278. -> std::pair<llvm::ArrayRef<int>, llvm::ArrayRef<int>>;
  279. template auto GetKeysAndHitKeys<int*>(ssize_t size, ssize_t lookup_keys_size)
  280. -> std::pair<llvm::ArrayRef<int*>, llvm::ArrayRef<int*>>;
  281. template auto GetKeysAndHitKeys<llvm::StringRef>(ssize_t size,
  282. ssize_t lookup_keys_size)
  283. -> std::pair<llvm::ArrayRef<llvm::StringRef>,
  284. llvm::ArrayRef<llvm::StringRef>>;
  285. template <typename T>
  286. auto DumpHashStatistics(llvm::ArrayRef<T> keys) -> void {
  287. if (keys.size() < GroupSize) {
  288. return;
  289. }
  290. // The hash table load factor is 7/8ths, so we want to add 1/7th of our
  291. // current size, subtract one, and pick the next power of two to get the power
  292. // of two where 7/8ths is greater than or equal to the incoming key size.
  293. ssize_t expected_size =
  294. llvm::NextPowerOf2(keys.size() + (keys.size() / 7) - 1);
  295. constexpr int GroupShift = llvm::CTLog2<GroupSize>();
  296. size_t mask = ComputeProbeMaskFromSize(expected_size);
  297. uint64_t salt = ComputeSeed();
  298. auto get_hash_index = [mask, salt](auto x) -> ssize_t {
  299. auto [hash_index, _] = HashValue(x, salt).template ExtractIndexAndTag<7>();
  300. return (hash_index & mask) >> GroupShift;
  301. };
  302. std::vector<std::vector<int>> grouped_key_indices(expected_size >>
  303. GroupShift);
  304. for (auto [i, k] : llvm::enumerate(keys)) {
  305. ssize_t hash_index = get_hash_index(k);
  306. CARBON_CHECK(hash_index < (expected_size >> GroupShift)) << hash_index;
  307. grouped_key_indices[hash_index].push_back(i);
  308. }
  309. ssize_t max_group_index =
  310. std::max_element(grouped_key_indices.begin(), grouped_key_indices.end(),
  311. [](const auto& lhs, const auto& rhs) {
  312. return lhs.size() < rhs.size();
  313. }) -
  314. grouped_key_indices.begin();
  315. // If the max number of collisions on the index is less than or equal to the
  316. // group size, there shouldn't be any necessary probing (outside of deletion)
  317. // and so this isn't interesting, skip printing.
  318. if (grouped_key_indices[max_group_index].size() <= GroupSize) {
  319. return;
  320. }
  321. llvm::errs() << "keys: " << keys.size()
  322. << " groups: " << grouped_key_indices.size() << "\n"
  323. << "max group index: " << llvm::formatv("{0x8}", max_group_index)
  324. << " collisions: "
  325. << grouped_key_indices[max_group_index].size() << "\n";
  326. for (auto i : llvm::ArrayRef(grouped_key_indices[max_group_index])
  327. .take_front(2 * GroupSize)) {
  328. auto k = keys[i];
  329. auto hash = static_cast<uint64_t>(HashValue(k, salt));
  330. llvm::errs() << " key: " << k
  331. << " salt: " << llvm::formatv("{0:x16}", salt)
  332. << " hash: " << llvm::formatv("{0:x16}", hash) << "\n";
  333. }
  334. }
  335. template auto DumpHashStatistics(llvm::ArrayRef<int> keys) -> void;
  336. template auto DumpHashStatistics(llvm::ArrayRef<int*> keys) -> void;
  337. template auto DumpHashStatistics(llvm::ArrayRef<llvm::StringRef> keys) -> void;
  338. } // namespace Carbon::RawHashtable