hashing_test.cpp
  1. // Part of the Carbon Language project, under the Apache License v2.0 with LLVM
  2. // Exceptions. See /LICENSE for license information.
  3. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. #include "common/hashing.h"
  5. #include <gmock/gmock.h>
  6. #include <gtest/gtest.h>
  7. #include <concepts>
  8. #include <type_traits>
  9. #include "llvm/ADT/Sequence.h"
  10. #include "llvm/ADT/StringExtras.h"
  11. #include "llvm/Support/FormatVariadic.h"
  12. #include "llvm/Support/TypeName.h"
  13. namespace Carbon {
  14. namespace {
  15. using ::testing::Eq;
  16. using ::testing::Le;
  17. using ::testing::Ne;
// Exercises the `HashCode` API surface: equality, `ExtractIndex`, and
// `ExtractIndexAndTag` across several tag widths.
TEST(HashingTest, HashCodeAPI) {
// Manually compute a few hash codes where we can exercise the underlying API.
HashCode empty = HashValue("");
HashCode a = HashValue("a");
HashCode b = HashValue("b");
// Recomputing the same input is stable within one execution, and these three
// simple inputs shouldn't collide.
ASSERT_THAT(HashValue(""), Eq(empty));
ASSERT_THAT(HashValue("a"), Eq(a));
ASSERT_THAT(HashValue("b"), Eq(b));
ASSERT_THAT(empty, Ne(a));
ASSERT_THAT(empty, Ne(b));
ASSERT_THAT(a, Ne(b));
// Exercise the methods in basic ways across a few sizes. This doesn't check
// much beyond stability across re-computed values, crashing, or hitting UB.
EXPECT_THAT(HashValue("a").ExtractIndex(), Eq(a.ExtractIndex()));
EXPECT_THAT(a.ExtractIndex(), Ne(b.ExtractIndex()));
EXPECT_THAT(a.ExtractIndex(), Ne(empty.ExtractIndex()));
// The tag shouldn't have bits set outside the range requested.
EXPECT_THAT(HashValue("a").ExtractIndexAndTag<1>().second & ~0b1, Eq(0));
EXPECT_THAT(HashValue("a").ExtractIndexAndTag<2>().second & ~0b11, Eq(0));
EXPECT_THAT(HashValue("a").ExtractIndexAndTag<3>().second & ~0b111, Eq(0));
EXPECT_THAT(HashValue("a").ExtractIndexAndTag<4>().second & ~0b1111, Eq(0));
// Note that the index produced with a tag may be different from the index
// alone!
EXPECT_THAT(HashValue("a").ExtractIndexAndTag<2>(),
Eq(a.ExtractIndexAndTag<2>()));
EXPECT_THAT(HashValue("a").ExtractIndexAndTag<16>(),
Eq(a.ExtractIndexAndTag<16>()));
EXPECT_THAT(HashValue("a").ExtractIndexAndTag<7>(),
Eq(a.ExtractIndexAndTag<7>()));
// Different inputs should produce different indices and tags here; this is
// not guaranteed in general, just expected for these specific inputs.
const auto [a_index, a_tag] = a.ExtractIndexAndTag<4>();
const auto [b_index, b_tag] = b.ExtractIndexAndTag<4>();
EXPECT_THAT(a_index, Ne(b_index));
EXPECT_THAT(a_tag, Ne(b_tag));
}
  52. TEST(HashingTest, Integers) {
  53. for (int64_t i : {0, 1, 2, 3, 42, -1, -2, -3, -13}) {
  54. SCOPED_TRACE(llvm::formatv("Hashing: {0}", i).str());
  55. auto test_int_hash = [](auto i) {
  56. using T = decltype(i);
  57. SCOPED_TRACE(
  58. llvm::formatv("Hashing type: {0}", llvm::getTypeName<T>()).str());
  59. HashCode hash = HashValue(i);
  60. // Hashes should be stable within the execution.
  61. EXPECT_THAT(HashValue(i), Eq(hash));
  62. // Zero should match, and other integers shouldn't collide trivially.
  63. HashCode hash_zero = HashValue(static_cast<T>(0));
  64. if (i == 0) {
  65. EXPECT_THAT(hash, Eq(hash_zero));
  66. } else {
  67. EXPECT_THAT(hash, Ne(hash_zero));
  68. }
  69. };
  70. test_int_hash(static_cast<int8_t>(i));
  71. test_int_hash(static_cast<uint8_t>(i));
  72. test_int_hash(static_cast<int16_t>(i));
  73. test_int_hash(static_cast<uint16_t>(i));
  74. test_int_hash(static_cast<int32_t>(i));
  75. test_int_hash(static_cast<uint32_t>(i));
  76. // `i` is already an int64_t variable.
  77. test_int_hash(i);
  78. test_int_hash(static_cast<uint64_t>(i));
  79. }
  80. }
  81. TEST(HashingTest, BasicSeeding) {
  82. auto unseeded_hash = HashValue(42);
  83. EXPECT_THAT(unseeded_hash, Ne(HashValue(42, 1)));
  84. EXPECT_THAT(unseeded_hash, Ne(HashValue(42, 2)));
  85. EXPECT_THAT(unseeded_hash, Ne(HashValue(42, 3)));
  86. EXPECT_THAT(unseeded_hash,
  87. Ne(HashValue(42, static_cast<uint64_t>(unseeded_hash))));
  88. }
// Pointers hash by address only: the pointee type is irrelevant, and distinct
// addresses (stack, heap, null) shouldn't trivially collide.
TEST(HashingTest, Pointers) {
int object1 = 42;
// Long enough to force a heap allocation so `object2.data()` is a distinct
// heap address rather than inline storage.
std::string object2 =
"Hello World! This is a long-ish string so it ends up on the heap!";
HashCode hash_null = HashValue(nullptr);
// Hashes should be stable.
EXPECT_THAT(HashValue(nullptr), Eq(hash_null));
// Hash other kinds of pointers without trivial collisions.
HashCode hash1 = HashValue(&object1);
HashCode hash2 = HashValue(&object2);
HashCode hash3 = HashValue(object2.data());
EXPECT_THAT(hash1, Ne(hash_null));
EXPECT_THAT(hash2, Ne(hash_null));
EXPECT_THAT(hash3, Ne(hash_null));
EXPECT_THAT(hash1, Ne(hash2));
EXPECT_THAT(hash1, Ne(hash3));
EXPECT_THAT(hash2, Ne(hash3));
// Hash values reflect the address and not the type.
EXPECT_THAT(HashValue(static_cast<void*>(nullptr)), Eq(hash_null));
EXPECT_THAT(HashValue(static_cast<int*>(nullptr)), Eq(hash_null));
EXPECT_THAT(HashValue(static_cast<std::string*>(nullptr)), Eq(hash_null));
EXPECT_THAT(HashValue(reinterpret_cast<void*>(&object1)), Eq(hash1));
EXPECT_THAT(HashValue(reinterpret_cast<int*>(&object2)), Eq(hash2));
EXPECT_THAT(HashValue(reinterpret_cast<std::string*>(object2.data())),
Eq(hash3));
}
// Pairs and tuples hash element-wise: every distinct combination of elements
// gets a distinct hash, 2-tuples match pairs, and pointers inside tuples hash
// by address just as they do standalone.
TEST(HashingTest, PairsAndTuples) {
// Note that we can't compare hash codes across arity, or in general, compare
// hash codes for different types as the type isn't part of the hash. These
// hashes are targeted at use in hash tables which pick a single type that's
// the basis of any comparison.
HashCode hash_00 = HashValue(std::pair(0, 0));
HashCode hash_01 = HashValue(std::pair(0, 1));
HashCode hash_10 = HashValue(std::pair(1, 0));
HashCode hash_11 = HashValue(std::pair(1, 1));
// All four 0/1 pairs must be pairwise distinct.
EXPECT_THAT(hash_00, Ne(hash_01));
EXPECT_THAT(hash_00, Ne(hash_10));
EXPECT_THAT(hash_00, Ne(hash_11));
EXPECT_THAT(hash_01, Ne(hash_10));
EXPECT_THAT(hash_01, Ne(hash_11));
EXPECT_THAT(hash_10, Ne(hash_11));
HashCode hash_000 = HashValue(std::tuple(0, 0, 0));
HashCode hash_001 = HashValue(std::tuple(0, 0, 1));
HashCode hash_010 = HashValue(std::tuple(0, 1, 0));
HashCode hash_011 = HashValue(std::tuple(0, 1, 1));
HashCode hash_100 = HashValue(std::tuple(1, 0, 0));
HashCode hash_101 = HashValue(std::tuple(1, 0, 1));
HashCode hash_110 = HashValue(std::tuple(1, 1, 0));
HashCode hash_111 = HashValue(std::tuple(1, 1, 1));
// All eight 0/1 3-tuples must be pairwise distinct.
EXPECT_THAT(hash_000, Ne(hash_001));
EXPECT_THAT(hash_000, Ne(hash_010));
EXPECT_THAT(hash_000, Ne(hash_011));
EXPECT_THAT(hash_000, Ne(hash_100));
EXPECT_THAT(hash_000, Ne(hash_101));
EXPECT_THAT(hash_000, Ne(hash_110));
EXPECT_THAT(hash_000, Ne(hash_111));
EXPECT_THAT(hash_001, Ne(hash_010));
EXPECT_THAT(hash_001, Ne(hash_011));
EXPECT_THAT(hash_001, Ne(hash_100));
EXPECT_THAT(hash_001, Ne(hash_101));
EXPECT_THAT(hash_001, Ne(hash_110));
EXPECT_THAT(hash_001, Ne(hash_111));
EXPECT_THAT(hash_010, Ne(hash_011));
EXPECT_THAT(hash_010, Ne(hash_100));
EXPECT_THAT(hash_010, Ne(hash_101));
EXPECT_THAT(hash_010, Ne(hash_110));
EXPECT_THAT(hash_010, Ne(hash_111));
EXPECT_THAT(hash_011, Ne(hash_100));
EXPECT_THAT(hash_011, Ne(hash_101));
EXPECT_THAT(hash_011, Ne(hash_110));
EXPECT_THAT(hash_011, Ne(hash_111));
EXPECT_THAT(hash_100, Ne(hash_101));
EXPECT_THAT(hash_100, Ne(hash_110));
EXPECT_THAT(hash_100, Ne(hash_111));
EXPECT_THAT(hash_101, Ne(hash_110));
EXPECT_THAT(hash_101, Ne(hash_111));
EXPECT_THAT(hash_110, Ne(hash_111));
// Hashing a 2-tuple and a pair should produce identical results, so pairs
// are compatible with code using things like variadic tuple construction.
EXPECT_THAT(HashValue(std::tuple(0, 0)), Eq(hash_00));
EXPECT_THAT(HashValue(std::tuple(0, 1)), Eq(hash_01));
EXPECT_THAT(HashValue(std::tuple(1, 0)), Eq(hash_10));
EXPECT_THAT(HashValue(std::tuple(1, 1)), Eq(hash_11));
// Integers in tuples should also work.
for (int i : {0, 1, 2, 3, 42, -1, -2, -3, -13}) {
SCOPED_TRACE(llvm::formatv("Hashing: ({0}, {0}, {0})", i).str());
auto test_int_tuple_hash = [](auto i) {
using T = decltype(i);
SCOPED_TRACE(
llvm::formatv("Hashing integer type: {0}", llvm::getTypeName<T>())
.str());
std::tuple v = {i, i, i};
HashCode hash = HashValue(v);
// Hashes should be stable within the execution.
EXPECT_THAT(HashValue(v), Eq(hash));
// Zero should match, and other integers shouldn't collide trivially.
T zero = 0;
std::tuple zero_tuple = {zero, zero, zero};
HashCode hash_zero = HashValue(zero_tuple);
if (i == 0) {
EXPECT_THAT(hash, Eq(hash_zero));
} else {
EXPECT_THAT(hash, Ne(hash_zero));
}
};
test_int_tuple_hash(i);
test_int_tuple_hash(static_cast<int8_t>(i));
test_int_tuple_hash(static_cast<uint8_t>(i));
test_int_tuple_hash(static_cast<int16_t>(i));
test_int_tuple_hash(static_cast<uint16_t>(i));
test_int_tuple_hash(static_cast<int32_t>(i));
test_int_tuple_hash(static_cast<uint32_t>(i));
test_int_tuple_hash(static_cast<int64_t>(i));
test_int_tuple_hash(static_cast<uint64_t>(i));
// Heterogeneous integer types should also work, but we only support
// comparing against hashes of tuples with the exact same type.
using T1 = std::tuple<int8_t, uint32_t, int16_t>;
using T2 = std::tuple<uint32_t, int16_t, uint64_t>;
if (i == 0) {
EXPECT_THAT(HashValue(T1{i, i, i}), Eq(HashValue(T1{0, 0, 0})));
EXPECT_THAT(HashValue(T2{i, i, i}), Eq(HashValue(T2{0, 0, 0})));
} else {
EXPECT_THAT(HashValue(T1{i, i, i}), Ne(HashValue(T1{0, 0, 0})));
EXPECT_THAT(HashValue(T2{i, i, i}), Ne(HashValue(T2{0, 0, 0})));
}
}
// Hash values of pointers in pairs and tuples reflect the address and not the
// type. Pairs and 2-tuples give the same hash values.
HashCode hash_2null = HashValue(std::pair(nullptr, nullptr));
EXPECT_THAT(HashValue(std::tuple(static_cast<int*>(nullptr),
static_cast<double*>(nullptr))),
Eq(hash_2null));
// Hash other kinds of pointers without trivial collisions.
int object1 = 42;
std::string object2 = "Hello world!";
HashCode hash_3ptr =
HashValue(std::tuple(&object1, &object2, object2.data()));
EXPECT_THAT(hash_3ptr, Ne(HashValue(std::tuple(nullptr, nullptr, nullptr))));
// Hash values reflect the address and not the type.
EXPECT_THAT(
HashValue(std::tuple(reinterpret_cast<void*>(&object1),
reinterpret_cast<int*>(&object2),
reinterpret_cast<std::string*>(object2.data()))),
Eq(hash_3ptr));
}
  234. TEST(HashingTest, BasicStrings) {
  235. llvm::SmallVector<std::pair<std::string, HashCode>> hashes;
  236. for (int size : {0, 1, 2, 4, 16, 64, 256, 1024}) {
  237. std::string s(size, 'a');
  238. hashes.push_back({s, HashValue(s)});
  239. }
  240. for (const auto& [s1, hash1] : hashes) {
  241. EXPECT_THAT(HashValue(s1), Eq(hash1));
  242. // Also check that we get the same hashes even when using string-wrapping
  243. // types.
  244. EXPECT_THAT(HashValue(std::string_view(s1)), Eq(hash1));
  245. EXPECT_THAT(HashValue(llvm::StringRef(s1)), Eq(hash1));
  246. // And some basic tests that simple things don't collide.
  247. for (const auto& [s2, hash2] : hashes) {
  248. if (s1 != s2) {
  249. EXPECT_THAT(hash1, Ne(hash2))
  250. << "Matching hashes for '" << s1 << "' and '" << s2 << "'";
  251. }
  252. }
  253. }
  254. }
  255. TEST(HashingTest, ArrayLike) {
  256. int c_array[] = {1, 2, 3, 4};
  257. llvm::ArrayRef arr = c_array;
  258. EXPECT_THAT(HashValue(c_array), Eq(HashValue(arr)));
  259. EXPECT_THAT(HashValue(std::array{1, 2, 3, 4}), Eq(HashValue(arr)));
  260. EXPECT_THAT(HashValue(std::vector{1, 2, 3, 4}), Eq(HashValue(arr)));
  261. EXPECT_THAT(HashValue(llvm::SmallVector<int>{1, 2, 3, 4}),
  262. Eq(HashValue(arr)));
  263. }
  264. TEST(HashingTest, HashAPInt) {
  265. // The bit width should be hashed as well as the value.
  266. llvm::APInt one_64(/*numBits=*/64, /*val=*/1);
  267. llvm::APInt two_64(/*numBits=*/64, /*val=*/2);
  268. llvm::APInt one_128(/*numBits=*/128, /*val=*/1);
  269. llvm::APInt two_128(/*numBits=*/128, /*val=*/2);
  270. std::array array = {one_64, two_64, one_128, two_128};
  271. for (int i : llvm::seq<int>(array.size())) {
  272. EXPECT_THAT(HashValue(array[i]), Eq(HashValue(array[i])));
  273. for (int j : llvm::seq<int>(i + 1, array.size())) {
  274. EXPECT_THAT(HashValue(array[i]), Ne(HashValue(array[j])))
  275. << "Hashing #" << i << " and #" << j;
  276. }
  277. }
  278. }
TEST(HashingTest, HashAPFloat) {
// Hashtable equality for `APFloat` uses a bitwise comparison. This
// differentiates between various things that would otherwise not make sense:
// - Different floating point semantics
// - `-0.0` and `0.0`
//
// It also allows NaNs to be compared meaningfully.
llvm::APFloat zero_float =
llvm::APFloat::getZero(llvm::APFloat::IEEEsingle());
llvm::APFloat neg_zero_float =
llvm::APFloat::getZero(llvm::APFloat::IEEEsingle(), /*Negative=*/true);
llvm::APFloat zero_double =
llvm::APFloat::getZero(llvm::APFloat::IEEEdouble());
llvm::APFloat zero_bfloat = llvm::APFloat::getZero(llvm::APFloat::BFloat());
llvm::APFloat one_float = llvm::APFloat::getOne(llvm::APFloat::IEEEsingle());
llvm::APFloat inf_float = llvm::APFloat::getInf(llvm::APFloat::IEEEsingle());
llvm::APFloat nan_0_float = llvm::APFloat::getNaN(
llvm::APFloat::IEEEsingle(), /*Negative=*/false, /*payload=*/0);
llvm::APFloat nan_42_float = llvm::APFloat::getNaN(
llvm::APFloat::IEEEsingle(), /*Negative=*/false, /*payload=*/42);
// `nan_0_float` is deliberately excluded here: it hashes equal to
// `nan_42_float` (see the payload note below), so it can't be part of the
// pairwise-distinct set.
std::array array = {zero_float, neg_zero_float, zero_double, zero_bfloat,
one_float, inf_float, nan_42_float};
for (int i : llvm::seq<int>(array.size())) {
// Rehashing is stable.
EXPECT_THAT(HashValue(array[i]), Eq(HashValue(array[i])));
// All bitwise-distinct values are pairwise distinct.
for (int j : llvm::seq<int>(i + 1, array.size())) {
EXPECT_THAT(HashValue(array[i]), Ne(HashValue(array[j])))
<< "Hashing #" << i << " and #" << j;
}
}
// Note that currently we use LLVM's hashing of `APFloat` which does *not*
// hash the payload of NaNs.
EXPECT_THAT(HashValue(nan_0_float), Eq(HashValue(nan_42_float)));
}
// A type that has hashing customization. However, it also works to be small and
// appear to have a unique object representation. This helps ensure that when a
// user provides custom hashing it is reliably used.
struct HashableType {
// The two fields that participate in the custom hash.
int8_t x;
int8_t y;
// Deliberately excluded from the custom hash below; differences here must
// not change the hash value.
int16_t ignored = 0;
// Provide the hashing but try to craft a relatively low-ranking overload to
// help ensure that the hashing framework doesn't accidentally override this.
template <typename T>
requires(std::same_as<T, HashableType>)
friend auto CarbonHashValue(const T& value, uint64_t seed) -> HashCode {
Hasher hasher(seed);
hasher.Hash(value.x, value.y);
return static_cast<HashCode>(hasher);
}
};
// The type must look like plain data so the framework could be tempted to hash
// its raw bytes; the tests verify the customization wins anyway.
static_assert(std::has_unique_object_representations_v<HashableType>);
  330. TEST(HashingTest, CustomType) {
  331. HashableType a = {.x = 1, .y = 2};
  332. HashableType b = {.x = 3, .y = 4};
  333. EXPECT_THAT(HashValue(a), Eq(HashValue(a)));
  334. EXPECT_THAT(HashValue(a), Ne(HashValue(b)));
  335. // Differences in an ignored field have no impact.
  336. HashableType c = {.x = 3, .y = 4, .ignored = 42};
  337. EXPECT_THAT(HashValue(c), Eq(HashValue(b)));
  338. }
TEST(HashingTest, ArrayRecursion) {
// Make sure we correctly recurse when hashing an array and don't try to use
// the object representation.
llvm::APInt one_64(/*numBits=*/64, /*val=*/1);
llvm::APInt two_64(/*numBits=*/64, /*val=*/2);
llvm::APInt one_128(/*numBits=*/128, /*val=*/1);
llvm::APInt two_128(/*numBits=*/128, /*val=*/2);
std::array apint_array = {one_64, two_64, one_128, two_128};
// Same elements => same hash; any changed element, order, or length => a
// different hash.
EXPECT_THAT(HashValue(apint_array),
Eq(HashValue(std::array{one_64, two_64, one_128, two_128})));
EXPECT_THAT(HashValue(apint_array),
Ne(HashValue(std::array{one_64, two_64, two_128, one_128})));
EXPECT_THAT(HashValue(apint_array),
Ne(HashValue(std::array{one_64, two_64, one_64, two_128})));
EXPECT_THAT(HashValue(apint_array),
Ne(HashValue(std::array{one_64, two_128, one_128, two_128})));
EXPECT_THAT(HashValue(apint_array),
Ne(HashValue(std::array{one_64, two_64, one_128})));
EXPECT_THAT(
HashValue(apint_array),
Ne(HashValue(std::array{one_64, two_64, one_128, two_128, two_128})));
// Also test for a custom type that still *looks* like plain data.
HashableType a = {.x = 1, .y = 2};
HashableType b = {.x = 3, .y = 4};
// `c` differs from `b` only in the ignored field, so element-wise hashing
// must treat them as equal below.
HashableType c = {.x = 3, .y = 4, .ignored = 42};
std::array custom_array = {a, b, c, a};
EXPECT_THAT(HashValue(custom_array), Eq(HashValue(std::array{a, b, c, a})));
EXPECT_THAT(HashValue(custom_array), Eq(HashValue(std::array{a, b, b, a})));
EXPECT_THAT(HashValue(custom_array), Ne(HashValue(std::array{a, b, c, b})));
EXPECT_THAT(HashValue(custom_array), Ne(HashValue(std::array{a, b, a, c})));
EXPECT_THAT(HashValue(custom_array), Ne(HashValue(std::array{a, b, c})));
EXPECT_THAT(HashValue(custom_array),
Ne(HashValue(std::array{a, b, c, a, a})));
}
TEST(HashingTest, TupleRecursion) {
// Make sure we can hash pairs and tuples which require us to recurse for each
// element rather than treating the whole object as raw storage.
// We can use APInt values to help test this.
llvm::APInt one_64(/*numBits=*/64, /*val=*/1);
llvm::APInt two_64(/*numBits=*/64, /*val=*/2);
llvm::APInt one_128(/*numBits=*/128, /*val=*/1);
llvm::APInt two_128(/*numBits=*/128, /*val=*/2);
// Identical pairs match; changing either element, swapping them, or changing
// arity produces a different hash.
EXPECT_THAT(HashValue(std::pair{one_64, one_128}),
Eq(HashValue(std::pair{one_64, one_128})));
EXPECT_THAT(HashValue(std::pair{one_64, one_128}),
Ne(HashValue(std::pair{one_64, two_64})));
EXPECT_THAT(HashValue(std::pair{one_64, one_128}),
Ne(HashValue(std::pair{one_64, one_64})));
EXPECT_THAT(HashValue(std::pair{one_64, one_128}),
Ne(HashValue(std::pair{one_128, one_64})));
EXPECT_THAT(HashValue(std::tuple{one_64, one_128, two_64}),
Eq(HashValue(std::tuple{one_64, one_128, two_64})));
EXPECT_THAT(HashValue(std::tuple{one_64, one_128, two_64}),
Ne(HashValue(std::tuple{one_64, two_64, two_64})));
EXPECT_THAT(HashValue(std::tuple{one_64, one_128, two_64}),
Ne(HashValue(std::tuple{one_64, one_64, two_64})));
EXPECT_THAT(HashValue(std::tuple{one_64, one_128, two_64}),
Ne(HashValue(std::tuple{one_64, two_64, one_128})));
EXPECT_THAT(HashValue(std::tuple{one_64, one_128, two_64}),
Ne(HashValue(std::tuple{one_64, one_128})));
// Also test for a custom type that still *looks* like plain data.
HashableType a = {.x = 1, .y = 2};
HashableType b = {.x = 3, .y = 4};
// `c` differs from `b` only in the ignored field, so it must hash equal to
// `b` when nested in pairs and tuples.
HashableType c = {.x = 3, .y = 4, .ignored = 42};
EXPECT_THAT(HashValue(std::pair{a, b}), Eq(HashValue(std::pair{a, b})));
EXPECT_THAT(HashValue(std::pair{a, b}), Ne(HashValue(std::pair{a, a})));
EXPECT_THAT(HashValue(std::pair{a, b}), Ne(HashValue(std::pair{b, a})));
EXPECT_THAT(HashValue(std::pair{a, b}), Eq(HashValue(std::pair{a, c})));
EXPECT_THAT(HashValue(std::tuple{a, b, a}),
Eq(HashValue(std::tuple{a, b, a})));
EXPECT_THAT(HashValue(std::tuple{a, b, a}),
Ne(HashValue(std::tuple{a, b, b})));
EXPECT_THAT(HashValue(std::tuple{a, b, a}),
Ne(HashValue(std::tuple{a, a, a})));
EXPECT_THAT(HashValue(std::tuple{a, b, a}),
Eq(HashValue(std::tuple{a, c, a})));
}
// The only significantly bad seed is zero, so pick a non-zero seed with a tiny
// amount of entropy to make sure that none of the testing relies on the entropy
// from this. Used by the exhaustive collision tests below.
constexpr uint64_t TestSeed = 42ULL * 1024;
  420. auto ToHexBytes(llvm::StringRef s) -> std::string {
  421. std::string rendered;
  422. llvm::raw_string_ostream os(rendered);
  423. os << "{";
  424. llvm::ListSeparator sep(", ");
  425. for (const char c : s) {
  426. os << sep << llvm::formatv("{0:x2}", static_cast<uint8_t>(c));
  427. }
  428. os << "}";
  429. return rendered;
  430. }
// A hash code paired with the value it was computed from. Used to build
// sortable lists of hashed inputs for the collision analysis below.
template <typename T>
struct HashedValue {
HashCode hash;
// The original input value that produced `hash`.
T v;
};
using HashedString = HashedValue<std::string>;
  437. template <typename T>
  438. auto PrintFullWidthHex(llvm::raw_ostream& os, T value) {
  439. static_assert(sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 ||
  440. sizeof(T) == 8);
  441. // Given the nature of a format string and the good formatting, a nested
  442. // conditional seems like the most readable structure.
  443. // NOLINTBEGIN(readability-avoid-nested-conditional-operator)
  444. os << llvm::formatv(sizeof(T) == 1 ? "{0:x2}"
  445. : sizeof(T) == 2 ? "{0:x4}"
  446. : sizeof(T) == 4 ? "{0:x8}"
  447. : "{0:x16}",
  448. static_cast<uint64_t>(value));
  449. // NOLINTEND(readability-avoid-nested-conditional-operator)
  450. }
// Streams a hashed integer as its hash code plus the full-width hex rendering
// of the input value, for use in test failure messages.
template <typename T>
requires std::integral<T>
auto operator<<(llvm::raw_ostream& os, HashedValue<T> hv)
-> llvm::raw_ostream& {
os << "hash " << hv.hash << " for value ";
PrintFullWidthHex(os, hv.v);
return os;
}
// Streams a hashed pair of integers as its hash code plus the full-width hex
// rendering of both elements, for use in test failure messages.
template <typename T, typename U>
requires std::integral<T> && std::integral<U>
auto operator<<(llvm::raw_ostream& os, HashedValue<std::pair<T, U>> hv)
-> llvm::raw_ostream& {
os << "hash " << hv.hash << " for pair of ";
PrintFullWidthHex(os, hv.v.first);
os << " and ";
PrintFullWidthHex(os, hv.v.second);
return os;
}
// Summary of the collisions found within a bit range of a set of hashes: the
// total number of colliding entries, plus the median and maximum collision
// count observed for any single input. See `FindBitRangeCollisions`.
struct Collisions {
int total;
int median;
int max;
};
// Analyzes a list of hashed values to find all of the hash codes which collide
// within a specific bit-range.
//
// With `BitBegin=0` and `BitEnd=64`, this is equivalent to finding full
// collisions. But when the begin and end of the bit range are narrower than the
// 64-bits of the hash code, it allows this function to analyze a specific
// window of bits within the 64-bit hash code to understand how many collisions
// emerge purely within that bit range.
//
// With narrow ranges (we often look at the first N and last N bits for small
// N), collisions are common and so this function summarizes this with the total
// number of collisions and the median number of collisions for an input value.
//
// Precondition (checked below): `hashes` is sorted by hash with no duplicate
// input values.
template <int BitBegin, int BitEnd, typename T>
auto FindBitRangeCollisions(llvm::ArrayRef<HashedValue<T>> hashes)
-> Collisions {
static_assert(BitBegin < BitEnd);
constexpr int BitCount = BitEnd - BitBegin;
// The extracted bits must fit the 32-bit `bits` field below.
static_assert(BitCount <= 32);
constexpr int BitShift = BitBegin;
constexpr uint64_t BitMask = ((1ULL << BitCount) - 1) << BitShift;
// We collect counts of collisions in a vector. Initially, we just have a zero
// and all inputs map to that collision count. As we discover collisions,
// we'll create a dedicated counter for it and count how many inputs collide.
llvm::SmallVector<int> collision_counts;
collision_counts.push_back(0);
// The "map" for collision counts. Each input hashed value has a corresponding
// index stored here. That index is the index of the collision count in the
// container above. We resize this to fill it with zeros to start as the zero
// index above has a collision count of zero.
//
// The result of this is that the number of collisions for `hashes[i]` is
// `collision_counts[collision_map[i]]`.
llvm::SmallVector<int> collision_map;
collision_map.resize(hashes.size());
// First, we extract the bit subsequence we want to examine from each hash and
// store it with an index back into the hashed values (or the collision map).
//
// The result is that, `bits_and_indices[i].bits` has the hash bits of
// interest from `hashes[bits_and_indices[i].index]`.
//
// And because `collision_map` above uses the same indices as `hashes`,
// `collision_counts[collision_map[bits_and_indices[i].index]]` is the number
// of collisions for `bits_and_indices[i].bits`.
struct BitSequenceAndHashIndex {
// The bit subsequence of a hash input, adjusted into the low bits.
uint32_t bits;
// The index of the hash input corresponding to this bit sequence.
int index;
};
llvm::SmallVector<BitSequenceAndHashIndex> bits_and_indices;
bits_and_indices.reserve(hashes.size());
for (const auto& [hash, v] : hashes) {
// Sanity check that the running index matches this entry.
CARBON_DCHECK(v == hashes[bits_and_indices.size()].v);
auto hash_bits = (static_cast<uint64_t>(hash) & BitMask) >> BitShift;
bits_and_indices.push_back(
{.bits = static_cast<uint32_t>(hash_bits),
.index = static_cast<int>(bits_and_indices.size())});
}
// Now we sort by the extracted bit sequence so we can efficiently scan for
// colliding bit patterns.
std::sort(
bits_and_indices.begin(), bits_and_indices.end(),
[](const auto& lhs, const auto& rhs) { return lhs.bits < rhs.bits; });
// Scan the sorted bit sequences we've extracted looking for collisions. We
// count the total collisions, but we also track the number of individual
// inputs that collide with each specific bit pattern.
uint32_t prev_hash_bits = bits_and_indices[0].bits;
int prev_index = bits_and_indices[0].index;
bool in_collision = false;
int total = 0;
for (const auto& [hash_bits, hash_index] :
llvm::ArrayRef(bits_and_indices).slice(1)) {
// Check if we've found a new hash (and thus a new value), reset everything.
CARBON_CHECK(hashes[prev_index].v != hashes[hash_index].v);
if (hash_bits != prev_hash_bits) {
CARBON_CHECK(hashes[prev_index].hash != hashes[hash_index].hash);
prev_hash_bits = hash_bits;
prev_index = hash_index;
in_collision = false;
continue;
}
// Otherwise, we have a colliding bit sequence.
++total;
// If we've already created a collision count to track this, just increment
// it and map this hash to it.
if (in_collision) {
++collision_counts.back();
collision_map[hash_index] = collision_counts.size() - 1;
continue;
}
// If this is a new collision, create a dedicated count to track it and
// begin counting. Both the first and second entries of the run map to the
// new counter, which starts at 1 collision.
in_collision = true;
collision_map[prev_index] = collision_counts.size();
collision_map[hash_index] = collision_counts.size();
collision_counts.push_back(1);
}
// Sort by collision count for each hash, so the middle element's count is
// the median and the last element's count is the max.
std::sort(bits_and_indices.begin(), bits_and_indices.end(),
[&](const auto& lhs, const auto& rhs) {
return collision_counts[collision_map[lhs.index]] <
collision_counts[collision_map[rhs.index]];
});
// And compute the median and max.
int median = collision_counts
[collision_map[bits_and_indices[bits_and_indices.size() / 2].index]];
int max = *std::max_element(collision_counts.begin(), collision_counts.end());
CARBON_CHECK(max ==
collision_counts[collision_map[bits_and_indices.back().index]]);
return {.total = total, .median = median, .max = max};
}
  585. auto CheckNoDuplicateValues(llvm::ArrayRef<HashedString> hashes) -> void {
  586. for (int i = 0, size = hashes.size(); i < size - 1; ++i) {
  587. const auto& [_, value] = hashes[i];
  588. CARBON_CHECK(value != hashes[i + 1].v) << "Duplicate value: " << value;
  589. }
  590. }
  591. template <int N>
  592. auto AllByteStringsHashedAndSorted() {
  593. static_assert(N < 5, "Can only generate all 4-byte strings or shorter.");
  594. llvm::SmallVector<HashedString> hashes;
  595. int64_t count = 1LL << (N * 8);
  596. for (int64_t i : llvm::seq(count)) {
  597. uint8_t bytes[N];
  598. for (int j : llvm::seq(N)) {
  599. bytes[j] = (static_cast<uint64_t>(i) >> (8 * j)) & 0xff;
  600. }
  601. std::string s(std::begin(bytes), std::end(bytes));
  602. hashes.push_back({HashValue(s, TestSeed), s});
  603. }
  604. std::sort(hashes.begin(), hashes.end(),
  605. [](const HashedString& lhs, const HashedString& rhs) {
  606. return static_cast<uint64_t>(lhs.hash) <
  607. static_cast<uint64_t>(rhs.hash);
  608. });
  609. CheckNoDuplicateValues(hashes);
  610. return hashes;
  611. }
  612. auto ExpectNoHashCollisions(llvm::ArrayRef<HashedString> hashes) -> void {
  613. HashCode prev_hash = hashes[0].hash;
  614. llvm::StringRef prev_s = hashes[0].v;
  615. for (const auto& [hash, s] : hashes.slice(1)) {
  616. if (hash != prev_hash) {
  617. prev_hash = hash;
  618. prev_s = s;
  619. continue;
  620. }
  621. FAIL() << "Colliding hash '" << hash << "' of strings "
  622. << ToHexBytes(prev_s) << " and " << ToHexBytes(s);
  623. }
  624. }
  625. TEST(HashingTest, Collisions1ByteSized) {
  626. auto hashes_storage = AllByteStringsHashedAndSorted<1>();
  627. auto hashes = llvm::ArrayRef(hashes_storage);
  628. ExpectNoHashCollisions(hashes);
  629. auto low_32bit_collisions = FindBitRangeCollisions<0, 32>(hashes);
  630. EXPECT_THAT(low_32bit_collisions.total, Eq(0));
  631. auto high_32bit_collisions = FindBitRangeCollisions<32, 64>(hashes);
  632. EXPECT_THAT(high_32bit_collisions.total, Eq(0));
  633. // We expect collisions when only looking at 7-bits of the hash. However,
  634. // modern hash table designs need to use either the low or high 7 bits as tags
  635. // for faster searching. So we add some direct testing that the median and max
  636. // collisions for any given key stay within bounds. We express the bounds in
  637. // terms of the minimum expected "perfect" rate of collisions if uniformly
  638. // distributed.
  639. int min_7bit_collisions = llvm::NextPowerOf2(hashes.size() - 1) / (1 << 7);
  640. auto low_7bit_collisions = FindBitRangeCollisions<0, 7>(hashes);
  641. EXPECT_THAT(low_7bit_collisions.median, Le(8 * min_7bit_collisions));
  642. EXPECT_THAT(low_7bit_collisions.max, Le(8 * min_7bit_collisions));
  643. auto high_7bit_collisions = FindBitRangeCollisions<64 - 7, 64>(hashes);
  644. EXPECT_THAT(high_7bit_collisions.median, Le(2 * min_7bit_collisions));
  645. EXPECT_THAT(high_7bit_collisions.max, Le(4 * min_7bit_collisions));
  646. }
  647. TEST(HashingTest, Collisions2ByteSized) {
  648. auto hashes_storage = AllByteStringsHashedAndSorted<2>();
  649. auto hashes = llvm::ArrayRef(hashes_storage);
  650. ExpectNoHashCollisions(hashes);
  651. auto low_32bit_collisions = FindBitRangeCollisions<0, 32>(hashes);
  652. EXPECT_THAT(low_32bit_collisions.total, Eq(0));
  653. auto high_32bit_collisions = FindBitRangeCollisions<32, 64>(hashes);
  654. EXPECT_THAT(high_32bit_collisions.total, Eq(0));
  655. // Similar to 1-byte keys, we do expect a certain rate of collisions here but
  656. // bound the median and max.
  657. int min_7bit_collisions = llvm::NextPowerOf2(hashes.size() - 1) / (1 << 7);
  658. auto low_7bit_collisions = FindBitRangeCollisions<0, 7>(hashes);
  659. EXPECT_THAT(low_7bit_collisions.median, Le(2 * min_7bit_collisions));
  660. EXPECT_THAT(low_7bit_collisions.max, Le(2 * min_7bit_collisions));
  661. auto high_7bit_collisions = FindBitRangeCollisions<64 - 7, 64>(hashes);
  662. EXPECT_THAT(high_7bit_collisions.median, Le(2 * min_7bit_collisions));
  663. EXPECT_THAT(high_7bit_collisions.max, Le(2 * min_7bit_collisions));
  664. }
  665. // Generate and hash all strings of of [BeginByteCount, EndByteCount) bytes,
  666. // with [BeginSetBitCount, EndSetBitCount) contiguous bits at each possible bit
  667. // offset set to one and all other bits set to zero.
  668. template <int BeginByteCount, int EndByteCount, int BeginSetBitCount,
  669. int EndSetBitCount>
  670. struct SparseHashTestParamRanges {
  671. static_assert(BeginByteCount >= 0);
  672. static_assert(BeginByteCount < EndByteCount);
  673. static_assert(BeginSetBitCount >= 0);
  674. static_assert(BeginSetBitCount < EndSetBitCount);
  675. // Note that we intentionally allow the end-set-bit-count to result in more
  676. // set bits than are available -- we truncate the number of set bits to fit
  677. // within the byte string.
  678. static_assert(BeginSetBitCount <= BeginByteCount * 8);
  679. struct ByteCount {
  680. static constexpr int Begin = BeginByteCount;
  681. static constexpr int End = EndByteCount;
  682. };
  683. struct SetBitCount {
  684. static constexpr int Begin = BeginSetBitCount;
  685. static constexpr int End = EndSetBitCount;
  686. };
  687. };
// Typed test fixture that generates "sparse" byte strings -- strings that are
// all zeros except for one contiguous run of one bits -- according to the
// compile-time ranges supplied by `ParamRanges` (a
// `SparseHashTestParamRanges` instantiation).
template <typename ParamRanges>
struct SparseHashTest : ::testing::Test {
  using ByteCount = typename ParamRanges::ByteCount;
  using SetBitCount = typename ParamRanges::SetBitCount;

  // Builds every configured sparse byte string, hashes each with `TestSeed`,
  // and returns the (hash, value) pairs sorted by the full 64-bit hash so
  // collisions are adjacent. Also checks no duplicate strings were generated.
  static auto GetHashedByteStrings() {
    llvm::SmallVector<HashedString> hashes;
    // Both loops traverse their `Begin`/`End` bounds inclusively.
    for (int byte_count :
         llvm::seq_inclusive(ByteCount::Begin, ByteCount::End)) {
      int bits = byte_count * 8;
      // Clamp the run length so it can fit within this string's bits.
      for (int set_bit_count : llvm::seq_inclusive(
               SetBitCount::Begin, std::min(bits, SetBitCount::End))) {
        if (set_bit_count == 0) {
          // A zero-length run has exactly one representative: all zeros.
          std::string s(byte_count, '\0');
          hashes.push_back({HashValue(s, TestSeed), std::move(s)});
          continue;
        }
        // Place the run at every bit offset where it fits entirely.
        for (int begin_set_bit : llvm::seq_inclusive(0, bits - set_bit_count)) {
          std::string s(byte_count, '\0');
          int begin_set_bit_byte_index = begin_set_bit / 8;
          int begin_set_bit_bit_index = begin_set_bit % 8;
          int end_set_bit_byte_index = (begin_set_bit + set_bit_count) / 8;
          int end_set_bit_bit_index = (begin_set_bit + set_bit_count) % 8;
          // We build a begin byte and end byte. We set the begin byte, set
          // subsequent bytes up to *and including* the end byte to all ones,
          // and then mask the end byte. For multi-byte runs, the mask just sets
          // the end byte and for single-byte runs the mask computes the
          // intersecting bits.
          //
          // Consider a 4-set-bit count, starting at bit 2. The begin bit index
          // is 2, and the end bit index is 6.
          //
          //   Begin byte:  0b11111111 -(shl 2)-----> 0b11111100
          //   End byte:    0b11111111 -(shr (8-6))-> 0b00111111
          //   Masked byte:                           0b00111100
          //
          // Or a 10-set-bit-count starting at bit 2. The begin bit index is 2,
          // the end byte index is (12 / 8) or 1, and the end bit index is (12 %
          // 8) or 4.
          //
          //   Begin byte:  0b11111111 -(shl 2)-----> 0b11111100 -> 6 bits
          //   End byte:    0b11111111 -(shr (8-4))-> 0b00001111 -> 4 bits
          //                                                       10 total bits
          //
          uint8_t begin_set_bit_byte = 0xFFU << begin_set_bit_bit_index;
          uint8_t end_set_bit_byte = 0xFFU >> (8 - end_set_bit_bit_index);
          bool has_end_byte_bits = end_set_bit_byte != 0;
          s[begin_set_bit_byte_index] = begin_set_bit_byte;
          // Fill the interior bytes (and the end byte when it has bits; the
          // bool extends the half-open range by one) with all ones.
          for (int i : llvm::seq(begin_set_bit_byte_index + 1,
                                 end_set_bit_byte_index + has_end_byte_bits)) {
            s[i] = '\xFF';
          }
          // If there are no bits set in the end byte, it may be past-the-end
          // and we can't even mask a zero byte safely.
          if (has_end_byte_bits) {
            s[end_set_bit_byte_index] &= end_set_bit_byte;
          }
          hashes.push_back({HashValue(s, TestSeed), std::move(s)});
        }
      }
    }
    // Sort by the full 64-bit hash so colliding entries become adjacent.
    std::sort(hashes.begin(), hashes.end(),
              [](const HashedString& lhs, const HashedString& rhs) {
                return static_cast<uint64_t>(lhs.hash) <
                       static_cast<uint64_t>(rhs.hash);
              });
    CheckNoDuplicateValues(hashes);
    return hashes;
  }
};
// The parameter bundles to instantiate `SparseHashTest` with: zero-or-one set
// bits across byte counts up to 256, short 2-4 bit runs up to 128 bytes, and
// longer 4-16 bit runs up to 64 bytes.
using SparseHashTestParams = ::testing::Types<
    SparseHashTestParamRanges</*BeginByteCount=*/0, /*EndByteCount=*/256,
                              /*BeginSetBitCount=*/0, /*EndSetBitCount=*/1>,
    SparseHashTestParamRanges</*BeginByteCount=*/1, /*EndByteCount=*/128,
                              /*BeginSetBitCount=*/2, /*EndSetBitCount=*/4>,
    SparseHashTestParamRanges</*BeginByteCount=*/1, /*EndByteCount=*/64,
                              /*BeginSetBitCount=*/4, /*EndSetBitCount=*/16>>;
TYPED_TEST_SUITE(SparseHashTest, SparseHashTestParams);
  765. TYPED_TEST(SparseHashTest, Collisions) {
  766. auto hashes_storage = this->GetHashedByteStrings();
  767. auto hashes = llvm::ArrayRef(hashes_storage);
  768. ExpectNoHashCollisions(hashes);
  769. int min_7bit_collisions = llvm::NextPowerOf2(hashes.size() - 1) / (1 << 7);
  770. auto low_7bit_collisions = FindBitRangeCollisions<0, 7>(hashes);
  771. EXPECT_THAT(low_7bit_collisions.median, Le(2 * min_7bit_collisions));
  772. EXPECT_THAT(low_7bit_collisions.max, Le(2 * min_7bit_collisions));
  773. auto high_7bit_collisions = FindBitRangeCollisions<64 - 7, 64>(hashes);
  774. EXPECT_THAT(high_7bit_collisions.median, Le(2 * min_7bit_collisions));
  775. EXPECT_THAT(high_7bit_collisions.max, Le(2 * min_7bit_collisions));
  776. }
  777. } // namespace
  778. } // namespace Carbon