// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#include <benchmark/benchmark.h>

#include <type_traits>

#include "absl/container/flat_hash_set.h"
#include "common/raw_hashtable_benchmark_helpers.h"
#include "common/set.h"
#include "llvm/ADT/DenseSet.h"

namespace Carbon {
namespace {

using RawHashtable::CarbonHashDI;
using RawHashtable::GetKeysAndHitKeys;
using RawHashtable::GetKeysAndMissKeys;
using RawHashtable::HitArgs;
using RawHashtable::ReportTableMetrics;
using RawHashtable::SizeArgs;
using RawHashtable::ValueToBool;

template <typename SetT>
struct IsCarbonSetImpl : std::false_type {};
template <typename KT, int MinSmallSize>
struct IsCarbonSetImpl<Set<KT, MinSmallSize>> : std::true_type {};

template <typename SetT>
static constexpr bool IsCarbonSet = IsCarbonSetImpl<SetT>::value;

// A wrapper around various set types that we specialize to implement a common
// API used in the benchmarks for the different set data structures, which
// themselves support different APIs. The primary template assumes a roughly
// `std::unordered_set` API design, and types with a different API design are
// supported through specializations.
template <typename SetT>
struct SetWrapperImpl {
  using KeyT = typename SetT::key_type;

  SetT s;

  auto BenchContains(KeyT k) -> bool { return s.find(k) != s.end(); }

  auto BenchLookup(KeyT k) -> bool {
    auto it = s.find(k);
    if (it == s.end()) {
      return false;
    }
    // We expect keys to always convert to `true` so directly return that here.
    return ValueToBool(*it);
  }

  auto BenchInsert(KeyT k) -> bool {
    auto result = s.insert(k);
    return result.second;
  }

  auto BenchErase(KeyT k) -> bool { return s.erase(k) != 0; }
};

// Explicit (partial) specialization for the Carbon set type that uses its
// different API design.
template <typename KT, int MinSmallSize>
struct SetWrapperImpl<Set<KT, MinSmallSize>> {
  using SetT = Set<KT, MinSmallSize>;
  using KeyT = KT;

  SetT s;

  auto BenchContains(KeyT k) -> bool { return s.Contains(k); }

  auto BenchLookup(KeyT k) -> bool {
    auto result = s.Lookup(k);
    if (!result) {
      return false;
    }
    return ValueToBool(result.key());
  }

  auto BenchInsert(KeyT k) -> bool {
    auto result = s.Insert(k);
    return result.is_inserted();
  }

  auto BenchErase(KeyT k) -> bool { return s.Erase(k); }
};

// Provide a way to override the Carbon Set specific benchmark runs with another
// hashtable implementation. When building, you can use one of these enum names
// in a macro define such as `-DCARBON_SET_BENCH_OVERRIDE=Name` in order to
// trigger a specific override for the `Set` type benchmarks. This is used to
// get before/after runs that compare the performance of Carbon's Set versus
// other implementations.
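//
// For example, a hypothetical Bazel invocation selecting the Abseil override
// might look like:
//
//   bazel run -c opt --copt=-DCARBON_SET_BENCH_OVERRIDE=Abseil :set_benchmark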
enum class SetOverride : uint8_t {
  Abseil,
  LLVM,
  LLVMAndCarbonHash,
};

template <typename SetT, SetOverride Override>
struct SetWrapperOverride : SetWrapperImpl<SetT> {};

template <typename KeyT, int MinSmallSize>
struct SetWrapperOverride<Set<KeyT, MinSmallSize>, SetOverride::Abseil>
    : SetWrapperImpl<absl::flat_hash_set<KeyT>> {};

template <typename KeyT, int MinSmallSize>
struct SetWrapperOverride<Set<KeyT, MinSmallSize>, SetOverride::LLVM>
    : SetWrapperImpl<llvm::DenseSet<KeyT>> {};

template <typename KeyT, int MinSmallSize>
struct SetWrapperOverride<Set<KeyT, MinSmallSize>,
                          SetOverride::LLVMAndCarbonHash>
    : SetWrapperImpl<llvm::DenseSet<KeyT, CarbonHashDI<KeyT>>> {};

#ifndef CARBON_SET_BENCH_OVERRIDE
template <typename SetT>
using SetWrapper = SetWrapperImpl<SetT>;
#else
template <typename SetT>
using SetWrapper =
    SetWrapperOverride<SetT, SetOverride::CARBON_SET_BENCH_OVERRIDE>;
#endif

// NOLINTBEGIN(bugprone-macro-parentheses): Parentheses are incorrect here.
#define MAP_BENCHMARK_ONE_OP_SIZE(NAME, APPLY, KT)        \
  BENCHMARK(NAME<Set<KT>>)->Apply(APPLY);                 \
  BENCHMARK(NAME<absl::flat_hash_set<KT>>)->Apply(APPLY); \
  BENCHMARK(NAME<llvm::DenseSet<KT>>)->Apply(APPLY);      \
  BENCHMARK(NAME<llvm::DenseSet<KT, CarbonHashDI<KT>>>)->Apply(APPLY)
// NOLINTEND(bugprone-macro-parentheses)

#define MAP_BENCHMARK_ONE_OP(NAME, APPLY)       \
  MAP_BENCHMARK_ONE_OP_SIZE(NAME, APPLY, int);  \
  MAP_BENCHMARK_ONE_OP_SIZE(NAME, APPLY, int*); \
  MAP_BENCHMARK_ONE_OP_SIZE(NAME, APPLY, llvm::StringRef)
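//
// For instance, `MAP_BENCHMARK_ONE_OP(BM_SetContainsHitPtr, HitArgs)` expands
// to twelve `BENCHMARK` registrations: the four set implementations above,
// each instantiated for `int`, `int*`, and `llvm::StringRef` keys, and all
// configured with the `HitArgs` argument generator.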
// Benchmark the "latency" of testing for a key in a set. This always tests
// with a key that is found.
//
// However, because the key is always found and because the test ultimately
// involves conditional control flow that can be predicted, we expect modern
// CPUs to perfectly predict the control flow here and turn the measurement
// from one iteration to the next into a throughput measurement rather than a
// real latency measurement.
//
// That said, this does represent a particularly common way in which a set data
// structure is accessed. The numbers should just be carefully interpreted in
// the context of being more a reflection of reciprocal throughput than actual
// latency. See the `Lookup` benchmarks for a genuine latency measure with its
// own caveats.
//
// This benchmark does still show some interesting caching effects when
// querying large fractions of large tables, and can give a sense of the
// inescapable magnitude of these effects even when there is a great deal of
// prediction and speculative execution to hide memory access latency.
template <typename SetT>
static void BM_SetContainsHitPtr(benchmark::State& state) {
  using SetWrapperT = SetWrapper<SetT>;
  using KT = typename SetWrapperT::KeyT;
  SetWrapperT s;
  auto [keys, lookup_keys] =
      GetKeysAndHitKeys<KT>(state.range(0), state.range(1));
  for (auto k : keys) {
    s.BenchInsert(k);
  }

  ssize_t lookup_keys_size = lookup_keys.size();
  while (state.KeepRunningBatch(lookup_keys_size)) {
    for (ssize_t i = 0; i < lookup_keys_size;) {
      // We block optimizing `i` as that has proven both more effective at
      // blocking the loop from being optimized away and less disruptive to the
      // generated code that we're benchmarking.
      benchmark::DoNotOptimize(i);

      bool result = s.BenchContains(lookup_keys[i]);
      CARBON_DCHECK(result);
      // We use the lookup success to step through keys, establishing a
      // dependency between each lookup. This doesn't fully allow us to measure
      // latency rather than throughput, as noted above.
      i += static_cast<ssize_t>(result);
    }
  }
}
MAP_BENCHMARK_ONE_OP(BM_SetContainsHitPtr, HitArgs);

// Benchmark the "latency" (but more likely the reciprocal throughput, see the
// comment above) of testing for a key in the set that is *not* present.
template <typename SetT>
static void BM_SetContainsMissPtr(benchmark::State& state) {
  using SetWrapperT = SetWrapper<SetT>;
  using KT = typename SetWrapperT::KeyT;
  SetWrapperT s;
  auto [keys, lookup_keys] = GetKeysAndMissKeys<KT>(state.range(0));
  for (auto k : keys) {
    s.BenchInsert(k);
  }

  ssize_t lookup_keys_size = lookup_keys.size();
  while (state.KeepRunningBatch(lookup_keys_size)) {
    for (ssize_t i = 0; i < lookup_keys_size;) {
      benchmark::DoNotOptimize(i);

      bool result = s.BenchContains(lookup_keys[i]);
      CARBON_DCHECK(!result);
      i += static_cast<ssize_t>(!result);
    }
  }
}
MAP_BENCHMARK_ONE_OP(BM_SetContainsMissPtr, SizeArgs);

// A somewhat contrived latency test for the lookup code path.
//
// While lookups into a set are often (but not always) simply used to influence
// control flow, that style of access produces benchmark results that are
// difficult to evaluate (see the comments on the `Contains` benchmarks above).
//
// So here we actually access the key in the set and convert that key's value
// to a boolean on the critical path of each iteration. This lets us have a
// genuine latency benchmark of looking up a key in the set, at the expense of
// being somewhat contrived. That said, for usage where the key object is
// queried or operated on in some way once looked up in the set, this will be
// fairly representative of the latency cost from the data structure.
template <typename SetT>
static void BM_SetLookupHitPtr(benchmark::State& state) {
  using SetWrapperT = SetWrapper<SetT>;
  using KT = typename SetWrapperT::KeyT;
  SetWrapperT s;
  auto [keys, lookup_keys] =
      GetKeysAndHitKeys<KT>(state.range(0), state.range(1));
  for (auto k : keys) {
    s.BenchInsert(k);
  }

  ssize_t lookup_keys_size = lookup_keys.size();
  while (state.KeepRunningBatch(lookup_keys_size)) {
    for (ssize_t i = 0; i < lookup_keys_size;) {
      benchmark::DoNotOptimize(i);

      bool result = s.BenchLookup(lookup_keys[i]);
      CARBON_DCHECK(result);
      i += static_cast<ssize_t>(result);
    }
  }
}
MAP_BENCHMARK_ONE_OP(BM_SetLookupHitPtr, HitArgs);

// First erase and then insert the key. The code path will always be the same
// here and so we expect this to largely be a throughput benchmark because of
// branch prediction and speculative execution.
//
// We don't expect erase followed by insertion to be a common user code
// sequence, but we don't have a good way of benchmarking either erase or
// insert in isolation -- each would change the size of the table and thus the
// next iteration's benchmark. And if we try to correct the table size outside
// of the timed region, we end up trying to exclude too fine-grained a region
// from the timers to get good measurement data.
//
// Our solution is to benchmark both erase and insertion back to back. We can
// then get a good profile of the code sequence of each, and at least measure
// the sum cost of these reliably. Careful profiling can help attribute that
// cost between erase and insert in order to understand which of the two
// operations is contributing most to any performance artifacts observed.
template <typename SetT>
static void BM_SetEraseInsertHitPtr(benchmark::State& state) {
  using SetWrapperT = SetWrapper<SetT>;
  using KT = typename SetWrapperT::KeyT;
  SetWrapperT s;
  auto [keys, lookup_keys] =
      GetKeysAndHitKeys<KT>(state.range(0), state.range(1));
  for (auto k : keys) {
    s.BenchInsert(k);
  }

  ssize_t lookup_keys_size = lookup_keys.size();
  while (state.KeepRunningBatch(lookup_keys_size)) {
    for (ssize_t i = 0; i < lookup_keys_size;) {
      benchmark::DoNotOptimize(i);

      s.BenchErase(lookup_keys[i]);
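      // Compiler barrier: force the erase's memory effects to be treated as
      // observable so the erase can't be folded into or reordered with the
      // insert below.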
      benchmark::ClobberMemory();

      bool inserted = s.BenchInsert(lookup_keys[i]);
      CARBON_DCHECK(inserted);
      i += static_cast<ssize_t>(inserted);
    }
  }
}
MAP_BENCHMARK_ONE_OP(BM_SetEraseInsertHitPtr, HitArgs);

// NOLINTBEGIN(bugprone-macro-parentheses): Parentheses are incorrect here.
#define MAP_BENCHMARK_OP_SEQ_SIZE(NAME, KT)                  \
  BENCHMARK(NAME<Set<KT>>)->Apply(SizeArgs);                 \
  BENCHMARK(NAME<absl::flat_hash_set<KT>>)->Apply(SizeArgs); \
  BENCHMARK(NAME<llvm::DenseSet<KT>>)->Apply(SizeArgs);      \
  BENCHMARK(NAME<llvm::DenseSet<KT, CarbonHashDI<KT>>>)->Apply(SizeArgs)
// NOLINTEND(bugprone-macro-parentheses)

#define MAP_BENCHMARK_OP_SEQ(NAME)       \
  MAP_BENCHMARK_OP_SEQ_SIZE(NAME, int);  \
  MAP_BENCHMARK_OP_SEQ_SIZE(NAME, int*); \
  MAP_BENCHMARK_OP_SEQ_SIZE(NAME, llvm::StringRef)

// This is an interesting, somewhat specialized benchmark that measures the
// cost of inserting a sequence of keys into a set up to some size, then
// inserting a colliding key, and finally throwing away the set.
//
// This is an especially important usage pattern for sets as a large number of
// algorithms essentially look like this, such as collision detection, cycle
// detection, de-duplication, etc.
//
// It also covers both the insert-into-an-empty-slot code path that isn't
// covered elsewhere, and the code path for growing a table to a larger size.
//
// This is the second most important aspect of expected set usage after testing
// for presence. It also nicely lends itself to a single benchmark that covers
// the total cost of this usage pattern.
//
// Because this benchmark operates on whole sets, we also compute the number of
// probed keys for Carbon's set as that is both a general reflection of the
// efficacy of the underlying hash function, and a direct factor that drives
// the cost of these operations.
template <typename SetT>
static void BM_SetInsertSeq(benchmark::State& state) {
  using SetWrapperT = SetWrapper<SetT>;
  using KT = typename SetWrapperT::KeyT;
  constexpr ssize_t LookupKeysSize = 1 << 8;
  auto [keys, lookup_keys] =
      GetKeysAndHitKeys<KT>(state.range(0), LookupKeysSize);

  // `lookup_keys` is a large shuffled collection of keys (with duplicates)
  // that we'll draw from at the end of each iteration.
  ssize_t i = 0;
  for (auto _ : state) {
    benchmark::DoNotOptimize(i);

    SetWrapperT s;
    for (auto k : keys) {
      bool inserted = s.BenchInsert(k);
      CARBON_DCHECK(inserted, "Must be a successful insert!");
    }

    // Now insert a final random repeated key.
    bool inserted = s.BenchInsert(lookup_keys[i]);
    CARBON_DCHECK(!inserted, "Must already be in the set!");

    // Rotate through the shuffled keys.
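    // `LookupKeysSize` is a power of two, so the mask simply wraps the index
    // back to zero once we step past the last key.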
    i = (i + static_cast<ssize_t>(!inserted)) & (LookupKeysSize - 1);
  }

  // It can be easier in some cases to think of this as a key-throughput rate
  // of insertion rather than the latency of inserting N keys, so construct the
  // rate counter as well.
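  // (`kIsIterationInvariantRate` scales the counter's value by the iteration
  // count and divides by the elapsed time, reporting keys per second.)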
  state.counters["KeyRate"] = benchmark::Counter(
      keys.size(), benchmark::Counter::kIsIterationInvariantRate);

  // Report some extra statistics about the Carbon type.
  if constexpr (IsCarbonSet<SetT>) {
    // Re-build a set outside of the timing loop to look at the statistics
    // rather than the timing.
    SetT s;
    for (auto k : keys) {
      bool inserted = s.Insert(k).is_inserted();
      CARBON_DCHECK(inserted, "Must be a successful insert!");
    }
    ReportTableMetrics(s, state);

    // Uncomment this call to print out statistics about the index-collisions
    // among these keys for debugging:
    //
    // RawHashtable::DumpHashStatistics(raw_keys);
  }
}
MAP_BENCHMARK_OP_SEQ(BM_SetInsertSeq);

}  // namespace
}  // namespace Carbon