map_benchmark.cpp

// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#include <benchmark/benchmark.h>

#include <boost/unordered/unordered_flat_map.hpp>
#include <type_traits>

#include "absl/container/flat_hash_map.h"
#include "common/map.h"
#include "common/raw_hashtable_benchmark_helpers.h"
#include "llvm/ADT/DenseMap.h"

namespace Carbon {
namespace {

using RawHashtable::CarbonHashDI;
using RawHashtable::GetKeysAndHitKeys;
using RawHashtable::GetKeysAndMissKeys;
using RawHashtable::HitArgs;
using RawHashtable::LowZeroBitInt;
using RawHashtable::ReportTableMetrics;
using RawHashtable::SizeArgs;
using RawHashtable::ValueToBool;

// Helpers to synthesize some value of one of the three types we use as value
// types.
template <typename T>
auto MakeValue() -> T {
  if constexpr (std::is_same_v<T, llvm::StringRef>) {
    return "abc";
  } else if constexpr (std::is_pointer_v<T>) {
    static std::remove_pointer_t<T> x;
    return &x;
  } else {
    return 42;
  }
}
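
// A second helper that synthesizes a value distinct from `MakeValue`'s, so the
// update benchmarks below overwrite the originally inserted value with
// something different.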
template <typename T>
auto MakeValue2() -> T {
  if constexpr (std::is_same_v<T, llvm::StringRef>) {
    return "qux";
  } else if constexpr (std::is_pointer_v<T>) {
    static std::remove_pointer_t<T> y;
    return &y;
  } else {
    return 7;
  }
}

template <typename MapT>
struct IsCarbonMapImpl : std::false_type {};
template <typename KT, typename VT, int MinSmallSize>
struct IsCarbonMapImpl<Map<KT, VT, MinSmallSize>> : std::true_type {};

template <typename MapWrapperT>
static constexpr bool IsCarbonMap =
    IsCarbonMapImpl<typename MapWrapperT::MapT>::value;

// A wrapper around the various map types being benchmarked that we specialize
// to provide a common API across data structures with different native APIs.
// The primary template assumes a roughly `std::unordered_map`-style API design;
// types with a different API design are supported through specializations.
template <typename InMapT>
struct MapWrapperImpl {
  using MapT = InMapT;
  using KeyT = typename MapT::key_type;
  using ValueT = typename MapT::mapped_type;

  MapT m;

  auto BenchContains(KeyT k) -> bool { return m.find(k) != m.end(); }

  auto BenchLookup(KeyT k) -> bool {
    auto it = m.find(k);
    if (it == m.end()) {
      return false;
    }
    return ValueToBool(it->second);
  }

  auto BenchInsert(KeyT k, ValueT v) -> bool {
    auto result = m.insert({k, v});
    return result.second;
  }

  auto BenchUpdate(KeyT k, ValueT v) -> bool {
    auto result = m.insert({k, v});
    result.first->second = v;
    return result.second;
  }

  auto BenchErase(KeyT k) -> bool { return m.erase(k) != 0; }
};

// Explicit (partial) specialization for the Carbon map type that uses its
// different API design.
template <typename KT, typename VT, int MinSmallSize>
struct MapWrapperImpl<Map<KT, VT, MinSmallSize>> {
  using MapT = Map<KT, VT, MinSmallSize>;
  using KeyT = KT;
  using ValueT = VT;

  MapT m;

  auto BenchContains(KeyT k) -> bool { return m.Contains(k); }

  auto BenchLookup(KeyT k) -> bool {
    auto result = m.Lookup(k);
    if (!result) {
      return false;
    }
    return ValueToBool(result.value());
  }

  auto BenchInsert(KeyT k, ValueT v) -> bool {
    auto result = m.Insert(k, v);
    return result.is_inserted();
  }

  auto BenchUpdate(KeyT k, ValueT v) -> bool {
    auto result = m.Update(k, v);
    return result.is_inserted();
  }

  auto BenchErase(KeyT k) -> bool { return m.Erase(k); }
};

// Provide a way to override the Carbon Map specific benchmark runs with another
// hashtable implementation. When building, you can use one of these enum names
// in a macro define such as `-DCARBON_MAP_BENCH_OVERRIDE=Name` in order to
// trigger a specific override for the `Map` type benchmarks. This is used to
// get before/after runs that compare the performance of Carbon's Map versus
// other implementations.
enum class MapOverride : uint8_t {
  None,
  Abseil,
  Boost,
  LLVM,
  LLVMAndCarbonHash,
};

#ifndef CARBON_MAP_BENCH_OVERRIDE
#define CARBON_MAP_BENCH_OVERRIDE None
#endif
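
// For example, assuming a Bazel-style build like Carbon's, passing something
// along the lines of `--copt=-DCARBON_MAP_BENCH_OVERRIDE=Abseil` (the exact
// flag spelling depends on the build configuration) re-points every `Map<...>`
// benchmark instantiation below at `absl::flat_hash_map` for an A/B run.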

template <typename MapT, MapOverride Override>
struct MapWrapperOverride : MapWrapperImpl<MapT> {};

template <typename KeyT, typename ValueT, int MinSmallSize>
struct MapWrapperOverride<Map<KeyT, ValueT, MinSmallSize>, MapOverride::Abseil>
    : MapWrapperImpl<absl::flat_hash_map<KeyT, ValueT>> {};

template <typename KeyT, typename ValueT, int MinSmallSize>
struct MapWrapperOverride<Map<KeyT, ValueT, MinSmallSize>, MapOverride::Boost>
    : MapWrapperImpl<boost::unordered::unordered_flat_map<KeyT, ValueT>> {};

template <typename KeyT, typename ValueT, int MinSmallSize>
struct MapWrapperOverride<Map<KeyT, ValueT, MinSmallSize>, MapOverride::LLVM>
    : MapWrapperImpl<llvm::DenseMap<KeyT, ValueT>> {};

template <typename KeyT, typename ValueT, int MinSmallSize>
struct MapWrapperOverride<Map<KeyT, ValueT, MinSmallSize>,
                          MapOverride::LLVMAndCarbonHash>
    : MapWrapperImpl<llvm::DenseMap<KeyT, ValueT, CarbonHashDI<KeyT>>> {};

template <typename MapT>
using MapWrapper =
    MapWrapperOverride<MapT, MapOverride::CARBON_MAP_BENCH_OVERRIDE>;
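
// With the default `None` override, `MapWrapper<Map<int, int>>` is simply
// `MapWrapperImpl<Map<int, int>>` and benchmarks Carbon's own map; with, say,
// `LLVM` it silently wraps `llvm::DenseMap<int, int>` instead. Note that only
// the `Map<...>` instantiations are affected; the benchmark rows that name
// other map types directly always measure those types.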

template <typename MapT>
auto ReportMetrics(const MapWrapper<MapT>& m_wrapper, benchmark::State& state)
    -> void {
  // Report some extra statistics about the Carbon type.
  if constexpr (IsCarbonMap<MapWrapper<MapT>>) {
    ReportTableMetrics(m_wrapper.m, state);
  }
}

// NOLINTBEGIN(bugprone-macro-parentheses): Parentheses are incorrect here.
#define MAP_BENCHMARK_ONE_OP_SIZE(NAME, APPLY, KT, VT)                         \
  BENCHMARK(NAME<Map<KT, VT>>)->Apply(APPLY);                                  \
  BENCHMARK(NAME<absl::flat_hash_map<KT, VT>>)->Apply(APPLY);                  \
  BENCHMARK(NAME<boost::unordered::unordered_flat_map<KT, VT>>)->Apply(APPLY); \
  BENCHMARK(NAME<llvm::DenseMap<KT, VT>>)->Apply(APPLY);                       \
  BENCHMARK(NAME<llvm::DenseMap<KT, VT, CarbonHashDI<KT>>>)->Apply(APPLY)
// NOLINTEND(bugprone-macro-parentheses)

#define MAP_BENCHMARK_ONE_OP(NAME, APPLY)                       \
  MAP_BENCHMARK_ONE_OP_SIZE(NAME, APPLY, int, int);             \
  MAP_BENCHMARK_ONE_OP_SIZE(NAME, APPLY, int*, int*);           \
  MAP_BENCHMARK_ONE_OP_SIZE(NAME, APPLY, int, llvm::StringRef); \
  MAP_BENCHMARK_ONE_OP_SIZE(NAME, APPLY, llvm::StringRef, int)
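
// As an illustration of what these macros generate, an invocation such as
// `MAP_BENCHMARK_ONE_OP_SIZE(BM_MapContainsHit, HitArgs, int, int)` expands to
// one `BENCHMARK(...)->Apply(HitArgs)` registration per map type:
//
//   BENCHMARK(BM_MapContainsHit<Map<int, int>>)->Apply(HitArgs);
//   BENCHMARK(BM_MapContainsHit<absl::flat_hash_map<int, int>>)->Apply(HitArgs);
//   ... and so on for the Boost and LLVM map types.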

// Benchmark the minimal latency of checking if a key is contained within a map,
// when it *is* definitely in that map. Because this is only really measuring
// the *minimal* latency, it is more similar to a throughput benchmark.
//
// While this is structured to observe the latency of testing for presence of a
// key, it is important to understand what this actually measures. The boolean
// result of testing whether a key is in a map is fundamentally provided not by
// accessing some data, but by branching on data to a control flow path which
// sets the boolean to `true` or `false`, so the result can be speculatively
// provided based on predicting the conditional branch without waiting for the
// results of the comparison to become available. And because this is a small
// operation and we arrange for all the candidate keys to be present, that
// branch *should* be predicted extremely well. The result is that this measures
// the un-speculated latency of testing for presence, which should be small or
// zero, and which is why this is ultimately more similar to a throughput
// benchmark.
//
// Because of these measurement oddities, the specific measurements here may not
// be very interesting for predicting real-world performance, but they are
// useful for comparing how 'cheap' the operation is across changes to the data
// structure or between similar data structures with similar properties.
template <typename MapT>
static void BM_MapContainsHit(benchmark::State& state) {
  using MapWrapperT = MapWrapper<MapT>;
  using KT = typename MapWrapperT::KeyT;
  using VT = typename MapWrapperT::ValueT;
  MapWrapperT m;
  auto [keys, lookup_keys] =
      GetKeysAndHitKeys<KT>(state.range(0), state.range(1));
  for (auto k : keys) {
    m.BenchInsert(k, MakeValue<VT>());
  }

  ssize_t lookup_keys_size = lookup_keys.size();
  while (state.KeepRunningBatch(lookup_keys_size)) {
    for (ssize_t i = 0; i < lookup_keys_size;) {
      // We block optimizing `i` as that has proven both more effective at
      // blocking the loop from being optimized away and avoiding disruption of
      // the generated code that we're benchmarking.
      benchmark::DoNotOptimize(i);

      bool result = m.BenchContains(lookup_keys[i]);
      CARBON_DCHECK(result);
      // We use the lookup success to step through keys, establishing a
      // dependency between each lookup. This doesn't fully allow us to measure
      // latency rather than throughput, as noted above.
      i += static_cast<ssize_t>(result);
    }
  }

  ReportMetrics(m, state);
}
MAP_BENCHMARK_ONE_OP(BM_MapContainsHit, HitArgs);

// Similar to `BM_MapContainsHit`, while this is structured as a latency
// benchmark, the critical path is expected to be well predicted and so it
// should turn into something closer to a throughput benchmark.
template <typename MapT>
static void BM_MapContainsMiss(benchmark::State& state) {
  using MapWrapperT = MapWrapper<MapT>;
  using KT = typename MapWrapperT::KeyT;
  using VT = typename MapWrapperT::ValueT;
  MapWrapperT m;
  auto [keys, lookup_keys] = GetKeysAndMissKeys<KT>(state.range(0));
  for (auto k : keys) {
    m.BenchInsert(k, MakeValue<VT>());
  }

  ssize_t lookup_keys_size = lookup_keys.size();
  while (state.KeepRunningBatch(lookup_keys_size)) {
    for (ssize_t i = 0; i < lookup_keys_size;) {
      benchmark::DoNotOptimize(i);

      bool result = m.BenchContains(lookup_keys[i]);
      CARBON_DCHECK(!result);
      i += static_cast<ssize_t>(!result);
    }
  }

  ReportMetrics(m, state);
}
MAP_BENCHMARK_ONE_OP(BM_MapContainsMiss, SizeArgs);

// This is a genuine latency benchmark. We look up a key in the hashtable and
// use the value associated with that key in the critical path of loading the
// next iteration's key. We still ensure the keys are always present, and so we
// generally expect the data structure branches to be well predicted. But we
// vary the keys aggressively to avoid any prediction artifacts from repeatedly
// examining the same key.
//
// This latency can be very helpful for understanding a range of data structure
// behaviors:
// - Many users of hashtables are directly dependent on the latency of this
//   operation, and this micro-benchmark will reflect the expected latency for
//   them.
// - It shows how latency varies across different sizes of table and different
//   fractions of the table being accessed (and thus needing space in the
//   cache).
//
// However, it remains an ultimately synthetic and unrepresentative benchmark.
// It should primarily be used to understand the relative cost of these
// operations between versions of the data structure or between related data
// structures.
//
// We vary both the number of entries in the table and the number of distinct
// keys used when doing lookups. As the table becomes large, the latter dictates
// the fraction of the table that will be accessed and thus the working set size
// of the benchmark. Querying the same small number of keys in even a large
// table doesn't actually encounter any cache pressure, so only a few of these
// benchmarks will show any effects of the caching subsystem.
template <typename MapT>
static void BM_MapLookupHit(benchmark::State& state) {
  using MapWrapperT = MapWrapper<MapT>;
  using KT = typename MapWrapperT::KeyT;
  using VT = typename MapWrapperT::ValueT;
  MapWrapperT m;
  auto [keys, lookup_keys] =
      GetKeysAndHitKeys<KT>(state.range(0), state.range(1));
  for (auto k : keys) {
    m.BenchInsert(k, MakeValue<VT>());
  }

  ssize_t lookup_keys_size = lookup_keys.size();
  while (state.KeepRunningBatch(lookup_keys_size)) {
    for (ssize_t i = 0; i < lookup_keys_size;) {
      benchmark::DoNotOptimize(i);

      bool result = m.BenchLookup(lookup_keys[i]);
      CARBON_DCHECK(result);
      i += static_cast<ssize_t>(result);
    }
  }

  ReportMetrics(m, state);
}
MAP_BENCHMARK_ONE_OP(BM_MapLookupHit, HitArgs);

// We also do some minimal benchmarking with integers that have a large number
// of low zero bits shifted into them. These present particular challenges to
// the hashing strategy Carbon's hash tables use and so they help form stress
// tests and benchmarks to make sure the hash function quality remains
// reasonable even under adverse conditions. We can't go past a certain limit
// here without our hash tables becoming impossibly slow due to complete
// collapse of the hash functions -- if we ever need to hash integers with more
// than 32 low zero bits, we'll ask that code to use a custom hash algorithm.
//
// We don't benchmark these everywhere as they only provide marginal information
// beyond the core types, and checking just this operation covers that
// sufficiently.
MAP_BENCHMARK_ONE_OP_SIZE(BM_MapLookupHit, HitArgs, LowZeroBitInt<12>, int);
MAP_BENCHMARK_ONE_OP_SIZE(BM_MapLookupHit, HitArgs, LowZeroBitInt<24>, int);
MAP_BENCHMARK_ONE_OP_SIZE(BM_MapLookupHit, HitArgs, LowZeroBitInt<32>, int);
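
// (As a concrete illustration: a key type such as `LowZeroBitInt<12>` is
// presumably an integer whose low 12 bits are always zero, i.e. a multiple of
// 4096 such as 0x1000 or 0x2000, so only the high bits of each key provide any
// entropy for the hash function to work with.)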

// This is an update throughput benchmark in practice. While whether the key was
// a hit is kept in the critical path, we only use keys that are hits and so
// expect that to be fully predicted and speculated.
//
// However, we expect this fairly closely matches how user code interacts with
// an update-style API. It will have some conditional testing (even if just an
// assert) on whether the key was a hit and otherwise continue executing. As a
// consequence the actual update is expected to not be in a meaningful critical
// path.
//
// This still provides a basic way to measure the cost of this operation,
// especially when comparing between implementations or across different hash
// tables.
template <typename MapT>
static void BM_MapUpdateHit(benchmark::State& state) {
  using MapWrapperT = MapWrapper<MapT>;
  using KT = typename MapWrapperT::KeyT;
  using VT = typename MapWrapperT::ValueT;
  MapWrapperT m;
  auto [keys, lookup_keys] =
      GetKeysAndHitKeys<KT>(state.range(0), state.range(1));
  for (auto k : keys) {
    m.BenchInsert(k, MakeValue<VT>());
  }

  ssize_t lookup_keys_size = lookup_keys.size();
  while (state.KeepRunningBatch(lookup_keys_size)) {
    for (ssize_t i = 0; i < lookup_keys_size; ++i) {
      benchmark::DoNotOptimize(i);

      bool inserted = m.BenchUpdate(lookup_keys[i], MakeValue2<VT>());
      CARBON_DCHECK(!inserted);
    }
  }

  ReportMetrics(m, state);
}
MAP_BENCHMARK_ONE_OP(BM_MapUpdateHit, HitArgs);

// First erase and then insert the key. The code path will always be the same
// here and so we expect this to largely be a throughput benchmark because of
// branch prediction and speculative execution.
//
// We don't expect erase followed by insertion to be a common user code
// sequence, but we don't have a good way of benchmarking either erase or insert
// in isolation -- each would change the size of the table and thus the next
// iteration's benchmark. And if we try to correct the table size outside of the
// timed region, we end up trying to exclude too fine-grained a region from the
// timers to get good measurement data.
//
// Our solution is to benchmark both erase and insertion back to back. We can
// then get a good profile of the code sequence of each, and at least measure
// the sum cost of these reliably. Careful profiling can help attribute that
// cost between erase and insert in order to understand which of the two
// operations is contributing most to any performance artifacts observed.
template <typename MapT>
static void BM_MapEraseUpdateHit(benchmark::State& state) {
  using MapWrapperT = MapWrapper<MapT>;
  using KT = typename MapWrapperT::KeyT;
  using VT = typename MapWrapperT::ValueT;
  MapWrapperT m;
  auto [keys, lookup_keys] =
      GetKeysAndHitKeys<KT>(state.range(0), state.range(1));
  for (auto k : keys) {
    m.BenchInsert(k, MakeValue<VT>());
  }

  ssize_t lookup_keys_size = lookup_keys.size();
  while (state.KeepRunningBatch(lookup_keys_size)) {
    for (ssize_t i = 0; i < lookup_keys_size; ++i) {
      benchmark::DoNotOptimize(i);

      m.BenchErase(lookup_keys[i]);
      benchmark::ClobberMemory();

      bool inserted = m.BenchUpdate(lookup_keys[i], MakeValue2<VT>());
      CARBON_DCHECK(inserted);
    }
  }
}
MAP_BENCHMARK_ONE_OP(BM_MapEraseUpdateHit, HitArgs);

// NOLINTBEGIN(bugprone-macro-parentheses): Parentheses are incorrect here.
#define MAP_BENCHMARK_OP_SEQ_SIZE(NAME, KT, VT)                   \
  BENCHMARK(NAME<Map<KT, VT>>)->Apply(SizeArgs);                  \
  BENCHMARK(NAME<absl::flat_hash_map<KT, VT>>)->Apply(SizeArgs);  \
  BENCHMARK(NAME<boost::unordered::unordered_flat_map<KT, VT>>)   \
      ->Apply(SizeArgs);                                          \
  BENCHMARK(NAME<llvm::DenseMap<KT, VT>>)->Apply(SizeArgs);       \
  BENCHMARK(NAME<llvm::DenseMap<KT, VT, CarbonHashDI<KT>>>)->Apply(SizeArgs)
// NOLINTEND(bugprone-macro-parentheses)

#define MAP_BENCHMARK_OP_SEQ(NAME)                       \
  MAP_BENCHMARK_OP_SEQ_SIZE(NAME, int, int);             \
  MAP_BENCHMARK_OP_SEQ_SIZE(NAME, int*, int*);           \
  MAP_BENCHMARK_OP_SEQ_SIZE(NAME, int, llvm::StringRef); \
  MAP_BENCHMARK_OP_SEQ_SIZE(NAME, llvm::StringRef, int)

// This is an interesting, somewhat specialized benchmark that measures the cost
// of inserting a sequence of key/value pairs into a table with no collisions up
// to some size and then inserting a colliding key and throwing away the table.
//
// This can give an idea of the cost of building up a map of a particular size,
// but without actually using it. Or of algorithms like cycle-detection which
// for some reason need an associative container.
//
// It also covers both the insert-into-an-empty-slot code path that isn't
// covered elsewhere, and the code path for growing a table to a larger size.
//
// Because this benchmark operates on whole maps, we also compute the number of
// probed keys for Carbon's map as that is both a general reflection of the
// efficacy of the underlying hash function, and a direct factor that drives the
// cost of these operations.
template <typename MapT>
static void BM_MapInsertSeq(benchmark::State& state) {
  using MapWrapperT = MapWrapper<MapT>;
  using KT = typename MapWrapperT::KeyT;
  using VT = typename MapWrapperT::ValueT;
  constexpr ssize_t LookupKeysSize = 1 << 8;
  auto [keys, lookup_keys] =
      GetKeysAndHitKeys<KT>(state.range(0), LookupKeysSize);

  // Note that we don't force batches that use all the lookup keys because
  // there's no difference in cache usage by covering all the different lookup
  // keys.
  ssize_t i = 0;
  for (auto _ : state) {
    benchmark::DoNotOptimize(i);

    MapWrapperT m;
    for (auto k : keys) {
      bool inserted = m.BenchInsert(k, MakeValue<VT>());
      CARBON_DCHECK(inserted, "Must be a successful insert!");
    }

    // Now insert a final random repeated key.
    bool inserted = m.BenchInsert(lookup_keys[i], MakeValue2<VT>());
    CARBON_DCHECK(!inserted, "Must already be in the map!");

    // Rotate through the shuffled keys.
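    // Because `LookupKeysSize` is a power of two, masking with
    // `LookupKeysSize - 1` wraps the index back to zero once the last lookup
    // key has been used.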
    i = (i + static_cast<ssize_t>(!inserted)) & (LookupKeysSize - 1);
  }

  // It can be easier in some cases to think of this as a key-throughput rate of
  // insertion rather than the latency of inserting N keys, so construct the
  // rate counter as well.
  state.counters["KeyRate"] = benchmark::Counter(
      keys.size(), benchmark::Counter::kIsIterationInvariantRate);

  // Report some extra statistics about the Carbon type.
  if constexpr (IsCarbonMap<MapWrapperT>) {
    // Re-build a map outside of the timing loop to look at the statistics
    // rather than the timing.
    MapWrapperT m;
    for (auto k : keys) {
      bool inserted = m.BenchInsert(k, MakeValue<VT>());
      CARBON_DCHECK(inserted, "Must be a successful insert!");
    }
    ReportMetrics(m, state);

    // Uncomment this call to print out statistics about the index-collisions
    // among these keys for debugging:
    //
    // RawHashtable::DumpHashStatistics(keys);
  }
}
MAP_BENCHMARK_OP_SEQ(BM_MapInsertSeq);

}  // namespace
}  // namespace Carbon