// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#include <benchmark/benchmark.h>

#include <boost/unordered/unordered_flat_map.hpp>
#include <cstdint>
#include <type_traits>

#include "absl/container/flat_hash_map.h"
#include "common/map.h"
#include "common/raw_hashtable_benchmark_helpers.h"
#include "llvm/ADT/DenseMap.h"

namespace Carbon {
namespace {

using RawHashtable::CarbonHashDI;
using RawHashtable::GetKeysAndHitKeys;
using RawHashtable::GetKeysAndMissKeys;
using RawHashtable::HitArgs;
using RawHashtable::LowZeroBitInt;
using RawHashtable::ReportTableMetrics;
using RawHashtable::SizeArgs;
using RawHashtable::ValueToBool;

// Helpers to synthesize some value of one of the three types we use as value
// types.
template <typename T>
auto MakeValue() -> T {
  if constexpr (std::is_same_v<T, llvm::StringRef>) {
    return "abc";
  } else if constexpr (std::is_pointer_v<T>) {
    static std::remove_pointer_t<T> x;
    return &x;
  } else {
    return 42;
  }
}
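
// A second helper that synthesizes a value distinct from `MakeValue<T>()`.
// The update benchmarks below use it so that updating an existing entry
// actually writes a different value into the table.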
template <typename T>
auto MakeValue2() -> T {
  if constexpr (std::is_same_v<T, llvm::StringRef>) {
    return "qux";
  } else if constexpr (std::is_pointer_v<T>) {
    static std::remove_pointer_t<T> y;
    return &y;
  } else {
    return 7;
  }
}
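
// Type trait detecting Carbon's `Map` type, used below to gate the
// Carbon-specific metrics reporting behind `if constexpr`.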
template <typename MapT>
struct IsCarbonMapImpl : std::false_type {};
template <typename KT, typename VT, int MinSmallSize>
struct IsCarbonMapImpl<Map<KT, VT, MinSmallSize>> : std::true_type {};

template <typename MapT>
static constexpr bool IsCarbonMap = IsCarbonMapImpl<MapT>::value;

// A wrapper that we specialize for each map type in order to provide a common
// API for the benchmarks, since the different map data structures expose
// different APIs. The primary template assumes a roughly `std::unordered_map`
// API design; types with a different design are supported through
// specializations.
template <typename MapT>
struct MapWrapperImpl {
  using KeyT = typename MapT::key_type;
  using ValueT = typename MapT::mapped_type;

  MapT m;

  auto BenchContains(KeyT k) -> bool { return m.find(k) != m.end(); }

  auto BenchLookup(KeyT k) -> bool {
    auto it = m.find(k);
    if (it == m.end()) {
      return false;
    }
    return ValueToBool(it->second);
  }

  auto BenchInsert(KeyT k, ValueT v) -> bool {
    auto result = m.insert({k, v});
    return result.second;
  }

  auto BenchUpdate(KeyT k, ValueT v) -> bool {
    auto result = m.insert({k, v});
    result.first->second = v;
    return result.second;
  }

  auto BenchErase(KeyT k) -> bool { return m.erase(k) != 0; }
};

// Explicit (partial) specialization for the Carbon map type that uses its
// different API design.
template <typename KT, typename VT, int MinSmallSize>
struct MapWrapperImpl<Map<KT, VT, MinSmallSize>> {
  using MapT = Map<KT, VT, MinSmallSize>;
  using KeyT = KT;
  using ValueT = VT;

  MapT m;

  auto BenchContains(KeyT k) -> bool { return m.Contains(k); }

  auto BenchLookup(KeyT k) -> bool {
    auto result = m.Lookup(k);
    if (!result) {
      return false;
    }
    return ValueToBool(result.value());
  }

  auto BenchInsert(KeyT k, ValueT v) -> bool {
    auto result = m.Insert(k, v);
    return result.is_inserted();
  }

  auto BenchUpdate(KeyT k, ValueT v) -> bool {
    auto result = m.Update(k, v);
    return result.is_inserted();
  }

  auto BenchErase(KeyT k) -> bool { return m.Erase(k); }
};

// Provide a way to override the Carbon-`Map`-specific benchmark runs with
// another hashtable implementation. When building, you can use one of these
// enum names in a macro define such as `-DCARBON_MAP_BENCH_OVERRIDE=Name` in
// order to trigger a specific override for the `Map` type benchmarks. This is
// used to get before/after runs that compare the performance of Carbon's Map
// versus other implementations.
enum class MapOverride : uint8_t {
  None,
  Abseil,
  Boost,
  LLVM,
  LLVMAndCarbonHash,
};

#ifndef CARBON_MAP_BENCH_OVERRIDE
#define CARBON_MAP_BENCH_OVERRIDE None
#endif

template <typename MapT, MapOverride Override>
struct MapWrapperOverride : MapWrapperImpl<MapT> {};

template <typename KeyT, typename ValueT, int MinSmallSize>
struct MapWrapperOverride<Map<KeyT, ValueT, MinSmallSize>, MapOverride::Abseil>
    : MapWrapperImpl<absl::flat_hash_map<KeyT, ValueT>> {};

template <typename KeyT, typename ValueT, int MinSmallSize>
struct MapWrapperOverride<Map<KeyT, ValueT, MinSmallSize>, MapOverride::Boost>
    : MapWrapperImpl<boost::unordered::unordered_flat_map<KeyT, ValueT>> {};

template <typename KeyT, typename ValueT, int MinSmallSize>
struct MapWrapperOverride<Map<KeyT, ValueT, MinSmallSize>, MapOverride::LLVM>
    : MapWrapperImpl<llvm::DenseMap<KeyT, ValueT>> {};

template <typename KeyT, typename ValueT, int MinSmallSize>
struct MapWrapperOverride<Map<KeyT, ValueT, MinSmallSize>,
                          MapOverride::LLVMAndCarbonHash>
    : MapWrapperImpl<llvm::DenseMap<KeyT, ValueT, CarbonHashDI<KeyT>>> {};

template <typename MapT>
using MapWrapper =
    MapWrapperOverride<MapT, MapOverride::CARBON_MAP_BENCH_OVERRIDE>;
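
// For example (a hypothetical invocation; the exact flag syntax depends on
// your build setup), compiling this file with
// `-DCARBON_MAP_BENCH_OVERRIDE=Abseil` makes every `Map<KT, VT>` benchmark
// below run against `absl::flat_hash_map<KT, VT>` instead, so two builds can
// be compared as before/after runs.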

template <typename MapT>
auto ReportMetrics(const MapWrapper<MapT>& m_wrapper, benchmark::State& state)
    -> void {
  // Report some extra statistics about the Carbon type.
  if constexpr (IsCarbonMap<MapT>) {
    ReportTableMetrics(m_wrapper.m, state);
  }
}

// NOLINTBEGIN(bugprone-macro-parentheses): Parentheses are incorrect here.
#define MAP_BENCHMARK_ONE_OP_SIZE(NAME, APPLY, KT, VT)                         \
  BENCHMARK(NAME<Map<KT, VT>>)->Apply(APPLY);                                  \
  BENCHMARK(NAME<absl::flat_hash_map<KT, VT>>)->Apply(APPLY);                  \
  BENCHMARK(NAME<boost::unordered::unordered_flat_map<KT, VT>>)->Apply(APPLY); \
  BENCHMARK(NAME<llvm::DenseMap<KT, VT>>)->Apply(APPLY);                       \
  BENCHMARK(NAME<llvm::DenseMap<KT, VT, CarbonHashDI<KT>>>)->Apply(APPLY)
// NOLINTEND(bugprone-macro-parentheses)

#define MAP_BENCHMARK_ONE_OP(NAME, APPLY)                       \
  MAP_BENCHMARK_ONE_OP_SIZE(NAME, APPLY, int, int);             \
  MAP_BENCHMARK_ONE_OP_SIZE(NAME, APPLY, int*, int*);           \
  MAP_BENCHMARK_ONE_OP_SIZE(NAME, APPLY, int, llvm::StringRef); \
  MAP_BENCHMARK_ONE_OP_SIZE(NAME, APPLY, llvm::StringRef, int)
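
// Together these macros register each benchmark for every combination of map
// implementation and key/value type. For example,
// `MAP_BENCHMARK_ONE_OP(BM_Foo, SizeArgs)` (a hypothetical benchmark name)
// expands to twenty registrations: five map implementations times four
// key/value type pairs, each applied to the `SizeArgs` argument generator.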

// Benchmark the minimal latency of checking whether a key is contained within
// a map, when it definitely *is* in that map. Because this only really
// measures the *minimal* latency, it is more similar to a throughput
// benchmark.
//
// While this is structured to observe the latency of testing for the presence
// of a key, it is important to understand what it actually measures. The
// boolean result of the test is fundamentally provided not by accessing some
// data, but by branching on data to a control flow path which sets the boolean
// to `true` or `false`. As a result, it can be speculatively provided based on
// predicting the conditional branch, without waiting for the results of the
// comparison to become available. And because this is a small operation and we
// arrange for all the candidate keys to be present, that branch *should* be
// predicted extremely well. The consequence is that this measures the
// un-speculated latency of testing for presence, which should be small or
// zero -- which is why this is ultimately more similar to a throughput
// benchmark.
//
// Because of these measurement oddities, the specific measurements here may
// not be very interesting for predicting real-world performance, but they are
// useful for comparing how 'cheap' the operation is across changes to the data
// structure or between similar data structures with similar properties.
template <typename MapT>
static void BM_MapContainsHit(benchmark::State& state) {
  using MapWrapperT = MapWrapper<MapT>;
  using KT = typename MapWrapperT::KeyT;
  using VT = typename MapWrapperT::ValueT;
  MapWrapperT m;
  auto [keys, lookup_keys] =
      GetKeysAndHitKeys<KT>(state.range(0), state.range(1));
  for (auto k : keys) {
    m.BenchInsert(k, MakeValue<VT>());
  }

  ssize_t lookup_keys_size = lookup_keys.size();
  while (state.KeepRunningBatch(lookup_keys_size)) {
    for (ssize_t i = 0; i < lookup_keys_size;) {
      // We block optimizing `i` as that has proven both more effective at
      // blocking the loop from being optimized away and avoiding disruption of
      // the generated code that we're benchmarking.
      benchmark::DoNotOptimize(i);

      bool result = m.BenchContains(lookup_keys[i]);
      CARBON_DCHECK(result);
      // We use the lookup success to step through keys, establishing a
      // dependency between each lookup. This doesn't fully allow us to measure
      // latency rather than throughput, as noted above.
      i += static_cast<ssize_t>(result);
    }
  }

  ReportMetrics(m, state);
}
MAP_BENCHMARK_ONE_OP(BM_MapContainsHit, HitArgs);

// Similar to `BM_MapContainsHit`: while this is structured as a latency
// benchmark, the critical path is expected to be well predicted and so it
// should turn into something closer to a throughput benchmark.
template <typename MapT>
static void BM_MapContainsMiss(benchmark::State& state) {
  using MapWrapperT = MapWrapper<MapT>;
  using KT = typename MapWrapperT::KeyT;
  using VT = typename MapWrapperT::ValueT;
  MapWrapperT m;
  auto [keys, lookup_keys] = GetKeysAndMissKeys<KT>(state.range(0));
  for (auto k : keys) {
    m.BenchInsert(k, MakeValue<VT>());
  }

  ssize_t lookup_keys_size = lookup_keys.size();
  while (state.KeepRunningBatch(lookup_keys_size)) {
    for (ssize_t i = 0; i < lookup_keys_size;) {
      benchmark::DoNotOptimize(i);

      bool result = m.BenchContains(lookup_keys[i]);
      CARBON_DCHECK(!result);
      i += static_cast<ssize_t>(!result);
    }
  }

  ReportMetrics(m, state);
}
MAP_BENCHMARK_ONE_OP(BM_MapContainsMiss, SizeArgs);
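
// Note the differing argument generators: `HitArgs` presumably sweeps both the
// table size (`state.range(0)`) and the number of distinct hit keys
// (`state.range(1)`), while `SizeArgs` sweeps only the table size, which
// matches `GetKeysAndMissKeys` taking a single argument.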

// This is a genuine latency benchmark. We look up a key in the hashtable and
// use the value associated with that key in the critical path of loading the
// next iteration's key. We still ensure the keys are always present, and so we
// generally expect the data structure branches to be well predicted. But we
// vary the keys aggressively to avoid any prediction artifacts from repeatedly
// examining the same key.
//
// This latency can be very helpful for understanding a range of data structure
// behaviors:
// - Many users of hashtables are directly dependent on the latency of this
//   operation, and this micro-benchmark will reflect the expected latency for
//   them.
// - It shows how latency varies across different sizes of table and different
//   fractions of the table being accessed (and thus needing space in the
//   cache).
//
// However, it remains an ultimately synthetic and unrepresentative benchmark.
// It should primarily be used to understand the relative cost of these
// operations between versions of the data structure or between related data
// structures.
//
// We vary both the number of entries in the table and the number of distinct
// keys used when doing lookups. As the table becomes large, the latter
// dictates the fraction of the table that will be accessed and thus the
// working set size of the benchmark. Querying the same small number of keys in
// even a large table doesn't actually encounter any cache pressure, so only a
// few of these benchmarks will show any effects of the caching subsystem.
template <typename MapT>
static void BM_MapLookupHit(benchmark::State& state) {
  using MapWrapperT = MapWrapper<MapT>;
  using KT = typename MapWrapperT::KeyT;
  using VT = typename MapWrapperT::ValueT;
  MapWrapperT m;
  auto [keys, lookup_keys] =
      GetKeysAndHitKeys<KT>(state.range(0), state.range(1));
  for (auto k : keys) {
    m.BenchInsert(k, MakeValue<VT>());
  }

  ssize_t lookup_keys_size = lookup_keys.size();
  while (state.KeepRunningBatch(lookup_keys_size)) {
    for (ssize_t i = 0; i < lookup_keys_size;) {
      benchmark::DoNotOptimize(i);

      bool result = m.BenchLookup(lookup_keys[i]);
      CARBON_DCHECK(result);
      i += static_cast<ssize_t>(result);
    }
  }

  ReportMetrics(m, state);
}
MAP_BENCHMARK_ONE_OP(BM_MapLookupHit, HitArgs);

// We also do some minimal benchmarking with integers that have a large number
// of low zero bits shifted into them. These present particular challenges to
// the hashing strategy Carbon's hash tables use, and so they help form stress
// tests and benchmarks to make sure the hash function quality remains
// reasonable even under adverse conditions. We can't go past a certain limit
// here without our hash tables becoming impossibly slow due to complete
// collapse of the hash functions -- if we ever need to hash integers with more
// than 32 low zero bits, we'll ask that code to use a custom hash algorithm.
//
// We don't benchmark these everywhere as they only provide marginal
// information beyond the core types, and checking just this operation covers
// that sufficiently.
MAP_BENCHMARK_ONE_OP_SIZE(BM_MapLookupHit, HitArgs, LowZeroBitInt<12>, int);
MAP_BENCHMARK_ONE_OP_SIZE(BM_MapLookupHit, HitArgs, LowZeroBitInt<24>, int);
MAP_BENCHMARK_ONE_OP_SIZE(BM_MapLookupHit, HitArgs, LowZeroBitInt<32>, int);
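
// `LowZeroBitInt<N>` comes from the benchmark helpers; based on the comment
// above, it models integer keys whose low N bits are all zero (as if shifted
// left by N), concentrating all of the key entropy in the high bits.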

// This is an update throughput benchmark in practice. While whether the key
// was a hit is kept in the critical path, we only use keys that are hits and
// so expect that to be fully predicted and speculated.
//
// However, we expect this to fairly closely match how user code interacts with
// an update-style API. Such code will have some conditional testing (even if
// just an assert) on whether the key was a hit, and will otherwise continue
// executing. As a consequence, the actual update is expected not to be in a
// meaningful critical path.
//
// This still provides a basic way to measure the cost of this operation,
// especially when comparing between implementations or across different hash
// tables.
template <typename MapT>
static void BM_MapUpdateHit(benchmark::State& state) {
  using MapWrapperT = MapWrapper<MapT>;
  using KT = typename MapWrapperT::KeyT;
  using VT = typename MapWrapperT::ValueT;
  MapWrapperT m;
  auto [keys, lookup_keys] =
      GetKeysAndHitKeys<KT>(state.range(0), state.range(1));
  for (auto k : keys) {
    m.BenchInsert(k, MakeValue<VT>());
  }

  ssize_t lookup_keys_size = lookup_keys.size();
  while (state.KeepRunningBatch(lookup_keys_size)) {
    for (ssize_t i = 0; i < lookup_keys_size; ++i) {
      benchmark::DoNotOptimize(i);

      bool inserted = m.BenchUpdate(lookup_keys[i], MakeValue2<VT>());
      CARBON_DCHECK(!inserted);
    }
  }

  ReportMetrics(m, state);
}
MAP_BENCHMARK_ONE_OP(BM_MapUpdateHit, HitArgs);

// First erase and then insert the key. The code path will always be the same
// here, and so we expect this to largely be a throughput benchmark because of
// branch prediction and speculative execution.
//
// We don't expect erase followed by insertion to be a common user code
// sequence, but we don't have a good way of benchmarking either erase or
// insert in isolation -- each would change the size of the table and thus the
// next iteration's benchmark. And if we try to correct the table size outside
// of the timed region, we end up trying to exclude too fine-grained a region
// from the timers to get good measurement data.
//
// Our solution is to benchmark both erase and insertion back to back. We can
// then get a good profile of the code sequence of each, and at least measure
// the sum cost of these reliably. Careful profiling can help attribute that
// cost between erase and insert in order to understand which of the two
// operations contributes most to any performance artifacts observed.
template <typename MapT>
static void BM_MapEraseUpdateHit(benchmark::State& state) {
  using MapWrapperT = MapWrapper<MapT>;
  using KT = typename MapWrapperT::KeyT;
  using VT = typename MapWrapperT::ValueT;
  MapWrapperT m;
  auto [keys, lookup_keys] =
      GetKeysAndHitKeys<KT>(state.range(0), state.range(1));
  for (auto k : keys) {
    m.BenchInsert(k, MakeValue<VT>());
  }

  ssize_t lookup_keys_size = lookup_keys.size();
  while (state.KeepRunningBatch(lookup_keys_size)) {
    for (ssize_t i = 0; i < lookup_keys_size; ++i) {
      benchmark::DoNotOptimize(i);

      m.BenchErase(lookup_keys[i]);
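      // Force the erase's side effects to materialize before the re-insert;
      // presumably this barrier keeps the compiler from folding the
      // erase/insert pair together and defeating the measurement.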
      benchmark::ClobberMemory();

      bool inserted = m.BenchUpdate(lookup_keys[i], MakeValue2<VT>());
      CARBON_DCHECK(inserted);
    }
  }
}
MAP_BENCHMARK_ONE_OP(BM_MapEraseUpdateHit, HitArgs);

// NOLINTBEGIN(bugprone-macro-parentheses): Parentheses are incorrect here.
#define MAP_BENCHMARK_OP_SEQ_SIZE(NAME, KT, VT)                  \
  BENCHMARK(NAME<Map<KT, VT>>)->Apply(SizeArgs);                 \
  BENCHMARK(NAME<absl::flat_hash_map<KT, VT>>)->Apply(SizeArgs); \
  BENCHMARK(NAME<boost::unordered::unordered_flat_map<KT, VT>>)  \
      ->Apply(SizeArgs);                                         \
  BENCHMARK(NAME<llvm::DenseMap<KT, VT>>)->Apply(SizeArgs);      \
  BENCHMARK(NAME<llvm::DenseMap<KT, VT, CarbonHashDI<KT>>>)->Apply(SizeArgs)
// NOLINTEND(bugprone-macro-parentheses)

#define MAP_BENCHMARK_OP_SEQ(NAME)                       \
  MAP_BENCHMARK_OP_SEQ_SIZE(NAME, int, int);             \
  MAP_BENCHMARK_OP_SEQ_SIZE(NAME, int*, int*);           \
  MAP_BENCHMARK_OP_SEQ_SIZE(NAME, int, llvm::StringRef); \
  MAP_BENCHMARK_OP_SEQ_SIZE(NAME, llvm::StringRef, int)

// This is an interesting, somewhat specialized benchmark that measures the
// cost of inserting a sequence of key/value pairs into a table with no
// collisions up to some size, then inserting a colliding key and throwing away
// the table.
//
// This can give an idea of the cost of building up a map of a particular size
// without actually using it, or of algorithms like cycle detection that need
// an associative container.
//
// It also covers both the insert-into-an-empty-slot code path that isn't
// covered elsewhere, and the code path for growing a table to a larger size.
//
// Because this benchmark operates on whole maps, we also compute the number of
// probed keys for Carbon's map, as that is both a general reflection of the
// efficacy of the underlying hash function and a direct factor that drives the
// cost of these operations.
template <typename MapT>
static void BM_MapInsertSeq(benchmark::State& state) {
  using MapWrapperT = MapWrapper<MapT>;
  using KT = typename MapWrapperT::KeyT;
  using VT = typename MapWrapperT::ValueT;
  constexpr ssize_t LookupKeysSize = 1 << 8;
  auto [keys, lookup_keys] =
      GetKeysAndHitKeys<KT>(state.range(0), LookupKeysSize);

  // Note that we don't force batches that use all the lookup keys because
  // there's no difference in cache usage by covering all the different lookup
  // keys.
  ssize_t i = 0;
  for (auto _ : state) {
    benchmark::DoNotOptimize(i);

    MapWrapperT m;
    for (auto k : keys) {
      bool inserted = m.BenchInsert(k, MakeValue<VT>());
      CARBON_DCHECK(inserted, "Must be a successful insert!");
    }

    // Now insert a final random repeated key.
    bool inserted = m.BenchInsert(lookup_keys[i], MakeValue2<VT>());
    CARBON_DCHECK(!inserted, "Must already be in the map!");

    // Rotate through the shuffled keys.
    i = (i + static_cast<ssize_t>(!inserted)) & (LookupKeysSize - 1);
  }

  // It can be easier in some cases to think of this as a key-throughput rate
  // of insertion rather than the latency of inserting N keys, so construct the
  // rate counter as well.
  state.counters["KeyRate"] = benchmark::Counter(
      keys.size(), benchmark::Counter::kIsIterationInvariantRate);

  // Report some extra statistics about the Carbon type.
  if constexpr (IsCarbonMap<MapT>) {
    // Re-build a map outside of the timing loop to look at the statistics
    // rather than the timing.
    MapWrapperT m;
    for (auto k : keys) {
      bool inserted = m.BenchInsert(k, MakeValue<VT>());
      CARBON_DCHECK(inserted, "Must be a successful insert!");
    }
    ReportMetrics(m, state);

    // Uncomment this call to print out statistics about the index-collisions
    // among these keys for debugging:
    //
    // RawHashtable::DumpHashStatistics(keys);
  }
}
MAP_BENCHMARK_OP_SEQ(BM_MapInsertSeq);

}  // namespace
}  // namespace Carbon