tokenized_buffer_benchmark.cpp 22 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564
  1. // Part of the Carbon Language project, under the Apache License v2.0 with LLVM
  2. // Exceptions. See /LICENSE for license information.
  3. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#include <benchmark/benchmark.h>

#include <algorithm>
#include <cstring>
#include <utility>

#include "absl/random/random.h"
#include "common/check.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/StringExtras.h"
#include "toolchain/diagnostics/diagnostic_emitter.h"
#include "toolchain/diagnostics/null_diagnostics.h"
#include "toolchain/lex/token_kind.h"
#include "toolchain/lex/tokenized_buffer.h"
  15. namespace Carbon::Lex {
  16. namespace {
// A large value for measurement stability without making benchmarking too slow.
// Needs to be a multiple of 100 so we can easily divide it up into percentages,
// and 1% itself needs to not be too tiny. This makes 100,000 a great balance.
//
// Every benchmark below synthesizes a source string containing exactly this
// many tokens.
constexpr int NumTokens = 100'000;
  21. auto IdentifierStartChars() -> llvm::ArrayRef<char> {
  22. static llvm::SmallVector<char> chars = [] {
  23. llvm::SmallVector<char> chars;
  24. chars.push_back('_');
  25. for (char c : llvm::seq_inclusive('A', 'Z')) {
  26. chars.push_back(c);
  27. }
  28. for (char c : llvm::seq_inclusive('a', 'z')) {
  29. chars.push_back(c);
  30. }
  31. return chars;
  32. }();
  33. return chars;
  34. }
  35. auto IdentifierChars() -> llvm::ArrayRef<char> {
  36. static llvm::SmallVector<char> chars = [] {
  37. llvm::ArrayRef<char> start_chars = IdentifierStartChars();
  38. llvm::SmallVector<char> chars(start_chars.begin(), start_chars.end());
  39. for (char c : llvm::seq_inclusive('0', '9')) {
  40. chars.push_back(c);
  41. }
  42. return chars;
  43. }();
  44. return chars;
  45. }
  46. // Generates a random identifier string of the specified length using the
  47. // provided RNG BitGen.
  48. auto GenerateRandomIdentifier(absl::BitGen& gen, int length) -> std::string {
  49. llvm::ArrayRef<char> start_chars = IdentifierStartChars();
  50. llvm::ArrayRef<char> chars = IdentifierChars();
  51. std::string id_result;
  52. llvm::raw_string_ostream os(id_result);
  53. llvm::StringRef id;
  54. do {
  55. // Erase any prior attempts to find an identifier.
  56. id_result.clear();
  57. os << start_chars[absl::Uniform<int>(gen, 0, start_chars.size())];
  58. for (int j : llvm::seq(0, length)) {
  59. static_cast<void>(j);
  60. os << chars[absl::Uniform<int>(gen, 0, chars.size())];
  61. }
  62. // Check if we ended up forming an integer type literal or a keyword, and
  63. // try again.
  64. id = llvm::StringRef(id_result);
  65. } while (
  66. llvm::any_of(TokenKind::KeywordTokens,
  67. [id](auto token) { return id == token.fixed_spelling(); }) ||
  68. ((id.consume_front("i") || id.consume_front("u") ||
  69. id.consume_front("f")) &&
  70. llvm::all_of(id, [](const char c) { return llvm::isDigit(c); })));
  71. return id_result;
  72. }
// Get a static pool of random identifiers with the desired distribution.
//
// The pool is built once per template instantiation and cached for the
// process lifetime. With `Uniform` set, lengths cycle round-robin through
// [MinLength, MaxLength]; otherwise lengths are requested from the observed
// non-uniform distribution encoded in the bucket counts below, where bucket
// index `i` requests identifiers of length `i + 1`.
template <int MinLength = 1, int MaxLength = 64, bool Uniform = false>
auto GetRandomIdentifiers() -> const std::array<std::string, NumTokens>& {
  static_assert(MinLength <= MaxLength);
  static_assert(
      Uniform || MaxLength <= 64,
      "Cannot produce a meaningful non-uniform distribution of lengths longer "
      "than 64 as those are exceedingly rare in our observed data sets.");
  static const std::array<std::string, NumTokens> id_storage = [] {
    std::array<int, 64> id_length_counts;
    // For non-uniform distribution, we simulate a distribution roughly based on
    // the observed histogram of identifier lengths, but smoothed a bit and
    // reduced to small counts so that we cycle through all the lengths
    // reasonably quickly. We want sampling of even 10% of NumTokens from this
    // in a round-robin form to not be skewed overly much. This still inherently
    // compresses the long tail as we'd rather have coverage even though it
    // distorts the distribution a bit.
    //
    // The distribution here comes from a script that analyzes source code run
    // over a few directories of LLVM. The script renders a visual ascii-art
    // histogram along with the data for each bucket, and that output is
    // included in comments above each bucket size below to help visualize the
    // rough shape we're aiming for.
    //
    // 1 characters [3976] ███████████████████████████████▊
    id_length_counts[0] = 40;
    // 2 characters [3724] █████████████████████████████▊
    id_length_counts[1] = 40;
    // 3 characters [4173] █████████████████████████████████▍
    id_length_counts[2] = 40;
    // 4 characters [5000] ████████████████████████████████████████
    id_length_counts[3] = 50;
    // 5 characters [1568] ████████████▌
    id_length_counts[4] = 20;
    // 6 characters [2226] █████████████████▊
    id_length_counts[5] = 20;
    // 7 characters [2380] ███████████████████
    id_length_counts[6] = 20;
    // 8 characters [1786] ██████████████▎
    id_length_counts[7] = 18;
    // 9 characters [1397] ███████████▏
    id_length_counts[8] = 12;
    // 10 characters [ 739] █████▉
    id_length_counts[9] = 12;
    // 11 characters [ 779] ██████▎
    id_length_counts[10] = 12;
    // 12 characters [1344] ██████████▊
    id_length_counts[11] = 12;
    // 13 characters [ 498] ████
    id_length_counts[12] = 5;
    // 14 characters [ 284] ██▎
    id_length_counts[13] = 3;
    // 15 characters [ 172] █▍
    // 16 characters [ 278] ██▎
    // 17 characters [ 191] █▌
    // 18 characters [ 207] █▋
    for (int i : llvm::seq(14, 18)) {
      id_length_counts[i] = 2;
    }
    // 19 - 63 characters are all <100 but non-zero, and we map them to 1 for
    // coverage despite slightly over weighting the tail.
    for (int i : llvm::seq(18, 64)) {
      id_length_counts[i] = 1;
    }

    // Used to track the different count buckets when in a non-uniform
    // distribution.
    int length_bucket_index = 0;
    int length_count = 0;
    std::array<std::string, NumTokens> ids;
    absl::BitGen gen;
    for (auto [i, id] : llvm::enumerate(ids)) {
      if (Uniform) {
        // Rather than using randomness, for a uniform distribution rotate
        // lengths in round-robin to get a deterministic and exact size on every
        // run. We will then shuffle them at the end to produce a random
        // ordering.
        int length = MinLength + i % (1 + MaxLength - MinLength);
        id = GenerateRandomIdentifier(gen, length);
        continue;
      }

      // For non-uniform distribution, walk through each length bucket
      // until our count matches the desired distribution, and then move to the
      // next.
      id = GenerateRandomIdentifier(gen, length_bucket_index + 1);
      if (length_count < id_length_counts[length_bucket_index]) {
        ++length_count;
      } else {
        // Bucket exhausted: wrap around to the start after the final bucket so
        // the distribution repeats if more than one pass is needed.
        length_bucket_index =
            (length_bucket_index + 1) % id_length_counts.size();
        length_count = 0;
      }
    }
    return ids;
  }();
  return id_storage;
}
  169. // Compute a random sequence of just identifiers.
  170. template <int MinLength = 1, int MaxLength = 64, bool Uniform = false>
  171. auto RandomIdentifierSeq() -> std::string {
  172. // Get a static pool of identifiers with the desired distribution.
  173. const std::array<std::string, NumTokens>& ids =
  174. GetRandomIdentifiers<MinLength, MaxLength, Uniform>();
  175. // Shuffle tokens so we get exactly one of each identifier but in a random
  176. // order.
  177. std::array<llvm::StringRef, NumTokens> tokens;
  178. for (int i : llvm::seq(NumTokens)) {
  179. tokens[i] = ids[i];
  180. }
  181. std::shuffle(tokens.begin(), tokens.end(), absl::BitGen());
  182. return llvm::join(tokens, " ");
  183. }
// Returns a table of symbol token kinds, weighted by repetition so that
// round-robin sampling from it approximates a realistic symbol distribution.
auto GetSymbolTokenTable() -> llvm::ArrayRef<TokenKind> {
  // Build our own table of symbols so we can use repetitions to skew the
  // distribution.
  static auto symbol_token_table_storage = [] {
    llvm::SmallVector<TokenKind> table;
    // Include each plain symbol token once. Grouping symbols are expanded to
    // nothing so the generated source doesn't require balanced delimiters.
#define CARBON_SYMBOL_TOKEN(TokenName, Spelling) \
  table.push_back(TokenKind::TokenName);
#define CARBON_OPENING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, ClosingName)
#define CARBON_CLOSING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, OpeningName)
#include "toolchain/lex/token_kind.def"
    // Add repeated copies of the most common symbols to skew the sampled
    // distribution toward them.
    table.insert(table.end(), 32, TokenKind::Semi);
    table.insert(table.end(), 16, TokenKind::Comma);
    table.insert(table.end(), 12, TokenKind::Period);
    table.insert(table.end(), 8, TokenKind::Colon);
    table.insert(table.end(), 8, TokenKind::Equal);
    table.insert(table.end(), 4, TokenKind::Amp);
    table.insert(table.end(), 4, TokenKind::ColonExclaim);
    table.insert(table.end(), 4, TokenKind::EqualEqual);
    table.insert(table.end(), 4, TokenKind::ExclaimEqual);
    table.insert(table.end(), 4, TokenKind::MinusGreater);
    table.insert(table.end(), 4, TokenKind::Star);
    return table;
  }();
  return symbol_token_table_storage;
}
  209. // Compute a random sequence of mixed symbols, keywords, and identifiers, with
  210. // percentages of each according to the parameters.
  211. auto RandomMixedSeq(int symbol_percent, int keyword_percent) -> std::string {
  212. CARBON_CHECK(0 <= symbol_percent && symbol_percent <= 100)
  213. << "Must be a percent: [0, 100].";
  214. CARBON_CHECK(0 <= keyword_percent && keyword_percent <= 100)
  215. << "Must be a percent: [0, 100].";
  216. CARBON_CHECK((symbol_percent + keyword_percent) <= 100)
  217. << "Cannot have >100%.";
  218. static_assert((NumTokens % 100) == 0,
  219. "The number of tokens must be divisible by 100 so that we can "
  220. "easily scale integer percentages up to it.");
  221. // Get static pools of symbols, keywords, and identifiers.
  222. llvm::ArrayRef<TokenKind> symbols = GetSymbolTokenTable();
  223. llvm::ArrayRef<TokenKind> keywords = TokenKind::KeywordTokens;
  224. const std::array<std::string, NumTokens>& ids = GetRandomIdentifiers();
  225. // Build a list of StringRefs from the different types with the desired
  226. // distribution, then shuffle that list.
  227. std::array<llvm::StringRef, NumTokens> tokens;
  228. int num_symbols = (NumTokens / 100) * symbol_percent;
  229. int num_keywords = (NumTokens / 100) * keyword_percent;
  230. int num_identifiers = NumTokens - num_symbols - num_keywords;
  231. CARBON_CHECK(num_identifiers == 0 || num_identifiers > 500)
  232. << "We require at least 500 identifiers as we need to collect a "
  233. "reasonable number of samples to end up with a reasonable "
  234. "distribution of lengths.";
  235. for (int i : llvm::seq(num_symbols)) {
  236. tokens[i] = symbols[i % symbols.size()].fixed_spelling();
  237. }
  238. for (int i : llvm::seq(num_keywords)) {
  239. tokens[num_symbols + i] = keywords[i % keywords.size()].fixed_spelling();
  240. }
  241. for (int i : llvm::seq(num_identifiers)) {
  242. // We always have enough identifiers, so no need to mod here.
  243. tokens[num_symbols + num_keywords + i] = ids[i];
  244. }
  245. std::shuffle(tokens.begin(), tokens.end(), absl::BitGen());
  246. return llvm::join(tokens, " ");
  247. }
// Helper that registers benchmark source text in an in-memory filesystem and
// runs the lexer over it.
class LexerBenchHelper {
 public:
  explicit LexerBenchHelper(llvm::StringRef text)
      : source_(MakeSourceBuffer(text)) {}

  // Lexes the stored source, discarding all diagnostics.
  auto Lex() -> TokenizedBuffer {
    DiagnosticConsumer& consumer = NullDiagnosticConsumer();
    return TokenizedBuffer::Lex(source_, consumer);
  }

  // Re-lexes the stored source, capturing the diagnostic stream into a string
  // and returning it. CHECK-fails if lexing produced no errors, since callers
  // only use this to report why a benchmark's input failed to lex.
  auto DiagnoseErrors() -> std::string {
    std::string result;
    llvm::raw_string_ostream out(result);
    StreamDiagnosticConsumer consumer(out);
    auto buffer = TokenizedBuffer::Lex(source_, consumer);
    consumer.Flush();
    CARBON_CHECK(buffer.has_errors())
        << "Asked to diagnose errors but none found!";
    return result;
  }

  auto source_text() -> llvm::StringRef { return source_.text(); }

 private:
  // Adds `text` to the in-memory filesystem under `filename_` and builds a
  // SourceBuffer reading from it.
  auto MakeSourceBuffer(llvm::StringRef text) -> SourceBuffer {
    CARBON_CHECK(fs_.addFile(filename_, /*ModificationTime=*/0,
                             llvm::MemoryBuffer::getMemBuffer(text)));
    return std::move(*SourceBuffer::CreateFromFile(
        fs_, filename_, ConsoleDiagnosticConsumer()));
  }

  llvm::vfs::InMemoryFileSystem fs_;
  std::string filename_ = "test.carbon";
  SourceBuffer source_;
};
  278. void BM_ValidKeywords(benchmark::State& state) {
  279. absl::BitGen gen;
  280. std::array<llvm::StringRef, NumTokens> tokens;
  281. for (int i : llvm::seq(NumTokens)) {
  282. tokens[i] = TokenKind::KeywordTokens[i % TokenKind::KeywordTokens.size()]
  283. .fixed_spelling();
  284. }
  285. std::shuffle(tokens.begin(), tokens.end(), gen);
  286. std::string source = llvm::join(tokens, " ");
  287. LexerBenchHelper helper(source);
  288. for (auto _ : state) {
  289. TokenizedBuffer buffer = helper.Lex();
  290. CARBON_CHECK(!buffer.has_errors());
  291. }
  292. state.SetBytesProcessed(state.iterations() * source.size());
  293. state.counters["tokens_per_second"] = benchmark::Counter(
  294. NumTokens, benchmark::Counter::kIsIterationInvariantRate);
  295. }
  296. BENCHMARK(BM_ValidKeywords);
  297. template <int MinLength, int MaxLength, bool Uniform>
  298. void BM_ValidIdentifiers(benchmark::State& state) {
  299. std::string source = RandomIdentifierSeq<MinLength, MaxLength, Uniform>();
  300. LexerBenchHelper helper(source);
  301. for (auto _ : state) {
  302. TokenizedBuffer buffer = helper.Lex();
  303. CARBON_CHECK(!buffer.has_errors()) << helper.DiagnoseErrors();
  304. }
  305. state.SetBytesProcessed(state.iterations() * source.size());
  306. state.counters["tokens_per_second"] = benchmark::Counter(
  307. NumTokens, benchmark::Counter::kIsIterationInvariantRate);
  308. }
  309. // Benchmark the non-uniform distribution we observe in C++ code.
  310. BENCHMARK(BM_ValidIdentifiers<1, 64, /*Uniform=*/false>);
  311. // Also benchmark a few uniform distribution ranges of identifier widths to
  312. // cover different patterns that emerge with small, medium, and longer
  313. // identifiers.
  314. BENCHMARK(BM_ValidIdentifiers<1, 1, /*Uniform=*/true>);
  315. BENCHMARK(BM_ValidIdentifiers<3, 5, /*Uniform=*/true>);
  316. BENCHMARK(BM_ValidIdentifiers<3, 16, /*Uniform=*/true>);
  317. BENCHMARK(BM_ValidIdentifiers<12, 64, /*Uniform=*/true>);
  318. void BM_ValidMix(benchmark::State& state) {
  319. int symbol_percent = state.range(0);
  320. int keyword_percent = state.range(1);
  321. std::string source = RandomMixedSeq(symbol_percent, keyword_percent);
  322. LexerBenchHelper helper(source);
  323. for (auto _ : state) {
  324. TokenizedBuffer buffer = helper.Lex();
  325. // Ensure that lexing actually occurs for benchmarking and that it doesn't
  326. // hit errors that would skew the benchmark results.
  327. CARBON_CHECK(!buffer.has_errors()) << helper.DiagnoseErrors();
  328. }
  329. state.SetBytesProcessed(state.iterations() * source.size());
  330. state.counters["tokens_per_second"] = benchmark::Counter(
  331. NumTokens, benchmark::Counter::kIsIterationInvariantRate);
  332. }
  333. // The distributions between symbols, keywords, and identifiers here are
  334. // guesses. Eventually, we should collect more data to help tune these, but
  335. // hopefully the performance isn't too sensitive and we can just cover a wide
  336. // range here.
  337. BENCHMARK(BM_ValidMix)
  338. ->Args({10, 40})
  339. ->Args({25, 30})
  340. ->Args({50, 20})
  341. ->Args({75, 10});
  342. // This is a speed-of-light benchmark that should reflect memory bandwidth
  343. // (ideally) of simply reading all the source code. For speed-of-light we use
  344. // `strcpy` -- this both examines ever byte of the input looking for a null to
  345. // end the copy, and also writes to a data structure of roughly the same size as
  346. // the input. This routine is one we expect to be *very* well optimized and give
  347. // a good approximation of the fastest possible lexer given the physical
  348. // constraints of the machine. Note that which particular source we use as input
  349. // here isn't especially interesting, so we just pick one and should update it
  350. // to reflect whatever distribution is most realistic long-term. The
  351. // bytes/second throughput is the important output of this routine.
  352. auto BM_SpeedOfLightStrCpy(benchmark::State& state) -> void {
  353. std::string source =
  354. RandomMixedSeq(/*symbol_percent=*/25, /*keyword_percent=*/30);
  355. // A buffer to write the null-terminated contents of `source` into.
  356. llvm::OwningArrayRef<char> buffer(source.size() + 1);
  357. for (auto _ : state) {
  358. const char* text = source.data();
  359. benchmark::DoNotOptimize(text);
  360. strcpy(buffer.data(), text);
  361. benchmark::DoNotOptimize(buffer.data());
  362. }
  363. state.SetBytesProcessed(state.iterations() * source.size());
  364. state.counters["tokens_per_second"] = benchmark::Counter(
  365. NumTokens, benchmark::Counter::kIsIterationInvariantRate);
  366. }
  367. BENCHMARK(BM_SpeedOfLightStrCpy);
// This is a speed-of-light benchmark that builds up a best-case byte-wise table
// dispatch using guaranteed tail recursion. The goal is both to ensure the
// general technique can reasonably hit the level of performance we need and to
// establish how far from this speed of light the actual lexer currently sits.
//
// A major impact on the observed performance of this technique is how many
// different functions are reached in this dispatch loop. This benchmark
// infrastructure tries to bracket the range of performance this technique
// affords with different numbers of dispatch target functions.

// Signature shared by all dispatch targets: consume the byte at `text[index]`,
// write into `buffer`, advance `index`, and tail-call the next target.
using DispatchPtrT = auto (*)(ssize_t& index, const char* text, char* buffer)
    -> void;
// A full dispatch table: one target function per possible byte value.
using DispatchTableT = std::array<DispatchPtrT, 256>;
// Generic dispatch target: stores the current byte through `buffer` (the
// pointer itself is never advanced, so this is a fixed single-byte write
// rather than a full copy), advances `index`, and dispatches on the next byte.
template <const DispatchTableT& Table>
auto BasicDispatch(ssize_t& index, const char* text, char* buffer) -> void {
  *buffer = text[index];
  ++index;
  // `musttail` forces the recursive dispatch to compile as a jump, so walking
  // the whole input uses constant stack space.
  [[clang::musttail]] return Table[static_cast<unsigned char>(text[index])](
      index, text, buffer);
}
// Dispatch target specialized on the character `C` whose table slot routes to
// this instantiation; it writes the compile-time-known byte rather than
// re-reading it from `text`.
template <const DispatchTableT& Table, char C>
auto SpecializedDispatch(ssize_t& index, const char* text, char* buffer)
    -> void {
  // The table must only route byte `C` to this instantiation.
  CARBON_CHECK(C == text[index]);
  *buffer = C;
  ++index;
  [[clang::musttail]] return Table[static_cast<unsigned char>(text[index])](
      index, text, buffer);
}
// A sample of the symbol characters used in Carbon code. Doesn't need to be
// perfect, as we just need to have a reasonably large # of distinct dispatch
// functions. The array's size also feeds the size of the combined
// specializable-character array below via `sizeof`.
constexpr char DispatchSpecializableSymbols[] = {
    '!', '%', '(', ')', '*', '+', ',', '-', '.', ':',
    ';', '<', '=', '>', '?', '[', ']', '{', '}', '~',
};
  403. // Create an array of all the characters we can specialize dispatch over --
  404. // [0-9A-Za-z] and the symbols above. Similar to the above symbols, doesn't need
  405. // to be exhaustive.
  406. constexpr std::array<char, 26 * 2 + 10 + sizeof(DispatchSpecializableSymbols)>
  407. DispatchSpecializableChars = []() constexpr {
  408. constexpr int Size = sizeof(DispatchSpecializableChars);
  409. std::array<char, Size> chars = {};
  410. int i = 0;
  411. for (char c = '0'; c <= '9'; ++c) {
  412. chars[i] = c;
  413. ++i;
  414. }
  415. for (char c = 'A'; c <= 'Z'; ++c) {
  416. chars[i] = c;
  417. ++i;
  418. }
  419. for (char c = 'a'; c <= 'z'; ++c) {
  420. chars[i] = c;
  421. ++i;
  422. }
  423. for (char c : DispatchSpecializableSymbols) {
  424. chars[i] = c;
  425. ++i;
  426. }
  427. CARBON_CHECK(i == Size);
  428. return chars;
  429. }();
// Instantiate a number of specialized dispatch functions for characters in the
// array above, and assign those function addresses to the character's entry in
// the provided table. The provided `tmp_table` is a temporary that will
// eventually initialize the provided `Table` constant, so the constant is what
// we propagate to the instantiated function and the temporary is the one we
// initialize.
template <const DispatchTableT& Table, size_t... Indices>
constexpr auto SpecializeDispatchTable(
    DispatchTableT& tmp_table, std::index_sequence<Indices...> /*indices*/)
    -> void {
  static_assert(sizeof...(Indices) <= sizeof(DispatchSpecializableChars));
  // Comma-fold over the index pack: each index installs the
  // `SpecializedDispatch` instantiation for its specializable character at
  // that character's slot in the table.
  ((tmp_table[static_cast<unsigned char>(DispatchSpecializableChars[Indices])] =
        &SpecializedDispatch<Table, DispatchSpecializableChars[Indices]>),
   ...);
}
// The maximum number of dispatch targets is the size of the array + 1 (for the
// base case target).
constexpr int MaxDispatchTargets = sizeof(DispatchSpecializableChars) + 1;

// Dispatch tables with a provided number of distinct dispatch targets. There
// will always be one additional target for the null byte to end the loop.
//
// Note that the initializer passes the variable being initialized
// (`DispatchTable<NumDispatchTargets>`) as the template argument of the
// dispatch functions, so each target tail-calls back through its own table.
template <int NumDispatchTargets>
constexpr DispatchTableT DispatchTable = []() constexpr {
  static_assert(NumDispatchTargets > 0, "Need at least one dispatch target.");
  static_assert(NumDispatchTargets <= MaxDispatchTargets,
                "Limited number of dispatch targets available.");
  DispatchTableT tmp_table = {};
  // Start with the basic dispatch target.
  for (int i = 0; i < 256; ++i) {
    tmp_table[i] = &BasicDispatch<DispatchTable<NumDispatchTargets>>;
  }
  if constexpr (NumDispatchTargets > 1) {
    // Add additional dispatch targets from our specializable array.
    SpecializeDispatchTable<DispatchTable<NumDispatchTargets>>(
        tmp_table, std::make_index_sequence<NumDispatchTargets - 1>());
  }
  // Special case the null byte index to end the tail-dispatch. This overwrites
  // whatever target was installed at slot 0 above.
  tmp_table[0] =
      +[](ssize_t& index, const char* text, char* /*buffer*/) -> void {
    CARBON_CHECK(text[index] == '\0');
    return;
  };
  return tmp_table;
}();
// Benchmarks the pure table-dispatch loop with the given number of distinct
// dispatch target functions, walking the mixed-token source byte by byte with
// a single-byte write per character.
template <int NumDispatchTargets>
auto BM_SpeedOfLightDispatch(benchmark::State& state) -> void {
  std::string source =
      RandomMixedSeq(/*symbol_percent=*/25, /*keyword_percent=*/30);

  // A buffer to write to, simulating some minimal write traffic.
  llvm::OwningArrayRef<char> buffer(source.size());
  for (auto _ : state) {
    const char* text = source.data();
    benchmark::DoNotOptimize(text);

    // Use `ssize_t` to minimize indexing overhead.
    ssize_t i = 0;

    // The dispatch table tail-recurses through the entire string.
    DispatchTable<NumDispatchTargets>[static_cast<unsigned char>(text[i])](
        i, text, buffer.data());
    // The null-byte target stops with `i` at the terminator, which must equal
    // the source length.
    CARBON_CHECK(i == static_cast<ssize_t>(source.size()));
    benchmark::DoNotOptimize(buffer.data());
  }

  state.SetBytesProcessed(state.iterations() * source.size());
  state.counters["tokens_per_second"] = benchmark::Counter(
      NumTokens, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SpeedOfLightDispatch<1>);
BENCHMARK(BM_SpeedOfLightDispatch<2>);
BENCHMARK(BM_SpeedOfLightDispatch<4>);
BENCHMARK(BM_SpeedOfLightDispatch<8>);
BENCHMARK(BM_SpeedOfLightDispatch<16>);
BENCHMARK(BM_SpeedOfLightDispatch<32>);
BENCHMARK(BM_SpeedOfLightDispatch<MaxDispatchTargets>);
  501. } // namespace
  502. } // namespace Carbon::Lex