  1. // Part of the Carbon Language project, under the Apache License v2.0 with LLVM
  2. // Exceptions. See /LICENSE for license information.
  3. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#include <benchmark/benchmark.h>

#include <algorithm>
#include <array>
#include <cstring>
#include <string>
#include <utility>

#include "absl/random/random.h"
#include "common/check.h"
#include "common/raw_string_ostream.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/StringExtras.h"
#include "testing/base/source_gen.h"
#include "toolchain/base/shared_value_stores.h"
#include "toolchain/diagnostics/diagnostic_emitter.h"
#include "toolchain/diagnostics/null_diagnostics.h"
#include "toolchain/lex/lex.h"
#include "toolchain/lex/token_kind.h"
#include "toolchain/lex/tokenized_buffer.h"
  21. namespace Carbon::Lex {
  22. namespace {
// The total number of tokens generated for every benchmark below.
//
// A large value for measurement stability without making benchmarking too slow.
// Needs to be a multiple of 100 so we can easily divide it up into percentages,
// and 1% itself needs to not be too tiny. This makes 100,000 a great balance.
constexpr int NumTokens = 100'000;
  27. // Compute a random sequence of just identifiers.
  28. static auto RandomIdentifierSeq(int min_length, int max_length, bool uniform,
  29. llvm::StringRef separator = " ")
  30. -> std::string {
  31. auto& gen = Testing::SourceGen::Global();
  32. llvm::SmallVector<llvm::StringRef> ids =
  33. gen.GetShuffledIdentifiers(NumTokens, min_length, max_length, uniform);
  34. return llvm::join(ids, separator);
  35. }
// Returns a table of symbol token kinds used when generating random symbol
// tokens. Entries may repeat so that sampling the table with a round-robin or
// uniform index produces a skewed distribution.
auto GetSymbolTokenTable() -> llvm::ArrayRef<TokenKind> {
  // Build our own table of symbols so we can use repetitions to skew the
  // distribution.
  static auto symbol_token_table_storage = [] {
    llvm::SmallVector<TokenKind> table;
// Append each plain symbol token once. Grouping symbols are deliberately
// excluded here: emitting them unbalanced would produce lexer errors.
#define CARBON_SYMBOL_TOKEN(TokenName, Spelling) \
  table.push_back(TokenKind::TokenName);
#define CARBON_OPENING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, ClosingName)
#define CARBON_CLOSING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, OpeningName)
#include "toolchain/lex/token_kind.def"
    // Skew the distribution towards common symbols by inserting extra
    // repetitions; larger counts make the symbol proportionally more likely.
    table.insert(table.end(), 32, TokenKind::Semi);
    table.insert(table.end(), 16, TokenKind::Comma);
    table.insert(table.end(), 12, TokenKind::Period);
    table.insert(table.end(), 8, TokenKind::Colon);
    table.insert(table.end(), 8, TokenKind::Equal);
    table.insert(table.end(), 4, TokenKind::Amp);
    table.insert(table.end(), 4, TokenKind::ColonExclaim);
    table.insert(table.end(), 4, TokenKind::EqualEqual);
    table.insert(table.end(), 4, TokenKind::ExclaimEqual);
    table.insert(table.end(), 4, TokenKind::MinusGreater);
    table.insert(table.end(), 4, TokenKind::Star);
    return table;
  }();
  return symbol_token_table_storage;
}
// Options controlling the token and line mix produced by `RandomSource`.
struct RandomSourceOptions {
  // Percentage of tokens that are symbols.
  int symbol_percent = 0;
  // Percentage of tokens that are keywords.
  int keyword_percent = 0;
  // Percentage of tokens that are numeric literals. See the TODO on
  // `RandomSource`: literal generation is not implemented yet.
  int numeric_literal_percent = 0;
  // Percentage of tokens that are string literals. Also not yet implemented.
  int string_literal_percent = 0;
  // Number of tokens placed on each generated line; must evenly divide
  // `NumTokens`.
  int tokens_per_line = NumTokens;
  // Percentage of all lines that are comment-only lines.
  int comment_line_percent = 0;
  // Percentage of all lines that are blank.
  int blank_line_percent = 0;

  // Checks that the configured percentages are in range and mutually
  // consistent; aborts via CARBON_CHECK on violation.
  auto Validate() -> void {
    auto is_percentage = [](int n) { return 0 <= n && n <= 100; };
    CARBON_CHECK(is_percentage(symbol_percent));
    CARBON_CHECK(is_percentage(keyword_percent));
    CARBON_CHECK(is_percentage(numeric_literal_percent));
    CARBON_CHECK(is_percentage(string_literal_percent));
    // The remaining share (up to 100%) is filled with identifiers, so the sum
    // itself must also be a valid percentage.
    CARBON_CHECK(is_percentage(symbol_percent + keyword_percent +
                               numeric_literal_percent +
                               string_literal_percent));
    CARBON_CHECK(tokens_per_line <= NumTokens);
    CARBON_CHECK(
        NumTokens % tokens_per_line == 0,
        "Tokens per line of {0} does not divide the number of tokens {1}",
        tokens_per_line, NumTokens);
    CARBON_CHECK(is_percentage(comment_line_percent));
    CARBON_CHECK(is_percentage(blank_line_percent));
    // Ensure that comment and blank lines are less than 100% so we eventually
    // produce a token line.
    CARBON_CHECK(comment_line_percent + blank_line_percent < 100);
  }
};
// Based on measurements of LLVM's source code, a rough approximation of the
// distribution of these kinds of tokens. The remainder (42% of tokens) is
// filled with identifiers by `RandomSource`.
constexpr RandomSourceOptions DefaultSourceDist = {
    .symbol_percent = 50,
    .keyword_percent = 7,
    .numeric_literal_percent = 17,
    .string_literal_percent = 1,
    // The median for LLVM is roughly 5.
    .tokens_per_line = 5,
    // Observed percentage of lines in LLVM.
    .comment_line_percent = 22,
    .blank_line_percent = 15,
};
  103. // Compute random source code with a mixture of tokens and whitespace according
  104. // to the options. The source isn't designed to be valid, or directly
  105. // representative of real-world Carbon code. However, it tries to provide
  106. // reasonable coverage of the different aspects of Carbon's lexer, such that for
  107. // real world source code with distributions similar to the options provided the
  108. // lexer performance will be roughly representative.
  109. //
  110. // TODO: Does not yet support generating numeric or string literals.
  111. //
  112. // TODO: The shape of lines is handled very arbitrarily and should vary more to
  113. // avoid over-fitting to a specific shape (number of tokens, length of comment).
  114. auto RandomSource(RandomSourceOptions options) -> std::string {
  115. options.Validate();
  116. static_assert((NumTokens % 100) == 0,
  117. "The number of tokens must be divisible by 100 so that we can "
  118. "easily scale integer percentages up to it.");
  119. // Get static pools of symbols, keywords, and identifiers.
  120. llvm::ArrayRef<TokenKind> symbols = GetSymbolTokenTable();
  121. llvm::ArrayRef<TokenKind> keywords = TokenKind::KeywordTokens;
  122. // Build a list of StringRefs from the different types with the desired
  123. // distribution, then shuffle that list.
  124. llvm::OwningArrayRef<llvm::StringRef> tokens(NumTokens);
  125. int num_symbols = (NumTokens / 100) * options.symbol_percent;
  126. int num_keywords = (NumTokens / 100) * options.keyword_percent;
  127. int num_identifiers = NumTokens - num_symbols - num_keywords;
  128. CARBON_CHECK(
  129. num_identifiers == 0 || num_identifiers > 500,
  130. "We require at least 500 identifiers as we need to collect a reasonable "
  131. "number of samples to end up with a reasonable distribution of lengths.");
  132. llvm::SmallVector<llvm::StringRef> ids =
  133. Testing::SourceGen::Global().GetIdentifiers(num_identifiers);
  134. for (int i : llvm::seq(num_symbols)) {
  135. tokens[i] = symbols[i % symbols.size()].fixed_spelling();
  136. }
  137. for (int i : llvm::seq(num_keywords)) {
  138. tokens[num_symbols + i] = keywords[i % keywords.size()].fixed_spelling();
  139. }
  140. for (int i : llvm::seq(num_identifiers)) {
  141. // We always have enough identifiers, so no need to mod here.
  142. tokens[num_symbols + num_keywords + i] = ids[i];
  143. }
  144. std::shuffle(tokens.begin(), tokens.end(), absl::BitGen());
  145. // Distribute the tokens across lines as well as horizontal whitespace. The
  146. // goal isn't to make any one line representative of anything, but to make the
  147. // rough density of different kinds of whitespace roughly representative.
  148. //
  149. // TODO: This is a really coarse approach that just picks a fixed number of
  150. // tokens per line rather than using some distribution with this as the median
  151. // or mean.
  152. llvm::SmallVector<std::string> lines;
  153. // First place tokens onto each line.
  154. for (auto i : llvm::seq(NumTokens / options.tokens_per_line)) {
  155. lines.push_back("");
  156. RawStringOstream os;
  157. // Arbitrarily indent each line by two spaces.
  158. os << " ";
  159. llvm::ListSeparator sep(" ");
  160. for (int j : llvm::seq(options.tokens_per_line)) {
  161. os << sep << tokens[i * options.tokens_per_line + j];
  162. }
  163. lines.push_back(os.TakeStr());
  164. }
  165. // Next, synthesize blank and comment lines with the correct distribution.
  166. int token_line_percent =
  167. 100 - options.blank_line_percent - options.comment_line_percent;
  168. CARBON_CHECK(token_line_percent > 0);
  169. int num_token_lines = lines.size();
  170. int num_lines = num_token_lines * 100 / token_line_percent;
  171. int num_blank_lines = num_lines * options.blank_line_percent / 100;
  172. int num_comment_lines = num_lines - num_blank_lines - num_token_lines;
  173. CARBON_CHECK(num_comment_lines >= 0);
  174. lines.resize(num_lines);
  175. for (auto& line :
  176. llvm::MutableArrayRef(lines).slice(num_lines - num_comment_lines)) {
  177. // TODO: We should vary the content and length, especially as the
  178. // distribution is weirdly shaped with just over half the comment lines
  179. // being blank and the median length of non-black comment lines being 64!
  180. // This is a *very* coarse approximation of the mean at 30 characters long.
  181. line = " // abcdefghijklmnopqrstuvwxyz";
  182. }
  183. // Now shuffle the lines.
  184. std::shuffle(lines.begin(), lines.end(), absl::BitGen());
  185. // And join them into the source string.
  186. return llvm::join(lines, "\n");
  187. }
// Helper owning the value stores, in-memory filesystem, and source buffer
// needed to repeatedly lex a fixed source text inside a benchmark loop.
class LexerBenchHelper {
 public:
  // NOTE(review): `MakeSourceBuffer` wraps `text` via
  // `MemoryBuffer::getMemBuffer`, which appears to reference rather than copy
  // the underlying storage — callers should keep `text` alive for the
  // helper's lifetime; confirm against the SourceBuffer implementation.
  explicit LexerBenchHelper(llvm::StringRef text)
      : source_(MakeSourceBuffer(text)) {}

  // Lexes the source text, discarding diagnostics via the null consumer.
  auto Lex() -> TokenizedBuffer {
    Diagnostics::Consumer& consumer = Diagnostics::NullConsumer();
    Lex::LexOptions options;
    options.consumer = &consumer;
    return Lex::Lex(value_stores_, source_, options);
  }

  // Re-lexes the source and returns the formatted diagnostics produced.
  // CHECK-fails if lexing produced no errors.
  auto DiagnoseErrors() -> std::string {
    RawStringOstream result;
    Diagnostics::StreamConsumer consumer(&result);
    Lex::LexOptions options;
    options.consumer = &consumer;
    auto buffer = Lex::Lex(value_stores_, source_, options);
    consumer.Flush();
    CARBON_CHECK(buffer.has_errors(),
                 "Asked to diagnose errors but none found!");
    return result.TakeStr();
  }

  // Returns the text of the underlying source buffer.
  auto source_text() -> llvm::StringRef { return source_.text(); }

 private:
  // Registers `text` as a file in the in-memory filesystem and builds a
  // SourceBuffer from it.
  auto MakeSourceBuffer(llvm::StringRef text) -> SourceBuffer {
    CARBON_CHECK(fs_.addFile(filename_, /*ModificationTime=*/0,
                             llvm::MemoryBuffer::getMemBuffer(text)));
    return std::move(*SourceBuffer::MakeFromFile(
        fs_, filename_, Diagnostics::ConsoleConsumer()));
  }

  SharedValueStores value_stores_;
  llvm::vfs::InMemoryFileSystem fs_;
  std::string filename_ = "test.carbon";
  SourceBuffer source_;
};
  222. auto BM_ValidKeywords(benchmark::State& state) -> void {
  223. absl::BitGen gen;
  224. std::array<llvm::StringRef, NumTokens> tokens;
  225. for (int i : llvm::seq(NumTokens)) {
  226. tokens[i] = TokenKind::KeywordTokens[i % TokenKind::KeywordTokens.size()]
  227. .fixed_spelling();
  228. }
  229. std::shuffle(tokens.begin(), tokens.end(), gen);
  230. std::string source = llvm::join(tokens, " ");
  231. LexerBenchHelper helper(source);
  232. for (auto _ : state) {
  233. TokenizedBuffer buffer = helper.Lex();
  234. CARBON_CHECK(!buffer.has_errors());
  235. }
  236. state.SetBytesProcessed(state.iterations() * source.size());
  237. state.counters["tokens_per_second"] = benchmark::Counter(
  238. NumTokens, benchmark::Counter::kIsIterationInvariantRate);
  239. }
  240. BENCHMARK(BM_ValidKeywords);
  241. auto BM_ValidKeywordsAsRawIdentifiers(benchmark::State& state) -> void {
  242. absl::BitGen gen;
  243. std::array<llvm::StringRef, NumTokens> tokens;
  244. for (int i : llvm::seq(NumTokens)) {
  245. tokens[i] = TokenKind::KeywordTokens[i % TokenKind::KeywordTokens.size()]
  246. .fixed_spelling();
  247. }
  248. std::shuffle(tokens.begin(), tokens.end(), gen);
  249. std::string source("r#");
  250. source.append(llvm::join(tokens, " r#"));
  251. LexerBenchHelper helper(source);
  252. for (auto _ : state) {
  253. TokenizedBuffer buffer = helper.Lex();
  254. CARBON_CHECK(!buffer.has_errors());
  255. }
  256. state.SetBytesProcessed(state.iterations() * source.size());
  257. state.counters["tokens_per_second"] = benchmark::Counter(
  258. NumTokens, benchmark::Counter::kIsIterationInvariantRate);
  259. }
  260. BENCHMARK(BM_ValidKeywordsAsRawIdentifiers);
  261. // This benchmark does a 50-50 split of r-prefixed and r#-prefixed identifiers
  262. // to directly compare raw and non-raw performance.
  263. auto BM_RawIdentifierFocus(benchmark::State& state) -> void {
  264. llvm::SmallVector<llvm::StringRef> ids =
  265. Testing::SourceGen::Global().GetIdentifiers(NumTokens / 2);
  266. llvm::SmallVector<std::string> modified_ids;
  267. // As we resize, start with the in-use prefix. Note that `r#` uses the first
  268. // character of the original identifier.
  269. modified_ids.resize(NumTokens / 2, "r#");
  270. modified_ids.resize(NumTokens, "r");
  271. for (int i : llvm::seq(NumTokens / 2)) {
  272. // Use the same identifier both ways.
  273. modified_ids[i].append(ids[i]);
  274. modified_ids[i + NumTokens / 2].append(
  275. llvm::StringRef(ids[i]).drop_front());
  276. }
  277. absl::BitGen gen;
  278. std::array<llvm::StringRef, NumTokens> tokens;
  279. for (int i : llvm::seq(NumTokens)) {
  280. tokens[i] = modified_ids[i];
  281. }
  282. std::shuffle(tokens.begin(), tokens.end(), gen);
  283. std::string source = llvm::join(tokens, " ");
  284. LexerBenchHelper helper(source);
  285. for (auto _ : state) {
  286. TokenizedBuffer buffer = helper.Lex();
  287. CARBON_CHECK(!buffer.has_errors());
  288. }
  289. state.SetBytesProcessed(state.iterations() * source.size());
  290. state.counters["tokens_per_second"] = benchmark::Counter(
  291. NumTokens, benchmark::Counter::kIsIterationInvariantRate);
  292. }
  293. BENCHMARK(BM_RawIdentifierFocus);
  294. template <int MinLength, int MaxLength, bool Uniform>
  295. auto BM_ValidIdentifiers(benchmark::State& state) -> void {
  296. std::string source = RandomIdentifierSeq(MinLength, MaxLength, Uniform);
  297. LexerBenchHelper helper(source);
  298. for (auto _ : state) {
  299. TokenizedBuffer buffer = helper.Lex();
  300. CARBON_CHECK(!buffer.has_errors(), "{0}", helper.DiagnoseErrors());
  301. }
  302. state.SetBytesProcessed(state.iterations() * source.size());
  303. state.counters["tokens_per_second"] = benchmark::Counter(
  304. NumTokens, benchmark::Counter::kIsIterationInvariantRate);
  305. }
  306. // Benchmark the non-uniform distribution we observe in C++ code.
  307. BENCHMARK(BM_ValidIdentifiers<1, 64, /*Uniform=*/false>);
  308. // Also benchmark a few uniform distribution ranges of identifier widths to
  309. // cover different patterns that emerge with small, medium, and longer
  310. // identifiers.
  311. BENCHMARK(BM_ValidIdentifiers<1, 1, /*Uniform=*/true>);
  312. BENCHMARK(BM_ValidIdentifiers<3, 5, /*Uniform=*/true>);
  313. BENCHMARK(BM_ValidIdentifiers<3, 16, /*Uniform=*/true>);
  314. BENCHMARK(BM_ValidIdentifiers<12, 64, /*Uniform=*/true>);
  315. BENCHMARK(BM_ValidIdentifiers<16, 16, /*Uniform=*/true>);
  316. BENCHMARK(BM_ValidIdentifiers<24, 24, /*Uniform=*/true>);
  317. BENCHMARK(BM_ValidIdentifiers<32, 32, /*Uniform=*/true>);
  318. BENCHMARK(BM_ValidIdentifiers<48, 48, /*Uniform=*/true>);
  319. BENCHMARK(BM_ValidIdentifiers<64, 64, /*Uniform=*/true>);
  320. BENCHMARK(BM_ValidIdentifiers<80, 80, /*Uniform=*/true>);
  321. // Benchmark to stress the lexing of horizontal whitespace. This sets up what is
  322. // nearly a worst-case scenario of short-but-expensive-to-lex tokens with runs
  323. // of horizontal whitespace between them.
  324. auto BM_HorizontalWhitespace(benchmark::State& state) -> void {
  325. int num_spaces = state.range(0);
  326. std::string separator(num_spaces, ' ');
  327. std::string source = RandomIdentifierSeq(3, 5, /*uniform=*/true, separator);
  328. LexerBenchHelper helper(source);
  329. for (auto _ : state) {
  330. TokenizedBuffer buffer = helper.Lex();
  331. // Ensure that lexing actually occurs for benchmarking and that it doesn't
  332. // hit errors that would skew the benchmark results.
  333. CARBON_CHECK(!buffer.has_errors(), "{0}", helper.DiagnoseErrors());
  334. }
  335. state.SetBytesProcessed(state.iterations() * source.size());
  336. state.counters["tokens_per_second"] = benchmark::Counter(
  337. NumTokens, benchmark::Counter::kIsIterationInvariantRate);
  338. }
  339. BENCHMARK(BM_HorizontalWhitespace)->RangeMultiplier(4)->Range(1, 128);
  340. auto BM_RandomSource(benchmark::State& state) -> void {
  341. std::string source = RandomSource(DefaultSourceDist);
  342. LexerBenchHelper helper(source);
  343. for (auto _ : state) {
  344. TokenizedBuffer buffer = helper.Lex();
  345. // Ensure that lexing actually occurs for benchmarking and that it doesn't
  346. // hit errors that would skew the benchmark results.
  347. CARBON_CHECK(!buffer.has_errors(), "{0}", helper.DiagnoseErrors());
  348. }
  349. state.SetBytesProcessed(state.iterations() * source.size());
  350. state.counters["tokens_per_second"] = benchmark::Counter(
  351. NumTokens, benchmark::Counter::kIsIterationInvariantRate);
  352. state.counters["lines_per_second"] =
  353. benchmark::Counter(llvm::StringRef(source).count('\n'),
  354. benchmark::Counter::kIsIterationInvariantRate);
  355. }
  356. // The distributions between symbols, keywords, and identifiers here are
  357. // guesses. Eventually, we should collect more data to help tune these, but
  358. // hopefully the performance isn't too sensitive and we can just cover a wide
  359. // range here.
  360. BENCHMARK(BM_RandomSource);
// Benchmark to stress opening and closing grouped symbols.
auto BM_GroupingSymbols(benchmark::State& state) -> void {
  int curly_brace_depth = state.range(0);
  int paren_depth = state.range(1);
  int square_bracket_depth = state.range(2);
  // TODO: It might be interesting to have some random pattern of nesting, but
  // the obvious ways to do that result it really unstable total size of input
  // or unbalanced groups. For now, just use a simple strict nesting approach.
  // It should still let us look for specific pain points. We do include some
  // whitespace and keywords to make sure *some* other parts of the benchmark
  // are also active and have some reasonable icache pressure.
  llvm::SmallVector<llvm::StringRef> ids =
      Testing::SourceGen::Global().GetShuffledIdentifiers(NumTokens);
  RawStringOstream os;
  // Each "nest" emits one matched pair per requested depth of each group kind,
  // plus two identifier tokens: one inside the innermost group and one after
  // the closing braces.
  int num_tokens_per_nest =
      curly_brace_depth * 2 + paren_depth * 2 + square_bracket_depth * 2 + 2;
  int num_nests = NumTokens / num_tokens_per_nest;
  for (int i : llvm::seq(num_nests)) {
    // Open curly braces on their own lines with two-space indent per level.
    for (int j : llvm::seq(curly_brace_depth)) {
      os.indent(j * 2) << "{\n";
    }
    os.indent(curly_brace_depth * 2);
    // Parens and square brackets are nested inline around an identifier.
    for ([[maybe_unused]] int j : llvm::seq(paren_depth)) {
      os << "(";
    }
    for ([[maybe_unused]] int j : llvm::seq(square_bracket_depth)) {
      os << "[";
    }
    os << ids[(i * 2) % NumTokens];
    for ([[maybe_unused]] int j : llvm::seq(square_bracket_depth)) {
      os << "]";
    }
    for ([[maybe_unused]] int j : llvm::seq(paren_depth)) {
      os << ")";
    }
    // Close curly braces on their own lines with matching de-indentation.
    for (int j : llvm::reverse(llvm::seq(curly_brace_depth))) {
      os << "\n";
      os.indent(j * 2) << "}";
    }
    os << ids[(i * 2 + 1) % NumTokens] << "\n";
  }
  std::string source = os.TakeStr();
  LexerBenchHelper helper(source);
  for (auto _ : state) {
    TokenizedBuffer buffer = helper.Lex();
    // Ensure that lexing actually occurs for benchmarking and that it doesn't
    // hit errors that would skew the benchmark results.
    CARBON_CHECK(!buffer.has_errors(), "{0}", helper.DiagnoseErrors());
  }
  state.SetBytesProcessed(state.iterations() * source.size());
  state.counters["tokens_per_second"] = benchmark::Counter(
      NumTokens, benchmark::Counter::kIsIterationInvariantRate);
  state.counters["lines_per_second"] =
      benchmark::Counter(llvm::StringRef(source).count('\n'),
                         benchmark::Counter::kIsIterationInvariantRate);
}
// Sweep each group kind's depth in isolation, then sweep paren and square
// bracket depth underneath maximal curly-brace nesting.
BENCHMARK(BM_GroupingSymbols)
    ->ArgsProduct({
        {1, 2, 3, 4, 8, 16, 32},
        {0},
        {0},
    })
    ->ArgsProduct({
        {0},
        {1, 2, 3, 4, 8, 16, 32},
        {0},
    })
    ->ArgsProduct({
        {0},
        {0},
        {1, 2, 3, 4, 8, 16, 32},
    })
    ->ArgsProduct({
        {32},
        {1, 2, 3, 4, 8, 16, 32},
        {0},
    })
    ->ArgsProduct({
        {32},
        {32},
        {1, 2, 3, 4, 8, 16, 32},
    });
  443. // Benchmark to stress the lexing of blank lines. This uses a simple, easy to
  444. // lex token, but separates each one by varying numbers of blank lines.
  445. auto BM_BlankLines(benchmark::State& state) -> void {
  446. int num_blank_lines = state.range(0);
  447. std::string separator(num_blank_lines, '\n');
  448. std::string source = RandomIdentifierSeq(3, 5, /*uniform=*/true, separator);
  449. LexerBenchHelper helper(source);
  450. for (auto _ : state) {
  451. TokenizedBuffer buffer = helper.Lex();
  452. // Ensure that lexing actually occurs for benchmarking and that it doesn't
  453. // hit errors that would skew the benchmark results.
  454. CARBON_CHECK(!buffer.has_errors(), "{0}", helper.DiagnoseErrors());
  455. }
  456. state.SetBytesProcessed(state.iterations() * source.size());
  457. state.counters["tokens_per_second"] = benchmark::Counter(
  458. NumTokens, benchmark::Counter::kIsIterationInvariantRate);
  459. state.counters["lines_per_second"] =
  460. benchmark::Counter(llvm::StringRef(source).count('\n'),
  461. benchmark::Counter::kIsIterationInvariantRate);
  462. }
  463. BENCHMARK(BM_BlankLines)->RangeMultiplier(4)->Range(1, 128);
  464. // Benchmark to stress the lexing of comment lines. This uses a simple, easy to
  465. // lex token, but separates each one by varying numbers of comment lines, with
  466. // varying comment line length and indentation.
  467. auto BM_CommentLines(benchmark::State& state) -> void {
  468. int num_comment_lines = state.range(0);
  469. int comment_length = state.range(1);
  470. int comment_indent = state.range(2);
  471. RawStringOstream os;
  472. os << "\n";
  473. for (int i : llvm::seq(num_comment_lines)) {
  474. static_cast<void>(i);
  475. os << std::string(comment_indent, ' ') << "//"
  476. << std::string(comment_length, ' ') << "\n";
  477. }
  478. std::string source =
  479. RandomIdentifierSeq(3, 5, /*uniform=*/true, os.TakeStr());
  480. LexerBenchHelper helper(source);
  481. for (auto _ : state) {
  482. TokenizedBuffer buffer = helper.Lex();
  483. // Ensure that lexing actually occurs for benchmarking and that it doesn't
  484. // hit errors that would skew the benchmark results.
  485. CARBON_CHECK(!buffer.has_errors(), "{0}", helper.DiagnoseErrors());
  486. }
  487. state.SetBytesProcessed(state.iterations() * source.size());
  488. state.counters["tokens_per_second"] = benchmark::Counter(
  489. NumTokens, benchmark::Counter::kIsIterationInvariantRate);
  490. state.counters["lines_per_second"] =
  491. benchmark::Counter(llvm::StringRef(source).count('\n'),
  492. benchmark::Counter::kIsIterationInvariantRate);
  493. }
  494. BENCHMARK(BM_CommentLines)
  495. ->ArgsProduct({
  496. // How many lines of comment. Focused on a couple of small and checking
  497. // how it scales up to large blocks.
  498. {1, 4, 128},
  499. // Comment lengths: the two extremes and a middling length.
  500. {0, 30, 70},
  501. // Comment indentations.
  502. {0, 2, 8},
  503. });
  504. // This is a speed-of-light benchmark that should reflect memory bandwidth
  505. // (ideally) of simply reading all the source code. For speed-of-light we use
  506. // `strcpy` -- this both examines ever byte of the input looking for a null to
  507. // end the copy, and also writes to a data structure of roughly the same size as
  508. // the input. This routine is one we expect to be *very* well optimized and give
  509. // a good approximation of the fastest possible lexer given the physical
  510. // constraints of the machine. Note that which particular source we use as input
  511. // here isn't especially interesting, so we just pick one and should update it
  512. // to reflect whatever distribution is most realistic long-term. The
  513. // bytes/second throughput is the important output of this routine.
  514. auto BM_SpeedOfLightStrCpy(benchmark::State& state) -> void {
  515. std::string source = RandomSource(DefaultSourceDist);
  516. // A buffer to write the null-terminated contents of `source` into.
  517. llvm::OwningArrayRef<char> buffer(source.size() + 1);
  518. for (auto _ : state) {
  519. const char* text = source.data();
  520. benchmark::DoNotOptimize(text);
  521. strcpy(buffer.data(), text);
  522. benchmark::DoNotOptimize(buffer.data());
  523. }
  524. state.SetBytesProcessed(state.iterations() * source.size());
  525. state.counters["tokens_per_second"] = benchmark::Counter(
  526. NumTokens, benchmark::Counter::kIsIterationInvariantRate);
  527. state.counters["lines_per_second"] =
  528. benchmark::Counter(llvm::StringRef(source).count('\n'),
  529. benchmark::Counter::kIsIterationInvariantRate);
  530. }
  531. BENCHMARK(BM_SpeedOfLightStrCpy);
// This is a speed-of-light benchmark that builds up a best-case byte-wise table
// dispatch using guaranteed tail recursion. The goal is both to ensure the
// general technique can reasonably hit the level of performance we need and to
// establish how far from this speed of light the actual lexer currently sits.
//
// A major impact on the observed performance of this technique is how many
// different functions are reached in this dispatch loop. This benchmark
// infrastructure tries to bracket the range of performance this technique
// affords with different numbers of dispatch target functions.

// Signature shared by every dispatch target: consume the byte at `index`,
// advance, and tail-call the next target.
using DispatchPtrT = auto (*)(ssize_t& index, const char* text, char* buffer)
    -> void;
// One dispatch target per possible byte value.
using DispatchTableT = std::array<DispatchPtrT, 256>;
// Fallback dispatch target: copies the current byte, advances, and tail-calls
// the table entry for the next byte. The table is a template parameter so the
// tail call can be resolved against a specific constant table.
template <const DispatchTableT& Table>
auto BasicDispatch(ssize_t& index, const char* text, char* buffer) -> void {
  *buffer = text[index];
  ++index;
  // NOLINTNEXTLINE(readability-avoid-return-with-void-value): For musttail.
  [[clang::musttail]] return Table[static_cast<unsigned char>(text[index])](
      index, text, buffer);
}
// Dispatch target specialized for a single character `C`. Behaviorally
// identical to `BasicDispatch`, but each instantiation is a distinct function,
// letting the benchmark control how many dispatch targets exist.
template <const DispatchTableT& Table, char C>
auto SpecializedDispatch(ssize_t& index, const char* text, char* buffer)
    -> void {
  // This target must only ever be installed at index `C` of the table.
  CARBON_CHECK(C == text[index]);
  *buffer = C;
  ++index;
  // NOLINTNEXTLINE(readability-avoid-return-with-void-value): For musttail.
  [[clang::musttail]] return Table[static_cast<unsigned char>(text[index])](
      index, text, buffer);
}
// A sample of the symbol characters used in Carbon code. Doesn't need to be
// perfect, as we just need to have a reasonably large # of distinct dispatch
// functions.
constexpr char DispatchSpecializableSymbols[] = {
    '!', '%', '(', ')', '*', '+', ',', '-', '.', ':',
    ';', '<', '=', '>', '?', '[', ']', '{', '}', '~',
};
// Create an array of all the characters we can specialize dispatch over --
// [0-9A-Za-z] and the symbols above. Similar to the above symbols, doesn't need
// to be exhaustive. The array size is spelled out in the type so the
// initializing lambda can reference `sizeof` of the (complete) declared type.
constexpr std::array<char, 26 * 2 + 10 + sizeof(DispatchSpecializableSymbols)>
    DispatchSpecializableChars = []() {
      constexpr int Size = sizeof(DispatchSpecializableChars);
      std::array<char, Size> chars = {};
      int i = 0;
      // Digits, then uppercase, then lowercase, then the symbol sample.
      for (char c = '0'; c <= '9'; ++c) {
        chars[i] = c;
        ++i;
      }
      for (char c = 'A'; c <= 'Z'; ++c) {
        chars[i] = c;
        ++i;
      }
      for (char c = 'a'; c <= 'z'; ++c) {
        chars[i] = c;
        ++i;
      }
      for (char c : DispatchSpecializableSymbols) {
        chars[i] = c;
        ++i;
      }
      // Every slot must have been filled exactly once.
      CARBON_CHECK(i == Size);
      return chars;
    }();
// Instantiate a number of specialized dispatch functions for characters in the
// array above, and assign those function addresses to the character's entry in
// the provided table. The provided `tmp_table` is a temporary that will
// eventually initialize the provided `Table` constant, so the constant is what
// we propagate to the instantiated function and the temporary is the one we
// initialize.
template <const DispatchTableT& Table, size_t... Indices>
constexpr auto SpecializeDispatchTable(
    DispatchTableT& tmp_table, std::index_sequence<Indices...> /*indices*/)
    -> void {
  static_assert(sizeof...(Indices) <= sizeof(DispatchSpecializableChars));
  // Fold over the index pack: each index installs the specialization for its
  // character at that character's byte value in the table.
  ((tmp_table[static_cast<unsigned char>(DispatchSpecializableChars[Indices])] =
        &SpecializedDispatch<Table, DispatchSpecializableChars[Indices]>),
   ...);
}
// The maximum number of dispatch targets is the size of the array + 1 (for the
// base case target). The null-byte terminator target is installed separately
// and doesn't count against this limit.
constexpr int MaxDispatchTargets = sizeof(DispatchSpecializableChars) + 1;
// Dispatch tables with a provided number of distinct dispatch targets. There
// will always be one additional target for the null byte to end the loop.
// Note that the initializer references `DispatchTable<NumDispatchTargets>`
// itself: targets need the final table's address for their tail calls.
template <int NumDispatchTargets>
constexpr DispatchTableT DispatchTable = []() {
  static_assert(NumDispatchTargets > 0, "Need at least one dispatch target.");
  static_assert(NumDispatchTargets <= MaxDispatchTargets,
                "Limited number of dispatch targets available.");
  DispatchTableT tmp_table = {};
  // Start with the basic dispatch target.
  for (int i = 0; i < 256; ++i) {
    tmp_table[i] = &BasicDispatch<DispatchTable<NumDispatchTargets>>;
  }
  // NOLINTNEXTLINE(readability-braces-around-statements): False positive.
  if constexpr (NumDispatchTargets > 1) {
    // Add additional dispatch targets from our specializable array.
    SpecializeDispatchTable<DispatchTable<NumDispatchTargets>>(
        tmp_table, std::make_index_sequence<NumDispatchTargets - 1>());
  }
  // Special case the null byte index to end the tail-dispatch.
  tmp_table[0] =
      +[](ssize_t& index, const char* text, char* /*buffer*/) -> void {
    CARBON_CHECK(text[index] == '\0');
    return;
  };
  return tmp_table;
}();
// Speed-of-light benchmark driving the byte-wise tail-dispatch machinery above
// over a random source, parameterized on how many distinct dispatch targets
// exist in the table.
template <int NumDispatchTargets>
auto BM_SpeedOfLightDispatch(benchmark::State& state) -> void {
  std::string source = RandomSource(DefaultSourceDist);
  // A buffer to write to, simulating some minimal write traffic.
  llvm::OwningArrayRef<char> buffer(source.size());
  for (auto _ : state) {
    const char* text = source.data();
    benchmark::DoNotOptimize(text);
    // Use `ssize_t` to minimize indexing overhead.
    ssize_t i = 0;
    // The dispatch table tail-recurses through the entire string.
    DispatchTable<NumDispatchTargets>[static_cast<unsigned char>(text[i])](
        i, text, buffer.data());
    // The null-byte target leaves `i` at the terminator, i.e. the full length.
    CARBON_CHECK(i == static_cast<ssize_t>(source.size()));
    benchmark::DoNotOptimize(buffer.data());
  }
  state.SetBytesProcessed(state.iterations() * source.size());
  state.counters["tokens_per_second"] = benchmark::Counter(
      NumTokens, benchmark::Counter::kIsIterationInvariantRate);
  state.counters["lines_per_second"] =
      benchmark::Counter(llvm::StringRef(source).count('\n'),
                         benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SpeedOfLightDispatch<1>);
BENCHMARK(BM_SpeedOfLightDispatch<2>);
BENCHMARK(BM_SpeedOfLightDispatch<4>);
BENCHMARK(BM_SpeedOfLightDispatch<8>);
BENCHMARK(BM_SpeedOfLightDispatch<16>);
BENCHMARK(BM_SpeedOfLightDispatch<32>);
BENCHMARK(BM_SpeedOfLightDispatch<MaxDispatchTargets>);
  670. } // namespace
  671. } // namespace Carbon::Lex