  1. // Part of the Carbon Language project, under the Apache License v2.0 with LLVM
  2. // Exceptions. See /LICENSE for license information.
  3. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. #include <benchmark/benchmark.h>
  5. #include <algorithm>
  6. #include "absl/random/random.h"
  7. #include "common/check.h"
  8. #include "llvm/ADT/Sequence.h"
  9. #include "llvm/ADT/StringExtras.h"
  10. #include "toolchain/diagnostics/diagnostic_emitter.h"
  11. #include "toolchain/diagnostics/null_diagnostics.h"
  12. #include "toolchain/lex/token_kind.h"
  13. #include "toolchain/lex/tokenized_buffer.h"
  14. namespace Carbon::Lex {
  15. namespace {
  16. // A large value for measurement stability without making benchmarking too slow.
  17. // Needs to be a multiple of 100 so we can easily divide it up into percentages,
  18. // and 1% itself needs to not be too tiny. This makes 100,000 a great balance.
  19. constexpr int NumTokens = 100'000;
  20. auto IdentifierStartChars() -> llvm::ArrayRef<char> {
  21. static llvm::SmallVector<char> chars = [] {
  22. llvm::SmallVector<char> chars;
  23. chars.push_back('_');
  24. for (char c : llvm::seq_inclusive('A', 'Z')) {
  25. chars.push_back(c);
  26. }
  27. for (char c : llvm::seq_inclusive('a', 'z')) {
  28. chars.push_back(c);
  29. }
  30. return chars;
  31. }();
  32. return chars;
  33. }
  34. auto IdentifierChars() -> llvm::ArrayRef<char> {
  35. static llvm::SmallVector<char> chars = [] {
  36. llvm::ArrayRef<char> start_chars = IdentifierStartChars();
  37. llvm::SmallVector<char> chars(start_chars.begin(), start_chars.end());
  38. for (char c : llvm::seq_inclusive('0', '9')) {
  39. chars.push_back(c);
  40. }
  41. return chars;
  42. }();
  43. return chars;
  44. }
  45. // Generates a random identifier string of the specified length using the
  46. // provided RNG BitGen.
  47. auto GenerateRandomIdentifier(absl::BitGen& gen, int length) -> std::string {
  48. llvm::ArrayRef<char> start_chars = IdentifierStartChars();
  49. llvm::ArrayRef<char> chars = IdentifierChars();
  50. std::string id_result;
  51. llvm::raw_string_ostream os(id_result);
  52. llvm::StringRef id;
  53. do {
  54. // Erase any prior attempts to find an identifier.
  55. id_result.clear();
  56. os << start_chars[absl::Uniform<int>(gen, 0, start_chars.size())];
  57. for (int j : llvm::seq(0, length)) {
  58. static_cast<void>(j);
  59. os << chars[absl::Uniform<int>(gen, 0, chars.size())];
  60. }
  61. // Check if we ended up forming an integer type literal or a keyword, and
  62. // try again.
  63. id = llvm::StringRef(id_result);
  64. } while (
  65. llvm::any_of(TokenKind::KeywordTokens,
  66. [id](auto token) { return id == token.fixed_spelling(); }) ||
  67. ((id.consume_front("i") || id.consume_front("u") ||
  68. id.consume_front("f")) &&
  69. llvm::all_of(id, [](const char c) { return llvm::isDigit(c); })));
  70. return id_result;
  71. }
// Get a static pool of random identifiers with the desired distribution.
//
// Template parameters:
// - `MinLength`/`MaxLength`: inclusive length bounds; only consulted on the
//   uniform-distribution path.
// - `Uniform`: when true, lengths rotate round-robin through
//   [MinLength, MaxLength]; when false, lengths follow the non-uniform,
//   empirically observed histogram encoded below (lengths 1 through 64).
//
// The pool is built exactly once (function-local static) and holds NumTokens
// identifiers in generation order; callers that need a random ordering (e.g.
// RandomIdentifierSeq) shuffle the result themselves.
template <int MinLength = 1, int MaxLength = 64, bool Uniform = false>
auto GetRandomIdentifiers() -> const std::array<std::string, NumTokens>& {
  static_assert(MinLength <= MaxLength);
  static_assert(
      Uniform || MaxLength <= 64,
      "Cannot produce a meaningful non-uniform distribution of lengths longer "
      "than 64 as those are exceedingly rare in our observed data sets.");
  static const std::array<std::string, NumTokens> id_storage = [] {
    // Bucket `i` holds the desired relative count of identifiers whose length
    // is `i + 1`. Only used on the non-uniform path.
    std::array<int, 64> id_length_counts;
    // For non-uniform distribution, we simulate a distribution roughly based on
    // the observed histogram of identifier lengths, but smoothed a bit and
    // reduced to small counts so that we cycle through all the lengths
    // reasonably quickly. We want sampling of even 10% of NumTokens from this
    // in a round-robin form to not be skewed overly much. This still inherently
    // compresses the long tail as we'd rather have coverage even though it
    // distorts the distribution a bit.
    //
    // The distribution here comes from a script that analyzes source code run
    // over a few directories of LLVM. The script renders a visual ascii-art
    // histogram along with the data for each bucket, and that output is
    // included in comments above each bucket size below to help visualize the
    // rough shape we're aiming for.
    //
    // 1 characters [3976] ███████████████████████████████▊
    id_length_counts[0] = 40;
    // 2 characters [3724] █████████████████████████████▊
    id_length_counts[1] = 40;
    // 3 characters [4173] █████████████████████████████████▍
    id_length_counts[2] = 40;
    // 4 characters [5000] ████████████████████████████████████████
    id_length_counts[3] = 50;
    // 5 characters [1568] ████████████▌
    id_length_counts[4] = 20;
    // 6 characters [2226] █████████████████▊
    id_length_counts[5] = 20;
    // 7 characters [2380] ███████████████████
    id_length_counts[6] = 20;
    // 8 characters [1786] ██████████████▎
    id_length_counts[7] = 18;
    // 9 characters [1397] ███████████▏
    id_length_counts[8] = 12;
    // 10 characters [ 739] █████▉
    id_length_counts[9] = 12;
    // 11 characters [ 779] ██████▎
    id_length_counts[10] = 12;
    // 12 characters [1344] ██████████▊
    id_length_counts[11] = 12;
    // 13 characters [ 498] ████
    id_length_counts[12] = 5;
    // 14 characters [ 284] ██▎
    id_length_counts[13] = 3;
    // 15 characters [ 172] █▍
    // 16 characters [ 278] ██▎
    // 17 characters [ 191] █▌
    // 18 characters [ 207] █▋
    for (int i : llvm::seq(14, 18)) {
      id_length_counts[i] = 2;
    }
    // 19 - 63 characters are all <100 but non-zero, and we map them to 1 for
    // coverage despite slightly over weighting the tail.
    for (int i : llvm::seq(18, 64)) {
      id_length_counts[i] = 1;
    }
    // Used to track the different count buckets when in a non-uniform
    // distribution.
    int length_bucket_index = 0;
    int length_count = 0;
    std::array<std::string, NumTokens> ids;
    absl::BitGen gen;
    for (auto [i, id] : llvm::enumerate(ids)) {
      if (Uniform) {
        // Rather than using randomness, for a uniform distribution rotate
        // lengths in round-robin to get a deterministic and exact size on every
        // run. We will then shuffle them at the end to produce a random
        // ordering. (The shuffle itself happens in callers such as
        // RandomIdentifierSeq, not here.)
        int length = MinLength + i % (1 + MaxLength - MinLength);
        id = GenerateRandomIdentifier(gen, length);
        continue;
      }
      // For non-uniform distribution, walk through each each length bucket
      // until our count matches the desired distribution, and then move to the
      // next. Note the id is generated *before* the bucket-advance check, so
      // each bucket yields one more identifier than its configured count.
      id = GenerateRandomIdentifier(gen, length_bucket_index + 1);
      if (length_count < id_length_counts[length_bucket_index]) {
        ++length_count;
      } else {
        // Wrap around so a full pass over all 64 buckets restarts at length 1.
        length_bucket_index =
            (length_bucket_index + 1) % id_length_counts.size();
        length_count = 0;
      }
    }
    return ids;
  }();
  return id_storage;
}
  168. // Compute a random sequence of just identifiers.
  169. template <int MinLength = 1, int MaxLength = 64, bool Uniform = false>
  170. auto RandomIdentifierSeq() -> std::string {
  171. // Get a static pool of identifiers with the desired distribution.
  172. const std::array<std::string, NumTokens>& ids =
  173. GetRandomIdentifiers<MinLength, MaxLength, Uniform>();
  174. // Shuffle tokens so we get exactly one of each identifier but in a random
  175. // order.
  176. std::array<llvm::StringRef, NumTokens> tokens;
  177. for (int i : llvm::seq(NumTokens)) {
  178. tokens[i] = ids[i];
  179. }
  180. std::shuffle(tokens.begin(), tokens.end(), absl::BitGen());
  181. return llvm::join(tokens, " ");
  182. }
  183. auto GetSymbolTokenTable() -> llvm::ArrayRef<TokenKind> {
  184. // Build our own table of symbols so we can use repetitions to skew the
  185. // distribution.
  186. static auto symbol_token_table_storage = [] {
  187. llvm::SmallVector<TokenKind> table;
  188. #define CARBON_SYMBOL_TOKEN(TokenName, Spelling) \
  189. table.push_back(TokenKind::TokenName);
  190. #define CARBON_OPENING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, ClosingName)
  191. #define CARBON_CLOSING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, OpeningName)
  192. #include "toolchain/lex/token_kind.def"
  193. table.insert(table.end(), 32, TokenKind::Semi);
  194. table.insert(table.end(), 16, TokenKind::Comma);
  195. table.insert(table.end(), 12, TokenKind::Period);
  196. table.insert(table.end(), 8, TokenKind::Colon);
  197. table.insert(table.end(), 8, TokenKind::Equal);
  198. table.insert(table.end(), 4, TokenKind::Amp);
  199. table.insert(table.end(), 4, TokenKind::ColonExclaim);
  200. table.insert(table.end(), 4, TokenKind::EqualEqual);
  201. table.insert(table.end(), 4, TokenKind::ExclaimEqual);
  202. table.insert(table.end(), 4, TokenKind::MinusGreater);
  203. table.insert(table.end(), 4, TokenKind::Star);
  204. return table;
  205. }();
  206. return symbol_token_table_storage;
  207. }
  208. // Compute a random sequence of mixed symbols, keywords, and identifiers, with
  209. // percentages of each according to the parameters.
  210. auto RandomMixedSeq(int symbol_percent, int keyword_percent) -> std::string {
  211. CARBON_CHECK(0 <= symbol_percent && symbol_percent <= 100)
  212. << "Must be a percent: [0, 100].";
  213. CARBON_CHECK(0 <= keyword_percent && keyword_percent <= 100)
  214. << "Must be a percent: [0, 100].";
  215. CARBON_CHECK((symbol_percent + keyword_percent) <= 100)
  216. << "Cannot have >100%.";
  217. static_assert((NumTokens % 100) == 0,
  218. "The number of tokens must be divisible by 100 so that we can "
  219. "easily scale integer percentages up to it.");
  220. // Get static pools of symbols, keywords, and identifiers.
  221. llvm::ArrayRef<TokenKind> symbols = GetSymbolTokenTable();
  222. llvm::ArrayRef<TokenKind> keywords = TokenKind::KeywordTokens;
  223. const std::array<std::string, NumTokens>& ids = GetRandomIdentifiers();
  224. // Build a list of StringRefs from the different types with the desired
  225. // distribution, then shuffle that list.
  226. std::array<llvm::StringRef, NumTokens> tokens;
  227. int num_symbols = (NumTokens / 100) * symbol_percent;
  228. int num_keywords = (NumTokens / 100) * keyword_percent;
  229. int num_identifiers = NumTokens - num_symbols - num_keywords;
  230. CARBON_CHECK(num_identifiers == 0 || num_identifiers > 500)
  231. << "We require at least 500 identifiers as we need to collect a "
  232. "reasonable number of samples to end up with a reasonable "
  233. "distribution of lengths.";
  234. for (int i : llvm::seq(num_symbols)) {
  235. tokens[i] = symbols[i % symbols.size()].fixed_spelling();
  236. }
  237. for (int i : llvm::seq(num_keywords)) {
  238. tokens[num_symbols + i] = keywords[i % keywords.size()].fixed_spelling();
  239. }
  240. for (int i : llvm::seq(num_identifiers)) {
  241. // We always have enough identifiers, so no need to mod here.
  242. tokens[num_symbols + num_keywords + i] = ids[i];
  243. }
  244. std::shuffle(tokens.begin(), tokens.end(), absl::BitGen());
  245. return llvm::join(tokens, " ");
  246. }
// Helper that owns an in-memory source buffer and lexes it on demand.
//
// NOTE(review): `source_` is initialized in the constructor's init list from
// `MakeSourceBuffer`, which reads both `fs_` and `filename_`. The member
// declaration order below (fs_, filename_, then source_) is therefore
// load-bearing: members initialize in declaration order, so do not reorder.
class LexerBenchHelper {
 public:
  explicit LexerBenchHelper(llvm::StringRef text)
      : source_(MakeSourceBuffer(text)) {}

  // Lexes the stored source, routing diagnostics to the null consumer.
  auto Lex() -> TokenizedBuffer {
    DiagnosticConsumer& consumer = NullDiagnosticConsumer();
    return TokenizedBuffer::Lex(source_, consumer);
  }

  // Re-lexes the stored source and returns the rendered diagnostic text.
  // CHECK-fails if no errors were produced, so only call this after a lex
  // that is known to have failed.
  auto DiagnoseErrors() -> std::string {
    std::string result;
    llvm::raw_string_ostream out(result);
    StreamDiagnosticConsumer consumer(out);
    auto buffer = TokenizedBuffer::Lex(source_, consumer);
    consumer.Flush();
    CARBON_CHECK(buffer.has_errors())
        << "Asked to diagnose errors but none found!";
    return result;
  }

 private:
  // Registers `text` under `filename_` in the in-memory filesystem and builds
  // a SourceBuffer from it. CHECK-fails if the file cannot be added.
  auto MakeSourceBuffer(llvm::StringRef text) -> SourceBuffer {
    CARBON_CHECK(fs_.addFile(filename_, /*ModificationTime=*/0,
                             llvm::MemoryBuffer::getMemBuffer(text)));
    return std::move(*SourceBuffer::CreateFromFile(
        fs_, filename_, ConsoleDiagnosticConsumer()));
  }

  llvm::vfs::InMemoryFileSystem fs_;
  std::string filename_ = "test.carbon";
  SourceBuffer source_;
};
  276. void BM_ValidKeywords(benchmark::State& state) {
  277. absl::BitGen gen;
  278. std::array<llvm::StringRef, NumTokens> tokens;
  279. for (int i : llvm::seq(NumTokens)) {
  280. tokens[i] = TokenKind::KeywordTokens[i % TokenKind::KeywordTokens.size()]
  281. .fixed_spelling();
  282. }
  283. std::shuffle(tokens.begin(), tokens.end(), gen);
  284. std::string source = llvm::join(tokens, " ");
  285. LexerBenchHelper helper(source);
  286. for (auto _ : state) {
  287. TokenizedBuffer buffer = helper.Lex();
  288. CARBON_CHECK(!buffer.has_errors());
  289. }
  290. state.SetBytesProcessed(state.iterations() * source.size());
  291. state.counters["tokens_per_second"] = benchmark::Counter(
  292. NumTokens, benchmark::Counter::kIsIterationInvariantRate);
  293. }
  294. BENCHMARK(BM_ValidKeywords);
  295. template <int MinLength, int MaxLength, bool Uniform>
  296. void BM_ValidIdentifiers(benchmark::State& state) {
  297. std::string source = RandomIdentifierSeq<MinLength, MaxLength, Uniform>();
  298. LexerBenchHelper helper(source);
  299. for (auto _ : state) {
  300. TokenizedBuffer buffer = helper.Lex();
  301. CARBON_CHECK(!buffer.has_errors()) << helper.DiagnoseErrors();
  302. }
  303. state.SetBytesProcessed(state.iterations() * source.size());
  304. state.counters["tokens_per_second"] = benchmark::Counter(
  305. NumTokens, benchmark::Counter::kIsIterationInvariantRate);
  306. }
  307. // Benchmark the non-uniform distribution we observe in C++ code.
  308. BENCHMARK(BM_ValidIdentifiers<1, 64, /*Uniform=*/false>);
  309. // Also benchmark a few uniform distribution ranges of identifier widths to
  310. // cover different patterns that emerge with small, medium, and longer
  311. // identifiers.
  312. BENCHMARK(BM_ValidIdentifiers<1, 1, /*Uniform=*/true>);
  313. BENCHMARK(BM_ValidIdentifiers<3, 5, /*Uniform=*/true>);
  314. BENCHMARK(BM_ValidIdentifiers<3, 16, /*Uniform=*/true>);
  315. BENCHMARK(BM_ValidIdentifiers<12, 64, /*Uniform=*/true>);
  316. void BM_ValidMix(benchmark::State& state) {
  317. int symbol_percent = state.range(0);
  318. int keyword_percent = state.range(1);
  319. std::string source = RandomMixedSeq(symbol_percent, keyword_percent);
  320. LexerBenchHelper helper(source);
  321. for (auto _ : state) {
  322. TokenizedBuffer buffer = helper.Lex();
  323. // Ensure that lexing actually occurs for benchmarking and that it doesn't
  324. // hit errors that would skew the benchmark results.
  325. CARBON_CHECK(!buffer.has_errors()) << helper.DiagnoseErrors();
  326. }
  327. state.SetBytesProcessed(state.iterations() * source.size());
  328. state.counters["tokens_per_second"] = benchmark::Counter(
  329. NumTokens, benchmark::Counter::kIsIterationInvariantRate);
  330. }
  331. // The distributions between symbols, keywords, and identifiers here are
  332. // guesses. Eventually, we should collect more data to help tune these, but
  333. // hopefully the performance isn't too sensitive and we can just cover a wide
  334. // range here.
  335. BENCHMARK(BM_ValidMix)
  336. ->Args({10, 40})
  337. ->Args({25, 30})
  338. ->Args({50, 20})
  339. ->Args({75, 10});
  340. } // namespace
  341. } // namespace Carbon::Lex