lex.cpp 56 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416
  1. // Part of the Carbon Language project, under the Apache License v2.0 with LLVM
  2. // Exceptions. See /LICENSE for license information.
  3. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. #include "toolchain/lex/lex.h"
  5. #include <array>
  6. #include "common/check.h"
  7. #include "common/variant_helpers.h"
  8. #include "llvm/ADT/StringRef.h"
  9. #include "llvm/ADT/StringSwitch.h"
  10. #include "llvm/Support/Compiler.h"
  11. #include "toolchain/base/value_store.h"
  12. #include "toolchain/lex/character_set.h"
  13. #include "toolchain/lex/helpers.h"
  14. #include "toolchain/lex/numeric_literal.h"
  15. #include "toolchain/lex/string_literal.h"
  16. #include "toolchain/lex/token_kind.h"
  17. #include "toolchain/lex/tokenized_buffer.h"
  18. #if __ARM_NEON
  19. #include <arm_neon.h>
  20. #define CARBON_USE_SIMD 1
  21. #elif __x86_64__
  22. #include <x86intrin.h>
  23. #define CARBON_USE_SIMD 1
  24. #else
  25. #define CARBON_USE_SIMD 0
  26. #endif
  27. namespace Carbon::Lex {
  28. // Implementation of the lexer logic itself.
  29. //
  30. // The design is that lexing can loop over the source buffer, consuming it into
  31. // tokens by calling into this API. This class handles the state and breaks down
  32. // the different lexing steps that may be used. It directly updates the provided
  33. // tokenized buffer with the lexed tokens.
  34. //
  35. // We'd typically put this in an anonymous namespace, but it is `friend`-ed by
  36. // the `TokenizedBuffer`. One of the important benefits of being in an anonymous
  37. // namespace is having internal linkage. That allows the optimizer to much more
  38. // aggressively inline away functions that are called in only one place. We keep
  39. // that benefit for now by using the `internal_linkage` attribute.
  40. //
  41. // TODO: Investigate ways to refactor the code that allow moving this into an
  42. // anonymous namespace without overly exposing implementation details of the
  43. // `TokenizedBuffer` or undermining the performance constraints of the lexer.
  44. class [[clang::internal_linkage]] Lexer {
  45. public:
  46. // Symbolic result of a lexing action. This indicates whether we successfully
  47. // lexed a token, or whether other lexing actions should be attempted.
  48. //
  49. // While it wraps a simple boolean state, its API both helps make the failures
  50. // more self documenting, and by consuming the actual token constructively
  51. // when one is produced, it helps ensure the correct result is returned.
  52. class LexResult {
  53. public:
  54. // Consumes (and discard) a valid token to construct a result
  55. // indicating a token has been produced. Relies on implicit conversions.
  56. // NOLINTNEXTLINE(google-explicit-constructor)
  57. LexResult(TokenIndex /*discarded_token*/) : LexResult(true) {}
  58. // Returns a result indicating no token was produced.
  59. static auto NoMatch() -> LexResult { return LexResult(false); }
  60. // Tests whether a token was produced by the lexing routine, and
  61. // the lexer can continue forming tokens.
  62. explicit operator bool() const { return formed_token_; }
  63. private:
  64. explicit LexResult(bool formed_token) : formed_token_(formed_token) {}
  65. bool formed_token_;
  66. };
  67. Lexer(SharedValueStores& value_stores, SourceBuffer& source,
  68. DiagnosticConsumer& consumer)
  69. : buffer_(value_stores, source),
  70. consumer_(consumer),
  71. converter_(&buffer_),
  72. emitter_(converter_, consumer_),
  73. token_converter_(&buffer_),
  74. token_emitter_(token_converter_, consumer_) {}
  75. // Find all line endings and create the line data structures.
  76. //
  77. // Explicitly kept out-of-line because this is a significant loop that is
  78. // useful to have in the profile and it doesn't simplify by inlining at all.
  79. // But because it can, the compiler will flatten this otherwise.
  80. [[gnu::noinline]] auto MakeLines(llvm::StringRef source_text) -> void;
  81. auto current_line() -> LineIndex { return LineIndex(line_index_); }
  82. auto current_line_info() -> TokenizedBuffer::LineInfo* {
  83. return &buffer_.line_infos_[line_index_];
  84. }
  85. auto ComputeColumn(ssize_t position) -> int {
  86. CARBON_DCHECK(position >= current_line_info()->start);
  87. return position - current_line_info()->start;
  88. }
  89. auto NoteWhitespace() -> void {
  90. buffer_.token_infos_.back().has_trailing_space = true;
  91. }
  92. auto SkipHorizontalWhitespace(llvm::StringRef source_text, ssize_t& position)
  93. -> void;
  94. auto LexHorizontalWhitespace(llvm::StringRef source_text, ssize_t& position)
  95. -> void;
  96. auto LexVerticalWhitespace(llvm::StringRef source_text, ssize_t& position)
  97. -> void;
  98. auto LexCommentOrSlash(llvm::StringRef source_text, ssize_t& position)
  99. -> void;
  100. auto LexComment(llvm::StringRef source_text, ssize_t& position) -> void;
  101. auto LexNumericLiteral(llvm::StringRef source_text, ssize_t& position)
  102. -> LexResult;
  103. auto LexStringLiteral(llvm::StringRef source_text, ssize_t& position)
  104. -> LexResult;
  105. auto LexOneCharSymbolToken(llvm::StringRef source_text, TokenKind kind,
  106. ssize_t& position) -> TokenIndex;
  107. auto LexOpeningSymbolToken(llvm::StringRef source_text, TokenKind kind,
  108. ssize_t& position) -> LexResult;
  109. auto LexClosingSymbolToken(llvm::StringRef source_text, TokenKind kind,
  110. ssize_t& position) -> LexResult;
  111. auto LexSymbolToken(llvm::StringRef source_text, ssize_t& position)
  112. -> LexResult;
  113. // Given a word that has already been lexed, determine whether it is a type
  114. // literal and if so form the corresponding token.
  115. auto LexWordAsTypeLiteralToken(llvm::StringRef word, int column) -> LexResult;
  116. auto LexKeywordOrIdentifier(llvm::StringRef source_text, ssize_t& position)
  117. -> LexResult;
  118. auto LexHash(llvm::StringRef source_text, ssize_t& position) -> LexResult;
  119. auto LexError(llvm::StringRef source_text, ssize_t& position) -> LexResult;
  120. auto LexFileStart(llvm::StringRef source_text, ssize_t& position) -> void;
  121. auto LexFileEnd(llvm::StringRef source_text, ssize_t position) -> void;
  122. auto DiagnoseAndFixMismatchedBrackets() -> void;
  123. // The main entry point for dispatching through the lexer's table. This method
  124. // should always fully consume the source text.
  125. auto Lex() && -> TokenizedBuffer;
  126. private:
  127. class ErrorRecoveryBuffer;
  128. TokenizedBuffer buffer_;
  129. ssize_t line_index_;
  130. llvm::SmallVector<TokenIndex> open_groups_;
  131. bool has_mismatched_brackets_ = false;
  132. ErrorTrackingDiagnosticConsumer consumer_;
  133. TokenizedBuffer::SourceBufferDiagnosticConverter converter_;
  134. LexerDiagnosticEmitter emitter_;
  135. TokenDiagnosticConverter token_converter_;
  136. TokenDiagnosticEmitter token_emitter_;
  137. };
  138. #if CARBON_USE_SIMD
  139. namespace {
  140. #if __ARM_NEON
  141. using SIMDMaskT = uint8x16_t;
  142. #elif __x86_64__
  143. using SIMDMaskT = __m128i;
  144. #else
  145. #error "Unsupported SIMD architecture!"
  146. #endif
  147. using SIMDMaskArrayT = std::array<SIMDMaskT, sizeof(SIMDMaskT) + 1>;
  148. } // namespace
  149. // A table of masks to include 0-16 bytes of an SSE register.
  150. static constexpr SIMDMaskArrayT PrefixMasks = []() constexpr {
  151. SIMDMaskArrayT masks = {};
  152. for (int i = 1; i < static_cast<int>(masks.size()); ++i) {
  153. masks[i] =
  154. // The SIMD types and constexpr require a C-style cast.
  155. // NOLINTNEXTLINE(google-readability-casting)
  156. (SIMDMaskT)(std::numeric_limits<unsigned __int128>::max() >>
  157. ((sizeof(SIMDMaskT) - i) * 8));
  158. }
  159. return masks;
  160. }();
  161. #endif // CARBON_USE_SIMD
  162. // A table of booleans that we can use to classify bytes as being valid
  163. // identifier start. This is used by raw identifier detection.
  164. static constexpr std::array<bool, 256> IsIdStartByteTable = [] {
  165. std::array<bool, 256> table = {};
  166. for (char c = 'A'; c <= 'Z'; ++c) {
  167. table[c] = true;
  168. }
  169. for (char c = 'a'; c <= 'z'; ++c) {
  170. table[c] = true;
  171. }
  172. table['_'] = true;
  173. return table;
  174. }();
  175. // A table of booleans that we can use to classify bytes as being valid
  176. // identifier (or keyword) characters. This is used in the generic,
  177. // non-vectorized fallback code to scan for length of an identifier.
  178. static constexpr std::array<bool, 256> IsIdByteTable = [] {
  179. std::array<bool, 256> table = IsIdStartByteTable;
  180. for (char c = '0'; c <= '9'; ++c) {
  181. table[c] = true;
  182. }
  183. return table;
  184. }();
  185. // Baseline scalar version, also available for scalar-fallback in SIMD code.
  186. // Uses `ssize_t` for performance when indexing in the loop.
  187. //
  188. // TODO: This assumes all Unicode characters are non-identifiers.
  189. static auto ScanForIdentifierPrefixScalar(llvm::StringRef text, ssize_t i)
  190. -> llvm::StringRef {
  191. const ssize_t size = text.size();
  192. while (i < size && IsIdByteTable[static_cast<unsigned char>(text[i])]) {
  193. ++i;
  194. }
  195. return text.substr(0, i);
  196. }
  197. #if CARBON_USE_SIMD && __x86_64__
  198. // The SIMD code paths uses a scheme derived from the techniques in Geoff
  199. // Langdale and Daniel Lemire's work on parsing JSON[1]. Specifically, that
  200. // paper outlines a technique of using two 4-bit indexed in-register look-up
  201. // tables (LUTs) to classify bytes in a branchless SIMD code sequence.
  202. //
  203. // [1]: https://arxiv.org/pdf/1902.08318.pdf
  204. //
  205. // The goal is to get a bit mask classifying different sets of bytes. For each
  206. // input byte, we first test for a high bit indicating a UTF-8 encoded Unicode
  207. // character. Otherwise, we want the mask bits to be set with the following
  208. // logic derived by inspecting the high nibble and low nibble of the input:
  209. // bit0 = 1 for `_`: high `0x5` and low `0xF`
  210. // bit1 = 1 for `0-9`: high `0x3` and low `0x0` - `0x9`
  211. // bit2 = 1 for `A-O` and `a-o`: high `0x4` or `0x6` and low `0x1` - `0xF`
  212. // bit3 = 1 for `P-Z` and 'p-z': high `0x5` or `0x7` and low `0x0` - `0xA`
  213. // bit4 = unused
  214. // bit5 = unused
  215. // bit6 = unused
  216. // bit7 = unused
  217. //
  218. // No bits set means definitively non-ID ASCII character.
  219. //
  220. // Bits 4-7 remain unused if we need to classify more characters.
  221. namespace {
  222. // Struct used to implement the nibble LUT for SIMD implementations.
  223. //
  224. // Forced to 16-byte alignment to ensure we can load it easily in SIMD code.
  225. struct alignas(16) NibbleLUT {
  226. auto Load() const -> __m128i {
  227. return _mm_load_si128(reinterpret_cast<const __m128i*>(this));
  228. }
  229. uint8_t nibble_0;
  230. uint8_t nibble_1;
  231. uint8_t nibble_2;
  232. uint8_t nibble_3;
  233. uint8_t nibble_4;
  234. uint8_t nibble_5;
  235. uint8_t nibble_6;
  236. uint8_t nibble_7;
  237. uint8_t nibble_8;
  238. uint8_t nibble_9;
  239. uint8_t nibble_a;
  240. uint8_t nibble_b;
  241. uint8_t nibble_c;
  242. uint8_t nibble_d;
  243. uint8_t nibble_e;
  244. uint8_t nibble_f;
  245. };
  246. } // namespace
  247. static constexpr NibbleLUT HighLUT = {
  248. .nibble_0 = 0b0000'0000,
  249. .nibble_1 = 0b0000'0000,
  250. .nibble_2 = 0b0000'0000,
  251. .nibble_3 = 0b0000'0010,
  252. .nibble_4 = 0b0000'0100,
  253. .nibble_5 = 0b0000'1001,
  254. .nibble_6 = 0b0000'0100,
  255. .nibble_7 = 0b0000'1000,
  256. .nibble_8 = 0b1000'0000,
  257. .nibble_9 = 0b1000'0000,
  258. .nibble_a = 0b1000'0000,
  259. .nibble_b = 0b1000'0000,
  260. .nibble_c = 0b1000'0000,
  261. .nibble_d = 0b1000'0000,
  262. .nibble_e = 0b1000'0000,
  263. .nibble_f = 0b1000'0000,
  264. };
  265. static constexpr NibbleLUT LowLUT = {
  266. .nibble_0 = 0b1000'1010,
  267. .nibble_1 = 0b1000'1110,
  268. .nibble_2 = 0b1000'1110,
  269. .nibble_3 = 0b1000'1110,
  270. .nibble_4 = 0b1000'1110,
  271. .nibble_5 = 0b1000'1110,
  272. .nibble_6 = 0b1000'1110,
  273. .nibble_7 = 0b1000'1110,
  274. .nibble_8 = 0b1000'1110,
  275. .nibble_9 = 0b1000'1110,
  276. .nibble_a = 0b1000'1100,
  277. .nibble_b = 0b1000'0100,
  278. .nibble_c = 0b1000'0100,
  279. .nibble_d = 0b1000'0100,
  280. .nibble_e = 0b1000'0100,
  281. .nibble_f = 0b1000'0101,
  282. };
  283. static auto ScanForIdentifierPrefixX86(llvm::StringRef text)
  284. -> llvm::StringRef {
  285. const auto high_lut = HighLUT.Load();
  286. const auto low_lut = LowLUT.Load();
  287. // Use `ssize_t` for performance here as we index memory in a tight loop.
  288. ssize_t i = 0;
  289. const ssize_t size = text.size();
  290. while ((i + 16) <= size) {
  291. __m128i input =
  292. _mm_loadu_si128(reinterpret_cast<const __m128i*>(text.data() + i));
  293. // The high bits of each byte indicate a non-ASCII character encoded using
  294. // UTF-8. Test those and fall back to the scalar code if present. These
  295. // bytes will also cause spurious zeros in the LUT results, but we can
  296. // ignore that because we track them independently here.
  297. #if __SSE4_1__
  298. if (!_mm_test_all_zeros(_mm_set1_epi8(0x80), input)) {
  299. break;
  300. }
  301. #else
  302. if (_mm_movemask_epi8(input) != 0) {
  303. break;
  304. }
  305. #endif
  306. // Do two LUT lookups and mask the results together to get the results for
  307. // both low and high nibbles. Note that we don't need to mask out the high
  308. // bit of input here because we track that above for UTF-8 handling.
  309. __m128i low_mask = _mm_shuffle_epi8(low_lut, input);
  310. // Note that the input needs to be masked to only include the high nibble or
  311. // we could end up with bit7 set forcing the result to a zero byte.
  312. __m128i input_high =
  313. _mm_and_si128(_mm_srli_epi32(input, 4), _mm_set1_epi8(0x0f));
  314. __m128i high_mask = _mm_shuffle_epi8(high_lut, input_high);
  315. __m128i mask = _mm_and_si128(low_mask, high_mask);
  316. // Now compare to find the completely zero bytes.
  317. __m128i id_byte_mask_vec = _mm_cmpeq_epi8(mask, _mm_setzero_si128());
  318. int tail_ascii_mask = _mm_movemask_epi8(id_byte_mask_vec);
  319. // Check if there are bits in the tail mask, which means zero bytes and the
  320. // end of the identifier. We could do this without materializing the scalar
  321. // mask on more recent CPUs, but we generally expect the median length we
  322. // encounter to be <16 characters and so we avoid the extra instruction in
  323. // that case and predict this branch to succeed so it is laid out in a
  324. // reasonable way.
  325. if (LLVM_LIKELY(tail_ascii_mask != 0)) {
  326. // Move past the definitively classified bytes that are part of the
  327. // identifier, and return the complete identifier text.
  328. i += __builtin_ctz(tail_ascii_mask);
  329. return text.substr(0, i);
  330. }
  331. i += 16;
  332. }
  333. return ScanForIdentifierPrefixScalar(text, i);
  334. }
  335. #endif // CARBON_USE_SIMD && __x86_64__
  336. // Scans the provided text and returns the prefix `StringRef` of contiguous
  337. // identifier characters.
  338. //
  339. // This is a performance sensitive function and where profitable uses vectorized
  340. // code sequences to optimize its scanning. When modifying, the identifier
  341. // lexing benchmarks should be checked for regressions.
  342. //
  343. // Identifier characters here are currently the ASCII characters `[0-9A-Za-z_]`.
  344. //
  345. // TODO: Currently, this code does not implement Carbon's design for Unicode
  346. // characters in identifiers. It does work on UTF-8 code unit sequences, but
  347. // currently considers non-ASCII characters to be non-identifier characters.
  348. // Some work has been done to ensure the hot loop, while optimized, retains
  349. // enough information to add Unicode handling without completely destroying the
  350. // relevant optimizations.
  351. static auto ScanForIdentifierPrefix(llvm::StringRef text) -> llvm::StringRef {
  352. // Dispatch to an optimized architecture optimized routine.
  353. #if CARBON_USE_SIMD && __x86_64__
  354. return ScanForIdentifierPrefixX86(text);
  355. #elif CARBON_USE_SIMD && __ARM_NEON
  356. // Somewhat surprisingly, there is basically nothing worth doing in SIMD on
  357. // Arm to optimize this scan. The Neon SIMD operations end up requiring you to
  358. // move from the SIMD unit to the scalar unit in the critical path of finding
  359. // the offset of the end of an identifier. Current ARM cores make the code
  360. // sequences here (quite) unpleasant. For example, on Apple M1 and similar
  361. // cores, the latency is as much as 10 cycles just to extract from the vector.
  362. // SIMD might be more interesting on Neoverse cores, but it'd be nice to avoid
  363. // core-specific tunings at this point.
  364. //
  365. // If this proves problematic and critical to optimize, the current leading
  366. // theory is to have the newline searching code also create a bitmask for the
  367. // entire source file of identifier and non-identifier bytes, and then use the
  368. // bit-counting instructions here to do a fast scan of that bitmask. However,
  369. // crossing that bridge will add substantial complexity to the newline
  370. // scanner, and so currently we just use a boring scalar loop that pipelines
  371. // well.
  372. #endif
  373. return ScanForIdentifierPrefixScalar(text, 0);
  374. }
  375. using DispatchFunctionT = auto(Lexer& lexer, llvm::StringRef source_text,
  376. ssize_t position) -> void;
  377. using DispatchTableT = std::array<DispatchFunctionT*, 256>;
  378. static constexpr std::array<TokenKind, 256> OneCharTokenKindTable = [] {
  379. std::array<TokenKind, 256> table = {};
  380. #define CARBON_ONE_CHAR_SYMBOL_TOKEN(TokenName, Spelling) \
  381. table[(Spelling)[0]] = TokenKind::TokenName;
  382. #define CARBON_OPENING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, ClosingName) \
  383. table[(Spelling)[0]] = TokenKind::TokenName;
  384. #define CARBON_CLOSING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, OpeningName) \
  385. table[(Spelling)[0]] = TokenKind::TokenName;
  386. #include "toolchain/lex/token_kind.def"
  387. return table;
  388. }();
  389. // We use a collection of static member functions for table-based dispatch to
  390. // lexer methods. These are named static member functions so that they show up
  391. // helpfully in profiles and backtraces, but they tend to not contain the
  392. // interesting logic and simply delegate to the relevant methods. All of their
  393. // signatures need to be exactly the same however in order to ensure we can
  394. // build efficient dispatch tables out of them. All of them end by doing a
  395. // must-tail return call to this routine. It handles continuing the dispatch
  396. // chain.
  397. static auto DispatchNext(Lexer& lexer, llvm::StringRef source_text,
  398. ssize_t position) -> void;
  399. // Define a set of dispatch functions that simply forward to a method that
  400. // lexes a token. This includes validating that an actual token was produced,
  401. // and continuing the dispatch.
  402. #define CARBON_DISPATCH_LEX_TOKEN(LexMethod) \
  403. static auto Dispatch##LexMethod(Lexer& lexer, llvm::StringRef source_text, \
  404. ssize_t position) -> void { \
  405. Lexer::LexResult result = lexer.LexMethod(source_text, position); \
  406. CARBON_CHECK(result) << "Failed to form a token!"; \
  407. [[clang::musttail]] return DispatchNext(lexer, source_text, position); \
  408. }
  409. CARBON_DISPATCH_LEX_TOKEN(LexError)
  410. CARBON_DISPATCH_LEX_TOKEN(LexSymbolToken)
  411. CARBON_DISPATCH_LEX_TOKEN(LexKeywordOrIdentifier)
  412. CARBON_DISPATCH_LEX_TOKEN(LexHash)
  413. CARBON_DISPATCH_LEX_TOKEN(LexNumericLiteral)
  414. CARBON_DISPATCH_LEX_TOKEN(LexStringLiteral)
  415. // A custom dispatch functions that pre-select the symbol token to lex.
  416. #define CARBON_DISPATCH_LEX_SYMBOL_TOKEN(LexMethod) \
  417. static auto Dispatch##LexMethod##SymbolToken( \
  418. Lexer& lexer, llvm::StringRef source_text, ssize_t position) -> void { \
  419. Lexer::LexResult result = lexer.LexMethod##SymbolToken( \
  420. source_text, \
  421. OneCharTokenKindTable[static_cast<unsigned char>( \
  422. source_text[position])], \
  423. position); \
  424. CARBON_CHECK(result) << "Failed to form a token!"; \
  425. [[clang::musttail]] return DispatchNext(lexer, source_text, position); \
  426. }
  427. CARBON_DISPATCH_LEX_SYMBOL_TOKEN(LexOneChar)
  428. CARBON_DISPATCH_LEX_SYMBOL_TOKEN(LexOpening)
  429. CARBON_DISPATCH_LEX_SYMBOL_TOKEN(LexClosing)
  430. // Define a set of non-token dispatch functions that handle things like
  431. // whitespace and comments.
  432. #define CARBON_DISPATCH_LEX_NON_TOKEN(LexMethod) \
  433. static auto Dispatch##LexMethod(Lexer& lexer, llvm::StringRef source_text, \
  434. ssize_t position) -> void { \
  435. lexer.LexMethod(source_text, position); \
  436. [[clang::musttail]] return DispatchNext(lexer, source_text, position); \
  437. }
  438. CARBON_DISPATCH_LEX_NON_TOKEN(LexHorizontalWhitespace)
  439. CARBON_DISPATCH_LEX_NON_TOKEN(LexVerticalWhitespace)
  440. CARBON_DISPATCH_LEX_NON_TOKEN(LexCommentOrSlash)
  441. // Build a table of function pointers that we can use to dispatch to the
  442. // correct lexer routine based on the first byte of source text.
  443. //
  444. // While it is tempting to simply use a `switch` on the first byte and
  445. // dispatch with cases into this, in practice that doesn't produce great code.
  446. // There seem to be two issues that are the root cause.
  447. //
  448. // First, there are lots of different values of bytes that dispatch to a
  449. // fairly small set of routines, and then some byte values that dispatch
  450. // differently for each byte. This pattern isn't one that the compiler-based
  451. // lowering of switches works well with -- it tries to balance all the cases,
  452. // and in doing so emits several compares and other control flow rather than a
  453. // simple jump table.
  454. //
  455. // Second, with a `case`, it isn't as obvious how to create a single, uniform
  456. // interface that is effective for *every* byte value, and thus makes for a
  457. // single consistent table-based dispatch. By forcing these to be function
  458. // pointers, we also coerce the code to use a strictly homogeneous structure
  459. // that can form a single dispatch table.
  460. //
  461. // These two actually interact -- the second issue is part of what makes the
  462. // non-table lowering in the first one desirable for many switches and cases.
  463. //
  464. // Ultimately, when table-based dispatch is such an important technique, we
  465. // get better results by taking full control and manually creating the
  466. // dispatch structures.
  467. //
  468. // The functions in this table also use tail-recursion to implement the loop
  469. // of the lexer. This is based on the technique described more fully for any
  470. // kind of byte-stream loop structure here:
  471. // https://blog.reverberate.org/2021/04/21/musttail-efficient-interpreters.html
  472. static constexpr auto MakeDispatchTable() -> DispatchTableT {
  473. DispatchTableT table = {};
  474. // First set the table entries to dispatch to our error token handler as the
  475. // base case. Everything valid comes from an override below.
  476. for (int i = 0; i < 256; ++i) {
  477. table[i] = &DispatchLexError;
  478. }
  479. // Symbols have some special dispatching. First, set the first character of
  480. // each symbol token spelling to dispatch to the symbol lexer. We don't
  481. // provide a pre-computed token here, so the symbol lexer will compute the
  482. // exact symbol token kind. We'll override this with more specific dispatch
  483. // below.
  484. #define CARBON_SYMBOL_TOKEN(TokenName, Spelling) \
  485. table[(Spelling)[0]] = &DispatchLexSymbolToken;
  486. #include "toolchain/lex/token_kind.def"
  487. // Now special cased single-character symbols that are guaranteed to not
  488. // join with another symbol. These are grouping symbols, terminators,
  489. // or separators in the grammar and have a good reason to be
  490. // orthogonal to any other punctuation. We do this separately because this
  491. // needs to override some of the generic handling above, and provide a
  492. // custom token.
  493. #define CARBON_ONE_CHAR_SYMBOL_TOKEN(TokenName, Spelling) \
  494. table[(Spelling)[0]] = &DispatchLexOneCharSymbolToken;
  495. #define CARBON_OPENING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, ClosingName) \
  496. table[(Spelling)[0]] = &DispatchLexOpeningSymbolToken;
  497. #define CARBON_CLOSING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, OpeningName) \
  498. table[(Spelling)[0]] = &DispatchLexClosingSymbolToken;
  499. #include "toolchain/lex/token_kind.def"
  500. // Override the handling for `/` to consider comments as well as a `/`
  501. // symbol.
  502. table['/'] = &DispatchLexCommentOrSlash;
  503. table['_'] = &DispatchLexKeywordOrIdentifier;
  504. // Note that we don't use `llvm::seq` because this needs to be `constexpr`
  505. // evaluated.
  506. for (unsigned char c = 'a'; c <= 'z'; ++c) {
  507. table[c] = &DispatchLexKeywordOrIdentifier;
  508. }
  509. for (unsigned char c = 'A'; c <= 'Z'; ++c) {
  510. table[c] = &DispatchLexKeywordOrIdentifier;
  511. }
  512. // We dispatch all non-ASCII UTF-8 characters to the identifier lexing
  513. // as whitespace characters should already have been skipped and the
  514. // only remaining valid Unicode characters would be part of an
  515. // identifier. That code can either accept or reject.
  516. for (int i = 0x80; i < 0x100; ++i) {
  517. table[i] = &DispatchLexKeywordOrIdentifier;
  518. }
  519. for (unsigned char c = '0'; c <= '9'; ++c) {
  520. table[c] = &DispatchLexNumericLiteral;
  521. }
  522. table['\''] = &DispatchLexStringLiteral;
  523. table['"'] = &DispatchLexStringLiteral;
  524. table['#'] = &DispatchLexHash;
  525. table[' '] = &DispatchLexHorizontalWhitespace;
  526. table['\t'] = &DispatchLexHorizontalWhitespace;
  527. table['\n'] = &DispatchLexVerticalWhitespace;
  528. return table;
  529. }
  530. static constexpr DispatchTableT DispatchTable = MakeDispatchTable();
  531. static auto DispatchNext(Lexer& lexer, llvm::StringRef source_text,
  532. ssize_t position) -> void {
  533. if (LLVM_LIKELY(position < static_cast<ssize_t>(source_text.size()))) {
  534. // The common case is to tail recurse based on the next character. Note
  535. // that because this is a must-tail return, this cannot fail to tail-call
  536. // and will not grow the stack. This is in essence a loop with dynamic
  537. // tail dispatch to the next stage of the loop.
  538. [[clang::musttail]] return DispatchTable[static_cast<unsigned char>(
  539. source_text[position])](lexer, source_text, position);
  540. }
  541. // When we finish the source text, stop recursing. We also hint this so that
  542. // the tail-dispatch is optimized as that's essentially the loop back-edge
  543. // and this is the loop exit.
  544. lexer.LexFileEnd(source_text, position);
  545. }
  546. auto Lexer::Lex() && -> TokenizedBuffer {
  547. llvm::StringRef source_text = buffer_.source_->text();
  548. // First build up our line data structures.
  549. MakeLines(source_text);
  550. ssize_t position = 0;
  551. LexFileStart(source_text, position);
  552. // Manually enter the dispatch loop. This call will tail-recurse through the
  553. // dispatch table until everything from source_text is consumed.
  554. DispatchNext(*this, source_text, position);
  555. if (consumer_.seen_error()) {
  556. buffer_.has_errors_ = true;
  557. }
  558. return std::move(buffer_);
  559. }
  560. auto Lexer::MakeLines(llvm::StringRef source_text) -> void {
  561. // We currently use `memchr` here which typically is well optimized to use
  562. // SIMD or other significantly faster than byte-wise scanning. We also use
  563. // carefully selected variables and the `ssize_t` type for performance and
  564. // code size of this hot loop.
  565. //
  566. // TODO: Eventually, we'll likely need to roll our own SIMD-optimized
  567. // routine here in order to handle CR+LF line endings, as we'll want those
  568. // to stay on the fast path. We'll also need to detect and diagnose Unicode
  569. // vertical whitespace. Starting with `memchr` should give us a strong
  570. // baseline performance target when adding those features.
  571. const char* const text = source_text.data();
  572. const ssize_t size = source_text.size();
  573. ssize_t start = 0;
  574. while (const char* nl = reinterpret_cast<const char*>(
  575. memchr(&text[start], '\n', size - start))) {
  576. ssize_t nl_index = nl - text;
  577. buffer_.AddLine(TokenizedBuffer::LineInfo(start, nl_index - start));
  578. start = nl_index + 1;
  579. }
  580. // The last line ends at the end of the file.
  581. buffer_.AddLine(TokenizedBuffer::LineInfo(start, size - start));
  582. // If the last line wasn't empty, the file ends with an unterminated line.
  583. // Add an extra blank line so that we never need to handle the special case
  584. // of being on the last line inside the lexer and needing to not increment
  585. // to the next line.
  586. if (start != size) {
  587. buffer_.AddLine(TokenizedBuffer::LineInfo(size, 0));
  588. }
  589. // Now that all the infos are allocated, get a fresh pointer to the first
  590. // info for use while lexing.
  591. line_index_ = 0;
  592. }
  593. auto Lexer::SkipHorizontalWhitespace(llvm::StringRef source_text,
  594. ssize_t& position) -> void {
  595. // Handle adjacent whitespace quickly. This comes up frequently for example
  596. // due to indentation. We don't expect *huge* runs, so just use a scalar
  597. // loop. While still scalar, this avoids repeated table dispatch and marking
  598. // whitespace.
  599. while (position < static_cast<ssize_t>(source_text.size()) &&
  600. (source_text[position] == ' ' || source_text[position] == '\t')) {
  601. ++position;
  602. }
  603. }
  604. auto Lexer::LexHorizontalWhitespace(llvm::StringRef source_text,
  605. ssize_t& position) -> void {
  606. CARBON_DCHECK(source_text[position] == ' ' || source_text[position] == '\t');
  607. NoteWhitespace();
  608. // Skip runs using an optimized code path.
  609. SkipHorizontalWhitespace(source_text, position);
  610. }
  611. auto Lexer::LexVerticalWhitespace(llvm::StringRef source_text,
  612. ssize_t& position) -> void {
  613. NoteWhitespace();
  614. ++line_index_;
  615. auto* line_info = current_line_info();
  616. ssize_t line_start = line_info->start;
  617. position = line_start;
  618. SkipHorizontalWhitespace(source_text, position);
  619. line_info->indent = position - line_start;
  620. }
  621. auto Lexer::LexCommentOrSlash(llvm::StringRef source_text, ssize_t& position)
  622. -> void {
  623. CARBON_DCHECK(source_text[position] == '/');
  624. // Both comments and slash symbols start with a `/`. We disambiguate with a
  625. // max-munch rule -- if the next character is another `/` then we lex it as
  626. // a comment start. If it isn't, then we lex as a slash. We also optimize
  627. // for the comment case as we expect that to be much more important for
  628. // overall lexer performance.
  629. if (LLVM_LIKELY(position + 1 < static_cast<ssize_t>(source_text.size()) &&
  630. source_text[position + 1] == '/')) {
  631. LexComment(source_text, position);
  632. return;
  633. }
  634. // This code path should produce a token, make sure that happens.
  635. LexResult result = LexSymbolToken(source_text, position);
  636. CARBON_CHECK(result) << "Failed to form a token!";
  637. }
  638. auto Lexer::LexComment(llvm::StringRef source_text, ssize_t& position) -> void {
  639. CARBON_DCHECK(source_text.substr(position).starts_with("//"));
  640. // Any comment must be the only non-whitespace on the line.
  641. const auto* line_info = current_line_info();
  642. if (LLVM_UNLIKELY(position != line_info->start + line_info->indent)) {
  643. CARBON_DIAGNOSTIC(TrailingComment, Error,
  644. "Trailing comments are not permitted.");
  645. emitter_.Emit(source_text.begin() + position, TrailingComment);
  646. // Note that we cannot fall-through here as the logic below doesn't handle
  647. // trailing comments. For simplicity, we just consume the trailing comment
  648. // itself and let the normal lexer handle the newline as if there weren't
  649. // a comment at all.
  650. position = line_info->start + line_info->length;
  651. return;
  652. }
  653. // The introducer '//' must be followed by whitespace or EOF.
  654. bool is_valid_after_slashes = true;
  655. if (position + 2 < static_cast<ssize_t>(source_text.size()) &&
  656. LLVM_UNLIKELY(!IsSpace(source_text[position + 2]))) {
  657. CARBON_DIAGNOSTIC(NoWhitespaceAfterCommentIntroducer, Error,
  658. "Whitespace is required after '//'.");
  659. emitter_.Emit(source_text.begin() + position + 2,
  660. NoWhitespaceAfterCommentIntroducer);
  661. // We use this to tweak the lexing of blocks below.
  662. is_valid_after_slashes = false;
  663. }
  664. // Skip over this line.
  665. ssize_t line_index = line_index_;
  666. ++line_index;
  667. position = buffer_.line_infos_[line_index].start;
  668. // A very common pattern is a long block of comment lines all with the same
  669. // indent and comment start. We skip these comment blocks in bulk both for
  670. // speed and to reduce redundant diagnostics if each line has the same
  671. // erroneous comment start like `//!`.
  672. //
  673. // When we have SIMD support this is even more important for speed, as short
  674. // indents can be scanned extremely quickly with SIMD and we expect these to
  675. // be the dominant cases.
  676. //
  677. // TODO: We should extend this to 32-byte SIMD on platforms with support.
  678. constexpr int MaxIndent = 13;
  679. const int indent = line_info->indent;
  680. const ssize_t first_line_start = line_info->start;
  681. ssize_t prefix_size = indent + (is_valid_after_slashes ? 3 : 2);
  682. auto skip_to_next_line = [this, indent, &line_index, &position] {
  683. // We're guaranteed to have a line here even on a comment on the last line
  684. // as we ensure there is an empty line structure at the end of every file.
  685. ++line_index;
  686. auto* next_line_info = &buffer_.line_infos_[line_index];
  687. next_line_info->indent = indent;
  688. position = next_line_info->start;
  689. };
  690. if (CARBON_USE_SIMD &&
  691. position + 16 < static_cast<ssize_t>(source_text.size()) &&
  692. indent <= MaxIndent) {
  693. // Load a mask based on the amount of text we want to compare.
  694. auto mask = PrefixMasks[prefix_size];
  695. #if __ARM_NEON
  696. // Load and mask the prefix of the current line.
  697. auto prefix = vld1q_u8(reinterpret_cast<const uint8_t*>(source_text.data() +
  698. first_line_start));
  699. prefix = vandq_u8(mask, prefix);
  700. do {
  701. // Load and mask the next line to consider's prefix.
  702. auto next_prefix = vld1q_u8(
  703. reinterpret_cast<const uint8_t*>(source_text.data() + position));
  704. next_prefix = vandq_u8(mask, next_prefix);
  705. // Compare the two prefixes and if any lanes differ, break.
  706. auto compare = vceqq_u8(prefix, next_prefix);
  707. if (vminvq_u8(compare) == 0) {
  708. break;
  709. }
  710. skip_to_next_line();
  711. } while (position + 16 < static_cast<ssize_t>(source_text.size()));
  712. #elif __x86_64__
  713. // Use the current line's prefix as the exemplar to compare against.
  714. // We don't mask here as we will mask when doing the comparison.
  715. auto prefix = _mm_loadu_si128(reinterpret_cast<const __m128i*>(
  716. source_text.data() + first_line_start));
  717. do {
  718. // Load the next line to consider's prefix.
  719. auto next_prefix = _mm_loadu_si128(
  720. reinterpret_cast<const __m128i*>(source_text.data() + position));
  721. // Compute the difference between the next line and our exemplar. Again,
  722. // we don't mask the difference because the comparison below will be
  723. // masked.
  724. auto prefix_diff = _mm_xor_si128(prefix, next_prefix);
  725. // If we have any differences (non-zero bits) within the mask, we can't
  726. // skip the next line too.
  727. if (!_mm_test_all_zeros(mask, prefix_diff)) {
  728. break;
  729. }
  730. skip_to_next_line();
  731. } while (position + 16 < static_cast<ssize_t>(source_text.size()));
  732. #else
  733. #error "Unsupported SIMD architecture!"
  734. #endif
  735. // TODO: If we finish the loop due to the position approaching the end of
  736. // the buffer we may fail to skip the last line in a comment block that
  737. // has an invalid initial sequence and thus emit extra diagnostics. We
  738. // should really fall through to the generic skipping logic, but the code
  739. // organization will need to change significantly to allow that.
  740. } else {
  741. while (position + prefix_size < static_cast<ssize_t>(source_text.size()) &&
  742. memcmp(source_text.data() + first_line_start,
  743. source_text.data() + position, prefix_size) == 0) {
  744. skip_to_next_line();
  745. }
  746. }
  747. // Now compute the indent of this next line before we finish.
  748. ssize_t line_start = position;
  749. SkipHorizontalWhitespace(source_text, position);
  750. // Now that we're done scanning, update to the latest line index and indent.
  751. line_index_ = line_index;
  752. current_line_info()->indent = position - line_start;
  753. }
  754. auto Lexer::LexNumericLiteral(llvm::StringRef source_text, ssize_t& position)
  755. -> LexResult {
  756. std::optional<NumericLiteral> literal =
  757. NumericLiteral::Lex(source_text.substr(position));
  758. if (!literal) {
  759. return LexError(source_text, position);
  760. }
  761. int int_column = ComputeColumn(position);
  762. int token_size = literal->text().size();
  763. position += token_size;
  764. return VariantMatch(
  765. literal->ComputeValue(emitter_),
  766. [&](NumericLiteral::IntValue&& value) {
  767. auto token = buffer_.AddToken({.kind = TokenKind::IntLiteral,
  768. .token_line = current_line(),
  769. .column = int_column});
  770. buffer_.GetTokenInfo(token).int_id =
  771. buffer_.value_stores_->ints().Add(std::move(value.value));
  772. return token;
  773. },
  774. [&](NumericLiteral::RealValue&& value) {
  775. auto token = buffer_.AddToken({.kind = TokenKind::RealLiteral,
  776. .token_line = current_line(),
  777. .column = int_column});
  778. buffer_.GetTokenInfo(token).real_id =
  779. buffer_.value_stores_->reals().Add(Real{
  780. .mantissa = value.mantissa,
  781. .exponent = value.exponent,
  782. .is_decimal = (value.radix == NumericLiteral::Radix::Decimal)});
  783. return token;
  784. },
  785. [&](NumericLiteral::UnrecoverableError) {
  786. auto token = buffer_.AddToken({
  787. .kind = TokenKind::Error,
  788. .token_line = current_line(),
  789. .column = int_column,
  790. .error_length = token_size,
  791. });
  792. return token;
  793. });
  794. }
  795. auto Lexer::LexStringLiteral(llvm::StringRef source_text, ssize_t& position)
  796. -> LexResult {
  797. std::optional<StringLiteral> literal =
  798. StringLiteral::Lex(source_text.substr(position));
  799. if (!literal) {
  800. return LexError(source_text, position);
  801. }
  802. LineIndex string_line = current_line();
  803. int string_column = ComputeColumn(position);
  804. ssize_t literal_size = literal->text().size();
  805. position += literal_size;
  806. // Update line and column information.
  807. if (literal->is_multi_line()) {
  808. while (current_line_info()->start + current_line_info()->length <
  809. position) {
  810. ++line_index_;
  811. current_line_info()->indent = string_column;
  812. }
  813. // Note that we've updated the current line at this point, but
  814. // `set_indent_` is already true from above. That remains correct as the
  815. // last line of the multi-line literal *also* has its indent set.
  816. }
  817. if (literal->is_terminated()) {
  818. auto string_id = buffer_.value_stores_->string_literal_values().Add(
  819. literal->ComputeValue(buffer_.allocator_, emitter_));
  820. auto token = buffer_.AddToken({.kind = TokenKind::StringLiteral,
  821. .token_line = string_line,
  822. .column = string_column,
  823. .string_literal_id = string_id});
  824. return token;
  825. } else {
  826. CARBON_DIAGNOSTIC(UnterminatedString, Error,
  827. "String is missing a terminator.");
  828. emitter_.Emit(literal->text().begin(), UnterminatedString);
  829. return buffer_.AddToken(
  830. {.kind = TokenKind::Error,
  831. .token_line = string_line,
  832. .column = string_column,
  833. .error_length = static_cast<int32_t>(literal_size)});
  834. }
  835. }
  836. auto Lexer::LexOneCharSymbolToken(llvm::StringRef source_text, TokenKind kind,
  837. ssize_t& position) -> TokenIndex {
  838. // Verify in a debug build that the incoming token kind is correct.
  839. CARBON_DCHECK(kind != TokenKind::Error);
  840. CARBON_DCHECK(kind.fixed_spelling().size() == 1);
  841. CARBON_DCHECK(source_text[position] == kind.fixed_spelling().front())
  842. << "Source text starts with '" << source_text[position]
  843. << "' instead of the spelling '" << kind.fixed_spelling()
  844. << "' of the incoming token kind '" << kind << "'";
  845. TokenIndex token = buffer_.AddToken({.kind = kind,
  846. .token_line = current_line(),
  847. .column = ComputeColumn(position)});
  848. ++position;
  849. return token;
  850. }
  851. auto Lexer::LexOpeningSymbolToken(llvm::StringRef source_text, TokenKind kind,
  852. ssize_t& position) -> LexResult {
  853. TokenIndex token = LexOneCharSymbolToken(source_text, kind, position);
  854. open_groups_.push_back(token);
  855. return token;
  856. }
  857. auto Lexer::LexClosingSymbolToken(llvm::StringRef source_text, TokenKind kind,
  858. ssize_t& position) -> LexResult {
  859. TokenIndex token = LexOneCharSymbolToken(source_text, kind, position);
  860. auto& token_info = buffer_.GetTokenInfo(token);
  861. // If there's not a matching opening symbol, just track that we had an error.
  862. // We will diagnose and recover when we reach the end of the file. See
  863. // `DiagnoseAndFixMismatchedBrackets` for details.
  864. if (LLVM_UNLIKELY(open_groups_.empty())) {
  865. has_mismatched_brackets_ = true;
  866. return token;
  867. }
  868. TokenIndex opening_token = open_groups_.pop_back_val();
  869. auto& opening_token_info = buffer_.GetTokenInfo(opening_token);
  870. if (LLVM_UNLIKELY(opening_token_info.kind != kind.opening_symbol())) {
  871. has_mismatched_brackets_ = true;
  872. return token;
  873. }
  874. opening_token_info.closing_token = token;
  875. token_info.opening_token = opening_token;
  876. return token;
  877. }
  878. auto Lexer::LexSymbolToken(llvm::StringRef source_text, ssize_t& position)
  879. -> LexResult {
  880. // One character symbols and grouping symbols are handled with dedicated
  881. // dispatch. We only lex the multi-character tokens here.
  882. TokenKind kind = llvm::StringSwitch<TokenKind>(source_text.substr(position))
  883. #define CARBON_SYMBOL_TOKEN(Name, Spelling) \
  884. .StartsWith(Spelling, TokenKind::Name)
  885. #define CARBON_ONE_CHAR_SYMBOL_TOKEN(TokenName, Spelling)
  886. #define CARBON_OPENING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, ClosingName)
  887. #define CARBON_CLOSING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, OpeningName)
  888. #include "toolchain/lex/token_kind.def"
  889. .Default(TokenKind::Error);
  890. if (kind == TokenKind::Error) {
  891. return LexError(source_text, position);
  892. }
  893. TokenIndex token = buffer_.AddToken({.kind = kind,
  894. .token_line = current_line(),
  895. .column = ComputeColumn(position)});
  896. position += kind.fixed_spelling().size();
  897. return token;
  898. }
  899. auto Lexer::LexWordAsTypeLiteralToken(llvm::StringRef word, int column)
  900. -> LexResult {
  901. if (word.size() < 2) {
  902. // Too short to form one of these tokens.
  903. return LexResult::NoMatch();
  904. }
  905. if (word[1] < '1' || word[1] > '9') {
  906. // Doesn't start with a valid initial digit.
  907. return LexResult::NoMatch();
  908. }
  909. std::optional<TokenKind> kind;
  910. switch (word.front()) {
  911. case 'i':
  912. kind = TokenKind::IntTypeLiteral;
  913. break;
  914. case 'u':
  915. kind = TokenKind::UnsignedIntTypeLiteral;
  916. break;
  917. case 'f':
  918. kind = TokenKind::FloatTypeLiteral;
  919. break;
  920. default:
  921. return LexResult::NoMatch();
  922. };
  923. llvm::StringRef suffix = word.substr(1);
  924. if (!CanLexInt(emitter_, suffix)) {
  925. return buffer_.AddToken(
  926. {.kind = TokenKind::Error,
  927. .token_line = current_line(),
  928. .column = column,
  929. .error_length = static_cast<int32_t>(word.size())});
  930. }
  931. llvm::APInt suffix_value;
  932. if (suffix.getAsInteger(10, suffix_value)) {
  933. return LexResult::NoMatch();
  934. }
  935. auto token = buffer_.AddToken(
  936. {.kind = *kind, .token_line = current_line(), .column = column});
  937. buffer_.GetTokenInfo(token).int_id =
  938. buffer_.value_stores_->ints().Add(std::move(suffix_value));
  939. return token;
  940. }
  941. auto Lexer::LexKeywordOrIdentifier(llvm::StringRef source_text,
  942. ssize_t& position) -> LexResult {
  943. if (static_cast<unsigned char>(source_text[position]) > 0x7F) {
  944. // TODO: Need to add support for Unicode lexing.
  945. return LexError(source_text, position);
  946. }
  947. CARBON_CHECK(
  948. IsIdStartByteTable[static_cast<unsigned char>(source_text[position])]);
  949. int column = ComputeColumn(position);
  950. // Take the valid characters off the front of the source buffer.
  951. llvm::StringRef identifier_text =
  952. ScanForIdentifierPrefix(source_text.substr(position));
  953. CARBON_CHECK(!identifier_text.empty()) << "Must have at least one character!";
  954. position += identifier_text.size();
  955. // Check if the text is a type literal, and if so form such a literal.
  956. if (LexResult result = LexWordAsTypeLiteralToken(identifier_text, column)) {
  957. return result;
  958. }
  959. // Check if the text matches a keyword token, and if so use that.
  960. TokenKind kind = llvm::StringSwitch<TokenKind>(identifier_text)
  961. #define CARBON_KEYWORD_TOKEN(Name, Spelling) .Case(Spelling, TokenKind::Name)
  962. #include "toolchain/lex/token_kind.def"
  963. .Default(TokenKind::Error);
  964. if (kind != TokenKind::Error) {
  965. return buffer_.AddToken(
  966. {.kind = kind, .token_line = current_line(), .column = column});
  967. }
  968. // Otherwise we have a generic identifier.
  969. return buffer_.AddToken(
  970. {.kind = TokenKind::Identifier,
  971. .token_line = current_line(),
  972. .column = column,
  973. .ident_id = buffer_.value_stores_->identifiers().Add(identifier_text)});
  974. }
  975. auto Lexer::LexHash(llvm::StringRef source_text, ssize_t& position)
  976. -> LexResult {
  977. // For `r#`, we already lexed an `r` identifier token. Detect that case and
  978. // replace that token with a raw identifier. We do this to keep identifier
  979. // lexing as fast as possible.
  980. // Look for the `r` token. Note that this is always in bounds because we
  981. // create a start of file token.
  982. auto& prev_token_info = buffer_.token_infos_.back();
  983. // If the previous token isn't the identifier `r`, or the character after `#`
  984. // isn't the start of an identifier, this is not a raw identifier.
  985. if (prev_token_info.kind != TokenKind::Identifier ||
  986. source_text[position - 1] != 'r' ||
  987. position + 1 == static_cast<ssize_t>(source_text.size()) ||
  988. !IsIdStartByteTable[static_cast<unsigned char>(
  989. source_text[position + 1])] ||
  990. prev_token_info.token_line != current_line() ||
  991. prev_token_info.column != ComputeColumn(position) - 1) {
  992. [[clang::musttail]] return LexStringLiteral(source_text, position);
  993. }
  994. CARBON_DCHECK(buffer_.value_stores_->identifiers().Get(
  995. prev_token_info.ident_id) == "r");
  996. // Take the valid characters off the front of the source buffer.
  997. llvm::StringRef identifier_text =
  998. ScanForIdentifierPrefix(source_text.substr(position + 1));
  999. CARBON_CHECK(!identifier_text.empty()) << "Must have at least one character!";
  1000. position += 1 + identifier_text.size();
  1001. // Replace the `r` identifier's value with the raw identifier.
  1002. // TODO: This token doesn't carry any indicator that it's raw, so
  1003. // diagnostics are unclear.
  1004. prev_token_info.ident_id =
  1005. buffer_.value_stores_->identifiers().Add(identifier_text);
  1006. return LexResult(TokenIndex(buffer_.token_infos_.size() - 1));
  1007. }
  1008. auto Lexer::LexError(llvm::StringRef source_text, ssize_t& position)
  1009. -> LexResult {
  1010. llvm::StringRef error_text =
  1011. source_text.substr(position).take_while([](char c) {
  1012. if (IsAlnum(c)) {
  1013. return false;
  1014. }
  1015. switch (c) {
  1016. case '_':
  1017. case '\t':
  1018. case '\n':
  1019. return false;
  1020. default:
  1021. break;
  1022. }
  1023. return llvm::StringSwitch<bool>(llvm::StringRef(&c, 1))
  1024. #define CARBON_SYMBOL_TOKEN(Name, Spelling) .StartsWith(Spelling, false)
  1025. #include "toolchain/lex/token_kind.def"
  1026. .Default(true);
  1027. });
  1028. if (error_text.empty()) {
  1029. // TODO: Reimplement this to use the lexer properly. In the meantime,
  1030. // guarantee that we eat at least one byte.
  1031. error_text = source_text.substr(position, 1);
  1032. }
  1033. auto token = buffer_.AddToken(
  1034. {.kind = TokenKind::Error,
  1035. .token_line = current_line(),
  1036. .column = ComputeColumn(position),
  1037. .error_length = static_cast<int32_t>(error_text.size())});
  1038. CARBON_DIAGNOSTIC(UnrecognizedCharacters, Error,
  1039. "Encountered unrecognized characters while parsing.");
  1040. emitter_.Emit(error_text.begin(), UnrecognizedCharacters);
  1041. position += error_text.size();
  1042. return token;
  1043. }
  1044. auto Lexer::LexFileStart(llvm::StringRef source_text, ssize_t& position)
  1045. -> void {
  1046. // Before lexing any source text, add the start-of-file token so that code
  1047. // can assume a non-empty token buffer for the rest of lexing. Note that the
  1048. // start-of-file always has trailing space because it *is* whitespace.
  1049. buffer_.AddToken({.kind = TokenKind::FileStart,
  1050. .has_trailing_space = true,
  1051. .token_line = current_line(),
  1052. .column = 0});
  1053. // Also skip any horizontal whitespace and record the indentation of the
  1054. // first line.
  1055. SkipHorizontalWhitespace(source_text, position);
  1056. auto* line_info = current_line_info();
  1057. CARBON_CHECK(line_info->start == 0);
  1058. line_info->indent = position;
  1059. }
  1060. auto Lexer::LexFileEnd(llvm::StringRef source_text, ssize_t position) -> void {
  1061. CARBON_CHECK(position == static_cast<ssize_t>(source_text.size()));
  1062. // Check if the last line is empty and not the first line (and only). If so,
  1063. // re-pin the last line to be the prior one so that diagnostics and editors
  1064. // can treat newlines as terminators even though we internally handle them
  1065. // as separators in case of a missing newline on the last line. We do this
  1066. // here instead of detecting this when we see the newline to avoid more
  1067. // conditions along that fast path.
  if (position == current_line_info()->start && line_index_ != 0) {
    --line_index_;
    --position;
  } else {
    // Update the line length as this is also the end of a line.
    current_line_info()->length = ComputeColumn(position);
  }

  // The end-of-file token is always considered to be whitespace.
  NoteWhitespace();

  buffer_.AddToken({.kind = TokenKind::FileEnd,
                    .token_line = current_line(),
                    .column = ComputeColumn(position)});

  // If we had any mismatched brackets, issue diagnostics and fix them.
  if (has_mismatched_brackets_ || !open_groups_.empty()) {
    DiagnoseAndFixMismatchedBrackets();
  }
}

// A list of pending insertions to make into a tokenized buffer for error
// recovery. These are buffered so that we can perform them in linear time.
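// For example, a missing `}` is recorded as a pending (index, TokenInfo) pair
// rather than being spliced in immediately; `Apply` then rebuilds the token
// list in a single pass instead of shifting tokens on every insertion.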
class Lexer::ErrorRecoveryBuffer {
 public:
  explicit ErrorRecoveryBuffer(TokenizedBuffer& buffer) : buffer_(buffer) {}
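
  // Returns true if no fixes (insertions or error replacements) have been
  // recorded.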
  auto empty() const -> bool {
    return new_tokens_.empty() && !any_error_tokens_;
  }

  // Insert a recovery token of kind `kind` before `insert_before`. Note that
  // we currently require insertions to be specified in source order, but this
  // restriction would be easy to relax.
  auto InsertBefore(TokenIndex insert_before, TokenKind kind) -> void {
    CARBON_CHECK(insert_before.index > 0)
        << "Cannot insert before the start of file token.";
    CARBON_CHECK(new_tokens_.empty() ||
                 new_tokens_.back().first <= insert_before)
        << "Insertions performed out of order.";

    // Find the end of the token before the target token, and add the new
    // token there. Note that new_token_column is a 1-based column number.
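    // TokenInfo stores 0-based columns, hence the `- 1` below.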
    auto insert_after = TokenIndex(insert_before.index - 1);
    auto [new_token_line, new_token_column] = buffer_.GetEndLoc(insert_after);
    new_tokens_.push_back(
        {insert_before,
         {.kind = kind,
          .has_trailing_space = buffer_.HasTrailingWhitespace(insert_after),
          .is_recovery = true,
          .token_line = new_token_line,
          .column = new_token_column - 1}});
  }

  // Replace the given token with an error token. We do this immediately,
  // because we don't benefit from buffering it.
  auto ReplaceWithError(TokenIndex token) -> void {
    auto& token_info = buffer_.GetTokenInfo(token);
    token_info.error_length = buffer_.GetTokenText(token).size();
    token_info.kind = TokenKind::Error;
    any_error_tokens_ = true;
  }

  // Merge the recovery tokens into the token list of the tokenized buffer.
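  // For example, with original tokens [A, B, C] and one pending insertion of
  // X before index 2, the result is [A, B, X, C]: copy the prefix, append X
  // via AddToken, then copy the remaining suffix.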
  auto Apply() -> void {
    auto old_tokens = std::move(buffer_.token_infos_);
    buffer_.token_infos_.clear();
    buffer_.token_infos_.reserve(old_tokens.size() + new_tokens_.size());

    int old_tokens_offset = 0;
    for (auto [next_offset, info] : new_tokens_) {
      buffer_.token_infos_.append(old_tokens.begin() + old_tokens_offset,
                                  old_tokens.begin() + next_offset.index);
      buffer_.AddToken(info);
      old_tokens_offset = next_offset.index;
    }
    buffer_.token_infos_.append(old_tokens.begin() + old_tokens_offset,
                                old_tokens.end());
  }

  // Perform bracket matching to fix cross-references between tokens. This
  // must be done after all recovery is performed and all brackets match,
  // because recovery will change token indexes.
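  // This is a standard stack-based matching pass: push each opening token,
  // pop it when its closing token is reached, and link the pair's token infos
  // in both directions.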
  auto FixTokenCrossReferences() -> void {
    llvm::SmallVector<TokenIndex> open_groups;
    for (auto token : buffer_.tokens()) {
      auto kind = buffer_.GetKind(token);
      if (kind.is_opening_symbol()) {
        open_groups.push_back(token);
      } else if (kind.is_closing_symbol()) {
        CARBON_CHECK(!open_groups.empty()) << "Failed to balance brackets";
        auto opening_token = open_groups.pop_back_val();
        CARBON_CHECK(kind ==
                     buffer_.GetTokenInfo(opening_token).kind.closing_symbol())
            << "Failed to balance brackets";

        auto& opening_token_info = buffer_.GetTokenInfo(opening_token);
        auto& closing_token_info = buffer_.GetTokenInfo(token);
        opening_token_info.closing_token = token;
        closing_token_info.opening_token = opening_token;
      }
    }
  }

 private:
  TokenizedBuffer& buffer_;

  // A list of tokens to insert into the token stream to fix mismatched
  // brackets. The first element in each pair is the original token index to
  // insert the new token before.
  llvm::SmallVector<std::pair<TokenIndex, TokenizedBuffer::TokenInfo>>
      new_tokens_;

  // Whether we have changed any tokens into error tokens.
  bool any_error_tokens_ = false;
};

// Issue an UnmatchedOpening diagnostic.
static auto DiagnoseUnmatchedOpening(TokenDiagnosticEmitter& emitter,
                                     TokenIndex opening_token) -> void {
  CARBON_DIAGNOSTIC(UnmatchedOpening, Error,
                    "Opening symbol without a corresponding closing symbol.");
  emitter.Emit(opening_token, UnmatchedOpening);
}

// If brackets didn't pair or nest properly, find a set of places to insert
// brackets to fix the nesting, issue suitable diagnostics, and update the
// token list to describe the fixes.
auto Lexer::DiagnoseAndFixMismatchedBrackets() -> void {
  ErrorRecoveryBuffer fixes(buffer_);

  // Look for mismatched brackets and decide where to add tokens to fix them.
  //
  // TODO: For now, we use a greedy algorithm for this. We could do better by
  // taking indentation into account. For example:
  //
  //   1  fn F() {
  //   2    if (thing1)
  //   3      thing2;
  //   4    }
  //   5  }
  //
  // Here, we'll match the `{` on line 1 with the `}` on line 4, and then
  // report that the `}` on line 5 is unmatched. Instead, we should notice
  // that line 1 matches better with line 5 due to indentation, and work out
  // that the missing `{` was on line 2, also based on indentation.
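  //
  // As a concrete example of the greedy behavior below: given `( [ )`, the
  // `)` matches the `(`, so the intervening `[` is diagnosed as unmatched and
  // a recovery `]` is inserted before the `)`.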
  open_groups_.clear();
  for (auto token : buffer_.tokens()) {
    auto kind = buffer_.GetKind(token);
    if (kind.is_opening_symbol()) {
      open_groups_.push_back(token);
      continue;
    }
    if (!kind.is_closing_symbol()) {
      continue;
    }

    // Find the innermost matching opening symbol.
    auto opening_it = std::find_if(
        open_groups_.rbegin(), open_groups_.rend(),
        [&](TokenIndex opening_token) {
          return buffer_.GetTokenInfo(opening_token).kind.closing_symbol() ==
                 kind;
        });
    if (opening_it == open_groups_.rend()) {
      CARBON_DIAGNOSTIC(
          UnmatchedClosing, Error,
          "Closing symbol without a corresponding opening symbol.");
      token_emitter_.Emit(token, UnmatchedClosing);
      fixes.ReplaceWithError(token);
      continue;
    }

    // All intermediate open tokens have no matching close token.
    for (auto it = open_groups_.rbegin(); it != opening_it; ++it) {
      DiagnoseUnmatchedOpening(token_emitter_, *it);
      // Add a closing bracket for the unclosed group here.
      //
      // TODO: Indicate in the diagnostic that we did this, perhaps by
      // annotating the snippet.
      auto opening_kind = buffer_.GetKind(*it);
      fixes.InsertBefore(token, opening_kind.closing_symbol());
    }
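    // Pop the matched opening token along with everything nested inside it.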
    open_groups_.erase(opening_it.base() - 1, open_groups_.end());
  }

  // Diagnose any remaining unmatched opening symbols.
  for (auto token : open_groups_) {
    // We don't have a good location to insert a close bracket. Convert the
    // opening token from a bracket to an error.
    DiagnoseUnmatchedOpening(token_emitter_, token);
    fixes.ReplaceWithError(token);
  }

  CARBON_CHECK(!fixes.empty()) << "Didn't find anything to fix";
  fixes.Apply();
  fixes.FixTokenCrossReferences();
}

auto Lex(SharedValueStores& value_stores, SourceBuffer& source,
         DiagnosticConsumer& consumer) -> TokenizedBuffer {
  return Lexer(value_stores, source, consumer).Lex();
}

}  // namespace Carbon::Lex