// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#include "toolchain/lex/lex.h"

#include <array>
#include <limits>
#include <optional>
#include <utility>

#include "common/check.h"
#include "common/variant_helpers.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Compiler.h"
#include "toolchain/base/shared_value_stores.h"
#include "toolchain/lex/character_set.h"
#include "toolchain/lex/helpers.h"
#include "toolchain/lex/numeric_literal.h"
#include "toolchain/lex/string_literal.h"
#include "toolchain/lex/token_index.h"
#include "toolchain/lex/token_kind.h"
#include "toolchain/lex/tokenized_buffer.h"

#if __ARM_NEON
#include <arm_neon.h>
#define CARBON_USE_SIMD 1
#elif __x86_64__
#include <x86intrin.h>
#define CARBON_USE_SIMD 1
#else
#define CARBON_USE_SIMD 0
#endif

namespace Carbon::Lex {

// Implementation of the lexer logic itself.
//
// The design is that lexing can loop over the source buffer, consuming it into
// tokens by calling into this API. This class handles the state and breaks
// down the different lexing steps that may be used. It directly updates the
// provided tokenized buffer with the lexed tokens.
//
// We'd typically put this in an anonymous namespace, but it is `friend`-ed by
// the `TokenizedBuffer`. One of the important benefits of being in an anonymous
// namespace is having internal linkage. That allows the optimizer to much more
// aggressively inline away functions that are called in only one place. We keep
// that benefit for now by using the `internal_linkage` attribute.
//
// TODO: Investigate ways to refactor the code that allow moving this into an
// anonymous namespace without overly exposing implementation details of the
// `TokenizedBuffer` or undermining the performance constraints of the lexer.
class [[clang::internal_linkage]] Lexer {
 public:
  using TokenInfo = TokenizedBuffer::TokenInfo;
  using LineInfo = TokenizedBuffer::LineInfo;

  // Symbolic result of a lexing action. This indicates whether we successfully
  // lexed a token, or whether other lexing actions should be attempted.
  //
  // While it wraps a simple boolean state, its API both helps make the
  // failures more self-documenting, and by consuming the actual token
  // constructively when one is produced, it helps ensure the correct result is
  // returned.
  class LexResult {
   public:
    // Consumes (and discards) a valid token to construct a result
    // indicating a token has been produced. Relies on implicit conversions.
    // NOLINTNEXTLINE(google-explicit-constructor)
    LexResult(TokenIndex /*discarded_token*/) : LexResult(true) {}

    // Returns a result indicating no token was produced.
    static auto NoMatch() -> LexResult { return LexResult(false); }

    // Tests whether a token was produced by the lexing routine, and
    // the lexer can continue forming tokens.
    explicit operator bool() const { return formed_token_; }

   private:
    explicit LexResult(bool formed_token) : formed_token_(formed_token) {}

    bool formed_token_;
  };

  Lexer(SharedValueStores& value_stores, SourceBuffer& source,
        Diagnostics::Consumer& consumer)
      : buffer_(value_stores, source),
        consumer_(consumer),
        emitter_(&consumer_, &buffer_),
        token_emitter_(&consumer_, &buffer_) {}

  // Find all line endings and create the line data structures.
  //
  // Explicitly kept out-of-line because this is a significant loop that is
  // useful to have in the profile and it doesn't simplify by inlining at all.
  // But because it can, the compiler will flatten this otherwise.
  [[gnu::noinline]] auto MakeLines(llvm::StringRef source_text) -> void;

  auto current_line() -> LineIndex { return LineIndex(line_index_); }
  auto current_line_info() -> LineInfo* {
    return &buffer_.line_infos_[line_index_];
  }
  auto next_line() -> LineIndex { return LineIndex(line_index_ + 1); }
  auto next_line_info() -> LineInfo* {
    CARBON_CHECK(line_index_ + 1 <
                 static_cast<ssize_t>(buffer_.line_infos_.size()));
    return &buffer_.line_infos_[line_index_ + 1];
  }

  // Note when the lexer has encountered whitespace, and the next lexed token
  // should reflect that it was preceded by some amount of whitespace.
  auto NoteWhitespace() -> void { has_leading_space_ = true; }

  // Add a lexed token to the tokenized buffer, and reset any token-specific
  // state tracked in the lexer for the next token.
  auto AddLexedToken(TokenInfo info) -> TokenIndex {
    has_leading_space_ = false;
    return buffer_.AddToken(info);
  }

  // Lexes a token with no payload: builds the correctly encoded token info,
  // adds it to the tokenized buffer and returns the token index.
  auto LexToken(TokenKind kind, int32_t byte_offset) -> TokenIndex {
    // Check that we don't accidentally call this for one of the token kinds
    // that *always* has a payload up front.
    CARBON_DCHECK(!kind.IsOneOf(
        {TokenKind::Identifier, TokenKind::StringLiteral, TokenKind::IntLiteral,
         TokenKind::IntTypeLiteral, TokenKind::UnsignedIntTypeLiteral,
         TokenKind::FloatTypeLiteral, TokenKind::RealLiteral,
         TokenKind::Error}));
    return AddLexedToken(TokenInfo(kind, has_leading_space_, byte_offset));
  }

  // Lexes a token with a payload: builds the correctly encoded token info,
  // adds it to the tokenized buffer and returns the token index.
  auto LexTokenWithPayload(TokenKind kind, int token_payload,
                           int32_t byte_offset) -> TokenIndex {
    return AddLexedToken(
        TokenInfo(kind, has_leading_space_, token_payload, byte_offset));
  }

  auto SkipHorizontalWhitespace(llvm::StringRef source_text, ssize_t& position)
      -> void;
  auto LexHorizontalWhitespace(llvm::StringRef source_text, ssize_t& position)
      -> void;
  auto LexVerticalWhitespace(llvm::StringRef source_text, ssize_t& position)
      -> void;
  auto LexCR(llvm::StringRef source_text, ssize_t& position) -> void;
  auto LexCommentOrSlash(llvm::StringRef source_text, ssize_t& position)
      -> void;
  auto LexComment(llvm::StringRef source_text, ssize_t& position) -> void;

  // Determines whether a real literal can be formed at the current location.
  // This is the case unless the preceding token is `.` or `->` and there is no
  // intervening whitespace.
  auto CanFormRealLiteral() -> bool;

  auto LexNumericLiteral(llvm::StringRef source_text, ssize_t& position)
      -> LexResult;
  auto LexStringLiteral(llvm::StringRef source_text, ssize_t& position)
      -> LexResult;
  auto LexOneCharSymbolToken(llvm::StringRef source_text, TokenKind kind,
                             ssize_t& position) -> TokenIndex;
  auto LexOpeningSymbolToken(llvm::StringRef source_text, TokenKind kind,
                             ssize_t& position) -> LexResult;
  auto LexClosingSymbolToken(llvm::StringRef source_text, TokenKind kind,
                             ssize_t& position) -> LexResult;
  auto LexSymbolToken(llvm::StringRef source_text, ssize_t& position)
      -> LexResult;

  // Given a word that has already been lexed, determine whether it is a type
  // literal and if so form the corresponding token.
  auto LexWordAsTypeLiteralToken(llvm::StringRef word, int32_t byte_offset)
      -> LexResult;

  auto LexKeywordOrIdentifier(llvm::StringRef source_text, ssize_t& position)
      -> LexResult;
  auto LexHash(llvm::StringRef source_text, ssize_t& position) -> LexResult;
  auto LexError(llvm::StringRef source_text, ssize_t& position) -> LexResult;

  auto LexFileStart(llvm::StringRef source_text, ssize_t& position) -> void;
  auto LexFileEnd(llvm::StringRef source_text, ssize_t position) -> void;

  // Perform final checking and cleanup that should be done once we have
  // finished lexing the whole file, and before we consider the tokenized
  // buffer to be complete.
  auto Finalize() -> void;

  auto DiagnoseAndFixMismatchedBrackets() -> void;

  // The main entry point for dispatching through the lexer's table. This
  // method should always fully consume the source text.
  auto Lex() && -> TokenizedBuffer;

 private:
  class ErrorRecoveryBuffer;

  TokenizedBuffer buffer_;

  ssize_t line_index_;

  // Tracks whether the lexer has encountered whitespace that will be leading
  // whitespace for the next lexed token. Reset after each token lexed.
  bool has_leading_space_ = false;

  llvm::SmallVector<TokenIndex> open_groups_;
  bool has_mismatched_brackets_ = false;

  Diagnostics::ErrorTrackingConsumer consumer_;

  TokenizedBuffer::SourcePointerDiagnosticEmitter emitter_;
  TokenizedBuffer::TokenDiagnosticEmitter token_emitter_;
};

#if CARBON_USE_SIMD
namespace {
#if __ARM_NEON
using SimdMaskT = uint8x16_t;
#elif __x86_64__
using SimdMaskT = __m128i;
#else
#error "Unsupported SIMD architecture!"
#endif
using SimdMaskArrayT = std::array<SimdMaskT, sizeof(SimdMaskT) + 1>;
}  // namespace

// A table of masks to include 0-16 bytes of an SSE register.
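//
// For illustration (derived from the initializer below): `PrefixMasks[0]` is
// all zero bytes, `PrefixMasks[3]` has its first three bytes set to 0xFF and
// the rest zero, and `PrefixMasks[16]` is all 0xFF bytes. As a result,
// `input & PrefixMasks[n]` keeps exactly the first `n` bytes of `input`.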
static constexpr SimdMaskArrayT PrefixMasks = []() constexpr {
  SimdMaskArrayT masks = {};
  for (int i = 1; i < static_cast<int>(masks.size()); ++i) {
    masks[i] =
        // The SIMD types and constexpr require a C-style cast.
        // NOLINTNEXTLINE(google-readability-casting)
        (SimdMaskT)(std::numeric_limits<unsigned __int128>::max() >>
                    ((sizeof(SimdMaskT) - i) * 8));
  }
  return masks;
}();
#endif  // CARBON_USE_SIMD

// A table of booleans that we can use to classify bytes as being valid
// identifier start. This is used by raw identifier detection.
static constexpr std::array<bool, 256> IsIdStartByteTable = [] {
  std::array<bool, 256> table = {};
  for (char c = 'A'; c <= 'Z'; ++c) {
    table[c] = true;
  }
  for (char c = 'a'; c <= 'z'; ++c) {
    table[c] = true;
  }
  table['_'] = true;
  return table;
}();

// A table of booleans that we can use to classify bytes as being valid
// identifier (or keyword) characters. This is used in the generic,
// non-vectorized fallback code to scan for the length of an identifier.
static constexpr std::array<bool, 256> IsIdByteTable = [] {
  std::array<bool, 256> table = IsIdStartByteTable;
  for (char c = '0'; c <= '9'; ++c) {
    table[c] = true;
  }
  return table;
}();

// Baseline scalar version, also available as the scalar fallback in SIMD code.
// Uses `ssize_t` for performance when indexing in the loop.
//
// TODO: This assumes all Unicode characters are non-identifiers.
static auto ScanForIdentifierPrefixScalar(llvm::StringRef text, ssize_t i)
    -> llvm::StringRef {
  const ssize_t size = text.size();
  while (i < size && IsIdByteTable[static_cast<unsigned char>(text[i])]) {
    ++i;
  }
  return text.substr(0, i);
}

#if CARBON_USE_SIMD && __x86_64__
// The SIMD code path uses a scheme derived from the techniques in Geoff
// Langdale and Daniel Lemire's work on parsing JSON[1]. Specifically, that
// paper outlines a technique of using two 4-bit indexed in-register look-up
// tables (LUTs) to classify bytes in a branchless SIMD code sequence.
//
// [1]: https://arxiv.org/pdf/1902.08318.pdf
//
// The goal is to get a bit mask classifying different sets of bytes. For each
// input byte, we first test for a high bit indicating a UTF-8 encoded Unicode
// character. Otherwise, we want the mask bits to be set with the following
// logic derived by inspecting the high nibble and low nibble of the input:
// bit0 = 1 for `_`: high `0x5` and low `0xF`
// bit1 = 1 for `0-9`: high `0x3` and low `0x0` - `0x9`
// bit2 = 1 for `A-O` and `a-o`: high `0x4` or `0x6` and low `0x1` - `0xF`
// bit3 = 1 for `P-Z` and `p-z`: high `0x5` or `0x7` and low `0x0` - `0xA`
// bit4 = unused
// bit5 = unused
// bit6 = unused
// bit7 = unused
//
// No bits set means a definitively non-ID ASCII character.
//
// Bits 4-7 remain unused if we need to classify more characters.
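//
// As a concrete example of how the two tables below combine: 'A' is 0x41, so
// the high nibble 0x4 looks up 0b0000'0100 and the low nibble 0x1 looks up
// 0b1000'1110; ANDing them leaves bit2 set, marking an identifier byte. ':' is
// 0x3A, so the high nibble 0x3 gives 0b0000'0010 and the low nibble 0xA gives
// 0b1000'1100; their AND is zero, marking a non-identifier byte.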
namespace {
// Struct used to implement the nibble LUT for SIMD implementations.
//
// Forced to 16-byte alignment to ensure we can load it easily in SIMD code.
struct alignas(16) NibbleLUT {
  auto Load() const -> __m128i {
    return _mm_load_si128(reinterpret_cast<const __m128i*>(this));
  }

  uint8_t nibble_0;
  uint8_t nibble_1;
  uint8_t nibble_2;
  uint8_t nibble_3;
  uint8_t nibble_4;
  uint8_t nibble_5;
  uint8_t nibble_6;
  uint8_t nibble_7;
  uint8_t nibble_8;
  uint8_t nibble_9;
  uint8_t nibble_a;
  uint8_t nibble_b;
  uint8_t nibble_c;
  uint8_t nibble_d;
  uint8_t nibble_e;
  uint8_t nibble_f;
};
}  // namespace

static constexpr NibbleLUT HighLUT = {
    .nibble_0 = 0b0000'0000,
    .nibble_1 = 0b0000'0000,
    .nibble_2 = 0b0000'0000,
    .nibble_3 = 0b0000'0010,
    .nibble_4 = 0b0000'0100,
    .nibble_5 = 0b0000'1001,
    .nibble_6 = 0b0000'0100,
    .nibble_7 = 0b0000'1000,
    .nibble_8 = 0b1000'0000,
    .nibble_9 = 0b1000'0000,
    .nibble_a = 0b1000'0000,
    .nibble_b = 0b1000'0000,
    .nibble_c = 0b1000'0000,
    .nibble_d = 0b1000'0000,
    .nibble_e = 0b1000'0000,
    .nibble_f = 0b1000'0000,
};
static constexpr NibbleLUT LowLUT = {
    .nibble_0 = 0b1000'1010,
    .nibble_1 = 0b1000'1110,
    .nibble_2 = 0b1000'1110,
    .nibble_3 = 0b1000'1110,
    .nibble_4 = 0b1000'1110,
    .nibble_5 = 0b1000'1110,
    .nibble_6 = 0b1000'1110,
    .nibble_7 = 0b1000'1110,
    .nibble_8 = 0b1000'1110,
    .nibble_9 = 0b1000'1110,
    .nibble_a = 0b1000'1100,
    .nibble_b = 0b1000'0100,
    .nibble_c = 0b1000'0100,
    .nibble_d = 0b1000'0100,
    .nibble_e = 0b1000'0100,
    .nibble_f = 0b1000'0101,
};

static auto ScanForIdentifierPrefixX86(llvm::StringRef text)
    -> llvm::StringRef {
  const auto high_lut = HighLUT.Load();
  const auto low_lut = LowLUT.Load();

  // Use `ssize_t` for performance here as we index memory in a tight loop.
  ssize_t i = 0;
  const ssize_t size = text.size();
  while ((i + 16) <= size) {
    __m128i input =
        _mm_loadu_si128(reinterpret_cast<const __m128i*>(text.data() + i));

    // The high bits of each byte indicate a non-ASCII character encoded using
    // UTF-8. Test those and fall back to the scalar code if present. These
    // bytes will also cause spurious zeros in the LUT results, but we can
    // ignore that because we track them independently here.
#if __SSE4_1__
    if (!_mm_test_all_zeros(_mm_set1_epi8(0x80), input)) {
      break;
    }
#else
    if (_mm_movemask_epi8(input) != 0) {
      break;
    }
#endif

    // Do two LUT lookups and mask the results together to get the results for
    // both low and high nibbles. Note that we don't need to mask out the high
    // bit of input here because we track that above for UTF-8 handling.
    __m128i low_mask = _mm_shuffle_epi8(low_lut, input);
    // Note that the input needs to be masked to only include the high nibble
    // or we could end up with bit7 set forcing the result to a zero byte.
    __m128i input_high =
        _mm_and_si128(_mm_srli_epi32(input, 4), _mm_set1_epi8(0x0f));
    __m128i high_mask = _mm_shuffle_epi8(high_lut, input_high);
    __m128i mask = _mm_and_si128(low_mask, high_mask);

    // Now compare to find the completely zero bytes.
    __m128i id_byte_mask_vec = _mm_cmpeq_epi8(mask, _mm_setzero_si128());
    int tail_ascii_mask = _mm_movemask_epi8(id_byte_mask_vec);

    // Check if there are bits in the tail mask, which means zero bytes and the
    // end of the identifier. We could do this without materializing the scalar
    // mask on more recent CPUs, but we generally expect the median length we
    // encounter to be <16 characters and so we avoid the extra instruction in
    // that case and predict this branch to succeed so it is laid out in a
    // reasonable way.
    if (LLVM_LIKELY(tail_ascii_mask != 0)) {
      // Move past the definitively classified bytes that are part of the
      // identifier, and return the complete identifier text.
      i += __builtin_ctz(tail_ascii_mask);
      return text.substr(0, i);
    }
    i += 16;
  }

  return ScanForIdentifierPrefixScalar(text, i);
}
#endif  // CARBON_USE_SIMD && __x86_64__

// Scans the provided text and returns the prefix `StringRef` of contiguous
// identifier characters.
//
// This is a performance sensitive function and where profitable uses
// vectorized code sequences to optimize its scanning. When modifying, the
// identifier lexing benchmarks should be checked for regressions.
//
// Identifier characters here are currently the ASCII characters
// `[0-9A-Za-z_]`.
//
// TODO: Currently, this code does not implement Carbon's design for Unicode
// characters in identifiers. It does work on UTF-8 code unit sequences, but
// currently considers non-ASCII characters to be non-identifier characters.
// Some work has been done to ensure the hot loop, while optimized, retains
// enough information to add Unicode handling without completely destroying
// the relevant optimizations.
static auto ScanForIdentifierPrefix(llvm::StringRef text) -> llvm::StringRef {
  // Dispatch to an architecture-optimized routine.
#if CARBON_USE_SIMD && __x86_64__
  return ScanForIdentifierPrefixX86(text);
#elif CARBON_USE_SIMD && __ARM_NEON
  // Somewhat surprisingly, there is basically nothing worth doing in SIMD on
  // Arm to optimize this scan. The Neon SIMD operations end up requiring you
  // to move from the SIMD unit to the scalar unit in the critical path of
  // finding the offset of the end of an identifier. Current ARM cores make
  // the code sequences here (quite) unpleasant. For example, on Apple M1 and
  // similar cores, the latency is as much as 10 cycles just to extract from
  // the vector. SIMD might be more interesting on Neoverse cores, but it'd be
  // nice to avoid core-specific tunings at this point.
  //
  // If this proves problematic and critical to optimize, the current leading
  // theory is to have the newline searching code also create a bitmask for
  // the entire source file of identifier and non-identifier bytes, and then
  // use the bit-counting instructions here to do a fast scan of that bitmask.
  // However, crossing that bridge will add substantial complexity to the
  // newline scanner, and so currently we just use a boring scalar loop that
  // pipelines well.
#endif
  return ScanForIdentifierPrefixScalar(text, 0);
}

using DispatchFunctionT = auto(Lexer& lexer, llvm::StringRef source_text,
                               ssize_t position) -> void;
using DispatchTableT = std::array<DispatchFunctionT*, 256>;

static constexpr std::array<TokenKind, 256> OneCharTokenKindTable = [] {
  std::array<TokenKind, 256> table = {};
#define CARBON_ONE_CHAR_SYMBOL_TOKEN(TokenName, Spelling) \
  table[(Spelling)[0]] = TokenKind::TokenName;
#define CARBON_OPENING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, ClosingName) \
  table[(Spelling)[0]] = TokenKind::TokenName;
#define CARBON_CLOSING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, OpeningName) \
  table[(Spelling)[0]] = TokenKind::TokenName;
#include "toolchain/lex/token_kind.def"
  return table;
}();

// We use a collection of static member functions for table-based dispatch to
// lexer methods. These are named static member functions so that they show up
// helpfully in profiles and backtraces, but they tend to not contain the
// interesting logic and simply delegate to the relevant methods. All of their
// signatures need to be exactly the same however in order to ensure we can
// build efficient dispatch tables out of them. All of them end by doing a
// must-tail return call to this routine. It handles continuing the dispatch
// chain.
static auto DispatchNext(Lexer& lexer, llvm::StringRef source_text,
                         ssize_t position) -> void;

// Define a set of dispatch functions that simply forward to a method that
// lexes a token. This includes validating that an actual token was produced,
// and continuing the dispatch.
#define CARBON_DISPATCH_LEX_TOKEN(LexMethod) \
  static auto Dispatch##LexMethod(Lexer& lexer, llvm::StringRef source_text, \
                                  ssize_t position) -> void { \
    Lexer::LexResult result = lexer.LexMethod(source_text, position); \
    CARBON_CHECK(result, "Failed to form a token!"); \
    [[clang::musttail]] return DispatchNext(lexer, source_text, position); \
  }
CARBON_DISPATCH_LEX_TOKEN(LexError)
CARBON_DISPATCH_LEX_TOKEN(LexSymbolToken)
CARBON_DISPATCH_LEX_TOKEN(LexKeywordOrIdentifier)
CARBON_DISPATCH_LEX_TOKEN(LexHash)
CARBON_DISPATCH_LEX_TOKEN(LexNumericLiteral)
CARBON_DISPATCH_LEX_TOKEN(LexStringLiteral)

// A set of custom dispatch functions that pre-select the symbol token to lex.
#define CARBON_DISPATCH_LEX_SYMBOL_TOKEN(LexMethod) \
  static auto Dispatch##LexMethod##SymbolToken( \
      Lexer& lexer, llvm::StringRef source_text, ssize_t position) -> void { \
    Lexer::LexResult result = lexer.LexMethod##SymbolToken( \
        source_text, \
        OneCharTokenKindTable[static_cast<unsigned char>( \
            source_text[position])], \
        position); \
    CARBON_CHECK(result, "Failed to form a token!"); \
    [[clang::musttail]] return DispatchNext(lexer, source_text, position); \
  }
CARBON_DISPATCH_LEX_SYMBOL_TOKEN(LexOneChar)
CARBON_DISPATCH_LEX_SYMBOL_TOKEN(LexOpening)
CARBON_DISPATCH_LEX_SYMBOL_TOKEN(LexClosing)

// Define a set of non-token dispatch functions that handle things like
// whitespace and comments.
#define CARBON_DISPATCH_LEX_NON_TOKEN(LexMethod) \
  static auto Dispatch##LexMethod(Lexer& lexer, llvm::StringRef source_text, \
                                  ssize_t position) -> void { \
    lexer.LexMethod(source_text, position); \
    [[clang::musttail]] return DispatchNext(lexer, source_text, position); \
  }
CARBON_DISPATCH_LEX_NON_TOKEN(LexHorizontalWhitespace)
CARBON_DISPATCH_LEX_NON_TOKEN(LexVerticalWhitespace)
CARBON_DISPATCH_LEX_NON_TOKEN(LexCR)
CARBON_DISPATCH_LEX_NON_TOKEN(LexCommentOrSlash)

// Build a table of function pointers that we can use to dispatch to the
// correct lexer routine based on the first byte of source text.
//
// While it is tempting to simply use a `switch` on the first byte and
// dispatch with cases into this, in practice that doesn't produce great code.
// There seem to be two issues that are the root cause.
//
// First, there are lots of different values of bytes that dispatch to a
// fairly small set of routines, and then some byte values that dispatch
// differently for each byte. This pattern isn't one that the compiler-based
// lowering of switches works well with -- it tries to balance all the cases,
// and in doing so emits several compares and other control flow rather than a
// simple jump table.
//
// Second, with a `case`, it isn't as obvious how to create a single, uniform
// interface that is effective for *every* byte value, and thus makes for a
// single consistent table-based dispatch. By forcing these to be function
// pointers, we also coerce the code to use a strictly homogeneous structure
// that can form a single dispatch table.
//
// These two actually interact -- the second issue is part of what makes the
// non-table lowering in the first one desirable for many switches and cases.
//
// Ultimately, when table-based dispatch is such an important technique, we
// get better results by taking full control and manually creating the
// dispatch structures.
//
// The functions in this table also use tail-recursion to implement the loop
// of the lexer. This is based on the technique described more fully for any
// kind of byte-stream loop structure here:
// https://blog.reverberate.org/2021/04/21/musttail-efficient-interpreters.html
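//
// In rough pseudocode, the combined effect of this table and the dispatch
// functions above is simply:
//
//   while (position < source_text.size()) {
//     DispatchTable[source_text[position]](lexer, source_text, position);
//   }
//
// where each dispatched routine advances `position` before handing control to
// the next byte's entry, except that each step is a must-tail call through
// `DispatchNext` below rather than a loop back-edge.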
static constexpr auto MakeDispatchTable() -> DispatchTableT {
  DispatchTableT table = {};
  // First set the table entries to dispatch to our error token handler as the
  // base case. Everything valid comes from an override below.
  for (int i = 0; i < 256; ++i) {
    table[i] = &DispatchLexError;
  }

  // Symbols have some special dispatching. First, set the first character of
  // each symbol token spelling to dispatch to the symbol lexer. We don't
  // provide a pre-computed token here, so the symbol lexer will compute the
  // exact symbol token kind. We'll override this with more specific dispatch
  // below.
#define CARBON_SYMBOL_TOKEN(TokenName, Spelling) \
  table[(Spelling)[0]] = &DispatchLexSymbolToken;
#include "toolchain/lex/token_kind.def"

  // Now special case single-character symbols that are guaranteed to not
  // join with another symbol. These are grouping symbols, terminators,
  // or separators in the grammar and have a good reason to be
  // orthogonal to any other punctuation. We do this separately because this
  // needs to override some of the generic handling above, and provide a
  // custom token.
#define CARBON_ONE_CHAR_SYMBOL_TOKEN(TokenName, Spelling) \
  table[(Spelling)[0]] = &DispatchLexOneCharSymbolToken;
#define CARBON_OPENING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, ClosingName) \
  table[(Spelling)[0]] = &DispatchLexOpeningSymbolToken;
#define CARBON_CLOSING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, OpeningName) \
  table[(Spelling)[0]] = &DispatchLexClosingSymbolToken;
#include "toolchain/lex/token_kind.def"

  // Override the handling for `/` to consider comments as well as a `/`
  // symbol.
  table['/'] = &DispatchLexCommentOrSlash;

  table['_'] = &DispatchLexKeywordOrIdentifier;
  // Note that we don't use `llvm::seq` because this needs to be `constexpr`
  // evaluated.
  for (unsigned char c = 'a'; c <= 'z'; ++c) {
    table[c] = &DispatchLexKeywordOrIdentifier;
  }
  for (unsigned char c = 'A'; c <= 'Z'; ++c) {
    table[c] = &DispatchLexKeywordOrIdentifier;
  }
  // We dispatch all non-ASCII UTF-8 characters to the identifier lexing
  // as whitespace characters should already have been skipped and the
  // only remaining valid Unicode characters would be part of an
  // identifier. That code can either accept or reject.
  for (int i = 0x80; i < 0x100; ++i) {
    table[i] = &DispatchLexKeywordOrIdentifier;
  }

  for (unsigned char c = '0'; c <= '9'; ++c) {
    table[c] = &DispatchLexNumericLiteral;
  }

  table['\''] = &DispatchLexStringLiteral;
  table['"'] = &DispatchLexStringLiteral;
  table['#'] = &DispatchLexHash;

  table[' '] = &DispatchLexHorizontalWhitespace;
  table['\t'] = &DispatchLexHorizontalWhitespace;
  table['\n'] = &DispatchLexVerticalWhitespace;
  table['\r'] = &DispatchLexCR;

  return table;
}

static constexpr DispatchTableT DispatchTable = MakeDispatchTable();

static auto DispatchNext(Lexer& lexer, llvm::StringRef source_text,
                         ssize_t position) -> void {
  if (LLVM_LIKELY(position < static_cast<ssize_t>(source_text.size()))) {
    // The common case is to tail recurse based on the next character. Note
    // that because this is a must-tail return, this cannot fail to tail-call
    // and will not grow the stack. This is in essence a loop with dynamic
    // tail dispatch to the next stage of the loop.
    // NOLINTNEXTLINE(readability-avoid-return-with-void-value): For musttail.
    [[clang::musttail]] return DispatchTable[static_cast<unsigned char>(
        source_text[position])](lexer, source_text, position);
  }

  // When we finish the source text, stop recursing. We also hint this so that
  // the tail-dispatch is optimized as that's essentially the loop back-edge
  // and this is the loop exit.
  lexer.LexFileEnd(source_text, position);
}

// Estimate an upper bound on the number of identifiers we will need to lex.
//
// When analyzing both Carbon and LLVM's C++ code, we have found a roughly
// normal distribution of unique identifiers in the file centered at 0.5 *
// lines, and in the vast majority of cases bounded below 1.0 * lines. For
// example, here is LLVM's distribution computed with `scripts/source_stats.py`
// and rendered in an ASCII-art histogram:
//
// ## Unique IDs per 10 lines ## (median: 5, p90: 8, p95: 9, p99: 14)
//  1 ids [  29] ▍
//  2 ids [ 282] ███▊
//  3 ids [1492] ███████████████████▉
//  4 ids [2674] ███████████████████████████████████▌
//  5 ids [3011] ████████████████████████████████████████
//  6 ids [2267] ██████████████████████████████▏
//  7 ids [1549] ████████████████████▋
//  8 ids [ 817] ██████████▉
//  9 ids [ 301] ████
// 10 ids [  98] █▎
//
// (Trimmed to only cover 1 - 10 unique IDs per 10 lines of code, 272 files
// with more unique IDs in the tail.)
//
// We have checked this distribution with several large codebases (currently
// those at Google, happy to cross check with others) that use a similar
// coding style, and it appears to be very consistent. However, we suspect it
// may be dependent on the column width style. Currently, Carbon's toolchain
// style specifies 80 columns, but if we expect the lexer to routinely see
// files in different styles we should re-compute this estimate.
static auto EstimateUpperBoundOnNumIdentifiers(int line_count) -> int {
  return line_count;
}
auto Lexer::Lex() && -> TokenizedBuffer {
  llvm::StringRef source_text = buffer_.source_->text();

  // Enforced by the source buffer, but something we heavily rely on throughout
  // the lexer.
  CARBON_CHECK(source_text.size() < std::numeric_limits<int32_t>::max());

  // First build up our line data structures.
  MakeLines(source_text);

  // Use the line count (and any other info needed from this scan) to make
  // rough estimated reservations of memory in the hot data structures used by
  // the lexer. In practice, scanning for lines is one of the easiest parts of
  // the lexer to accelerate, and we can use its results to minimize the cost
  // of incrementally growing data structures during the hot path of the lexer.
  //
  // Note that for hashtables we want estimates near the upper bound to
  // minimize growth across the vast majority of inputs. They will also
  // typically reserve more memory than we request due to load factor and
  // rounding to power-of-two size. This overshoot is usually fine for hot
  // parts of the lexer where latency is expected to be more important than
  // minimizing memory usage.
  buffer_.value_stores_->identifiers().Reserve(
      EstimateUpperBoundOnNumIdentifiers(buffer_.line_infos_.size()));

  ssize_t position = 0;
  LexFileStart(source_text, position);

  // Manually enter the dispatch loop. This call will tail-recurse through the
  // dispatch table until everything from source_text is consumed.
  DispatchNext(*this, source_text, position);

  Finalize();

  if (consumer_.seen_error()) {
    buffer_.has_errors_ = true;
  }

  return std::move(buffer_);
}

auto Lexer::MakeLines(llvm::StringRef source_text) -> void {
  // We currently use `memchr` here, which typically is well optimized to use
  // SIMD or other techniques significantly faster than byte-wise scanning. We
  // also use carefully selected variables and the `ssize_t` type for the
  // performance and code size of this hot loop.
  //
  // Note that the `memchr` approach here works equally well for LF and CR+LF
  // line endings. Either way, it finds the end of the line and the start of
  // the next line. The lexer below will find the CR byte and peek to see the
  // following LF and jump to the next line correctly. However, this approach
  // does *not* support plain CR or LF+CR line endings. Nor does it support
  // vertical tab or other vertical whitespace.
  //
  // TODO: Eventually, we should extend this to have correct fallback support
  // for handling CR, LF+CR, vertical tab, and other esoteric vertical
  // whitespace as line endings. Notably, including *mixtures* of them. This
  // will likely be somewhat tricky as even detecting their absence without
  // performance overhead and without a custom scanner here rather than
  // `memchr` is likely to be difficult.
  const char* const text = source_text.data();
  const ssize_t size = source_text.size();
  ssize_t start = 0;
  while (const char* nl = reinterpret_cast<const char*>(
             memchr(&text[start], '\n', size - start))) {
    ssize_t nl_index = nl - text;
    buffer_.AddLine(TokenizedBuffer::LineInfo(start));
    start = nl_index + 1;
  }
  // The last line ends at the end of the file.
  buffer_.AddLine(TokenizedBuffer::LineInfo(start));

  // If the last line wasn't empty, the file ends with an unterminated line.
  // Add an extra blank line so that we never need to handle the special case
  // of being on the last line inside the lexer and needing to not increment
  // to the next line.
  if (start != size) {
    buffer_.AddLine(TokenizedBuffer::LineInfo(size));
  }

  // Now that all the infos are allocated, get a fresh pointer to the first
  // info for use while lexing.
  line_index_ = 0;
}

auto Lexer::SkipHorizontalWhitespace(llvm::StringRef source_text,
                                     ssize_t& position) -> void {
  // Handle adjacent whitespace quickly. This comes up frequently for example
  // due to indentation. We don't expect *huge* runs, so just use a scalar
  // loop. While still scalar, this avoids repeated table dispatch and marking
  // whitespace.
  while (position < static_cast<ssize_t>(source_text.size()) &&
         (source_text[position] == ' ' || source_text[position] == '\t')) {
    ++position;
  }
}

auto Lexer::LexHorizontalWhitespace(llvm::StringRef source_text,
                                    ssize_t& position) -> void {
  CARBON_DCHECK(source_text[position] == ' ' || source_text[position] == '\t');
  NoteWhitespace();
  // Skip runs using an optimized code path.
  SkipHorizontalWhitespace(source_text, position);
}

auto Lexer::LexVerticalWhitespace(llvm::StringRef source_text,
                                  ssize_t& position) -> void {
  NoteWhitespace();
  ++line_index_;
  auto* line_info = current_line_info();
  ssize_t line_start = line_info->start;
  position = line_start;
  SkipHorizontalWhitespace(source_text, position);
  line_info->indent = position - line_start;
}

auto Lexer::LexCR(llvm::StringRef source_text, ssize_t& position) -> void {
  if (LLVM_LIKELY((position + 1) < static_cast<ssize_t>(source_text.size())) &&
      LLVM_LIKELY(source_text[position + 1] == '\n')) {
    // Skip to the vertical whitespace path, it will skip over both CR and LF.
    LexVerticalWhitespace(source_text, position);
    return;
  }

  CARBON_DIAGNOSTIC(UnsupportedLfCrLineEnding, Error,
                    "the LF+CR line ending is not supported, only LF and CR+LF "
                    "are supported");
  CARBON_DIAGNOSTIC(UnsupportedCrLineEnding, Error,
                    "a raw CR line ending is not supported, only LF and CR+LF "
                    "are supported");
  bool is_lfcr = position > 0 && source_text[position - 1] == '\n';
  // TODO: This diagnostic has an unfortunate snippet -- we should tweak the
  // snippet rendering to gracefully handle CRs.
  emitter_.Emit(source_text.begin() + position,
                is_lfcr ? UnsupportedLfCrLineEnding : UnsupportedCrLineEnding);

  // Recover by treating the CR as horizontal whitespace. This should make our
  // whitespace rules largely work and parse cleanly without disrupting the
  // line tracking data structures that were pre-built.
  NoteWhitespace();
  ++position;
}

auto Lexer::LexCommentOrSlash(llvm::StringRef source_text, ssize_t& position)
    -> void {
  CARBON_DCHECK(source_text[position] == '/');

  // Both comments and slash symbols start with a `/`. We disambiguate with a
  // max-munch rule -- if the next character is another `/` then we lex it as
  // a comment start. If it isn't, then we lex as a slash. We also optimize
  // for the comment case as we expect that to be much more important for
  // overall lexer performance.
  if (LLVM_LIKELY(position + 1 < static_cast<ssize_t>(source_text.size()) &&
                  source_text[position + 1] == '/')) {
    LexComment(source_text, position);
    return;
  }

  // This code path should produce a token, make sure that happens.
  LexResult result = LexSymbolToken(source_text, position);
  CARBON_CHECK(result, "Failed to form a token!");
}

auto Lexer::LexComment(llvm::StringRef source_text, ssize_t& position) -> void {
  CARBON_DCHECK(source_text.substr(position).starts_with("//"));
  int32_t comment_start = position;

  // Any comment must be the only non-whitespace on the line.
  const auto* line_info = current_line_info();
  if (LLVM_UNLIKELY(position != line_info->start + line_info->indent)) {
    CARBON_DIAGNOSTIC(TrailingComment, Error,
                      "trailing comments are not permitted");
    emitter_.Emit(source_text.begin() + position, TrailingComment);

    // Note that we cannot fall through here as the logic below doesn't handle
    // trailing comments. Instead, we treat trailing comments as vertical
    // whitespace, which already is designed to skip over any erroneous text
    // at the end of the line.
    LexVerticalWhitespace(source_text, position);
    buffer_.AddComment(line_info->indent, comment_start, position);
    return;
  }

  // The introducer '//' must be followed by whitespace or EOF.
  bool is_valid_after_slashes = true;
  if (position + 2 < static_cast<ssize_t>(source_text.size()) &&
      LLVM_UNLIKELY(!IsSpace(source_text[position + 2]))) {
    CARBON_DIAGNOSTIC(NoWhitespaceAfterCommentIntroducer, Error,
                      "whitespace is required after '//'");
    emitter_.Emit(source_text.begin() + position + 2,
                  NoWhitespaceAfterCommentIntroducer);

    // We use this to tweak the lexing of blocks below.
    is_valid_after_slashes = false;
  }

  // Skip over this line.
  ssize_t line_index = line_index_;
  ++line_index;
  position = buffer_.line_infos_[line_index].start;

  // A very common pattern is a long block of comment lines all with the same
  // indent and comment start. We skip these comment blocks in bulk both for
  // speed and to reduce redundant diagnostics if each line has the same
  // erroneous comment start like `//!`.
  //
  // When we have SIMD support this is even more important for speed, as short
  // indents can be scanned extremely quickly with SIMD and we expect these to
  // be the dominant cases.
  //
  // TODO: We should extend this to 32-byte SIMD on platforms with support.
  constexpr int MaxIndent = 13;
  const int indent = line_info->indent;
  const ssize_t first_line_start = line_info->start;
  ssize_t prefix_size = indent + (is_valid_after_slashes ? 3 : 2);
  auto skip_to_next_line = [this, indent, &line_index, &position] {
    // We're guaranteed to have a line here even on a comment on the last line
    // as we ensure there is an empty line structure at the end of every file.
    ++line_index;
    auto* next_line_info = &buffer_.line_infos_[line_index];
    next_line_info->indent = indent;
    position = next_line_info->start;
  };
  if (CARBON_USE_SIMD &&
      position + 16 < static_cast<ssize_t>(source_text.size()) &&
      indent <= MaxIndent) {
    // Load a mask based on the amount of text we want to compare.
    auto mask = PrefixMasks[prefix_size];
#if __ARM_NEON
    // Load and mask the prefix of the current line.
    auto prefix = vld1q_u8(reinterpret_cast<const uint8_t*>(
        source_text.data() + first_line_start));
    prefix = vandq_u8(mask, prefix);
    do {
      // Load and mask the next line to consider's prefix.
      auto next_prefix = vld1q_u8(
          reinterpret_cast<const uint8_t*>(source_text.data() + position));
      next_prefix = vandq_u8(mask, next_prefix);
      // Compare the two prefixes and if any lanes differ, break.
      auto compare = vceqq_u8(prefix, next_prefix);
      if (vminvq_u8(compare) == 0) {
        break;
      }
      skip_to_next_line();
    } while (position + 16 < static_cast<ssize_t>(source_text.size()));
#elif __x86_64__
    // Use the current line's prefix as the exemplar to compare against.
    // We don't mask here as we will mask when doing the comparison.
    auto prefix = _mm_loadu_si128(reinterpret_cast<const __m128i*>(
        source_text.data() + first_line_start));
    do {
      // Load the next line to consider's prefix.
      auto next_prefix = _mm_loadu_si128(
          reinterpret_cast<const __m128i*>(source_text.data() + position));
      // Compute the difference between the next line and our exemplar. Again,
      // we don't mask the difference because the comparison below will be
      // masked.
      auto prefix_diff = _mm_xor_si128(prefix, next_prefix);
      // If we have any differences (non-zero bits) within the mask, we can't
      // skip the next line too.
      if (!_mm_test_all_zeros(mask, prefix_diff)) {
        break;
      }
      skip_to_next_line();
    } while (position + 16 < static_cast<ssize_t>(source_text.size()));
#else
#error "Unsupported SIMD architecture!"
#endif
    // TODO: If we finish the loop due to the position approaching the end of
    // the buffer we may fail to skip the last line in a comment block that
    // has an invalid initial sequence and thus emit extra diagnostics. We
    // should really fall through to the generic skipping logic, but the code
    // organization will need to change significantly to allow that.
  } else {
    while (position + prefix_size < static_cast<ssize_t>(source_text.size()) &&
           memcmp(source_text.data() + first_line_start,
                  source_text.data() + position, prefix_size) == 0) {
      skip_to_next_line();
    }
  }
  buffer_.AddComment(indent, comment_start, position);

  // Now compute the indent of this next line before we finish.
  ssize_t line_start = position;
  SkipHorizontalWhitespace(source_text, position);

  // Now that we're done scanning, update to the latest line index and indent.
  line_index_ = line_index;
  current_line_info()->indent = position - line_start;
}
  883. auto Lexer::CanFormRealLiteral() -> bool {
  884. // When a numeric literal immediately follows a `.` or `->` token, with no
  885. // intervening whitespace, a real literal is never formed.
  886. if (has_leading_space_) {
  887. return true;
  888. }
  889. auto kind = buffer_.GetKind(buffer_.tokens().end()[-1]);
  890. return kind != TokenKind::Period && kind != TokenKind::MinusGreater;
  891. }
  892. auto Lexer::LexNumericLiteral(llvm::StringRef source_text, ssize_t& position)
  893. -> LexResult {
  894. std::optional<NumericLiteral> literal =
  895. NumericLiteral::Lex(source_text.substr(position), CanFormRealLiteral());
  896. if (!literal) {
  897. return LexError(source_text, position);
  898. }
  899. // Capture the position before we step past the token.
  900. int32_t byte_offset = position;
  901. int token_size = literal->text().size();
  902. position += token_size;
  903. return VariantMatch(
  904. literal->ComputeValue(emitter_),
  905. [&](NumericLiteral::IntValue&& value) {
  906. return LexTokenWithPayload(TokenKind::IntLiteral,
  907. buffer_.value_stores_->ints()
  908. .AddUnsigned(std::move(value.value))
  909. .AsTokenPayload(),
  910. byte_offset);
  911. },
  912. [&](NumericLiteral::RealValue&& value) {
  913. auto real_id = buffer_.value_stores_->reals().Add(Real{
  914. .mantissa = value.mantissa,
  915. .exponent = value.exponent,
  916. .is_decimal = (value.radix == NumericLiteral::Radix::Decimal)});
  917. return LexTokenWithPayload(TokenKind::RealLiteral, real_id.index,
  918. byte_offset);
  919. },
  920. [&](NumericLiteral::UnrecoverableError) {
  921. return LexTokenWithPayload(TokenKind::Error, token_size, byte_offset);
  922. });
  923. }
  924. auto Lexer::LexStringLiteral(llvm::StringRef source_text, ssize_t& position)
  925. -> LexResult {
  926. std::optional<StringLiteral> literal =
  927. StringLiteral::Lex(source_text.substr(position));
  928. if (!literal) {
  929. return LexError(source_text, position);
  930. }
  931. // Capture the position before we step past the token.
  932. int32_t byte_offset = position;
  933. int string_column = byte_offset - current_line_info()->start;
  934. ssize_t literal_size = literal->text().size();
  935. position += literal_size;
  936. // Update line and column information.
  937. if (literal->is_multi_line()) {
  938. while (next_line_info()->start < position) {
  939. ++line_index_;
  940. current_line_info()->indent = string_column;
  941. }
  942. // Note that we've updated the current line at this point, but
  943. // `set_indent_` is already true from above. That remains correct as the
  944. // last line of the multi-line literal *also* has its indent set.
  945. }
  946. if (literal->is_terminated()) {
  947. auto string_id = buffer_.value_stores_->string_literal_values().Add(
  948. literal->ComputeValue(buffer_.allocator_, emitter_));
  949. return LexTokenWithPayload(TokenKind::StringLiteral, string_id.index,
  950. byte_offset);
  951. } else {
  952. CARBON_DIAGNOSTIC(UnterminatedString, Error,
  953. "string is missing a terminator");
  954. emitter_.Emit(literal->text().begin(), UnterminatedString);
  955. return LexTokenWithPayload(TokenKind::Error, literal_size, byte_offset);
  956. }
  957. }
  958. auto Lexer::LexOneCharSymbolToken(llvm::StringRef source_text, TokenKind kind,
  959. ssize_t& position) -> TokenIndex {
  960. // Verify in a debug build that the incoming token kind is correct.
  961. CARBON_DCHECK(kind != TokenKind::Error);
  962. CARBON_DCHECK(kind.fixed_spelling().size() == 1);
  963. CARBON_DCHECK(source_text[position] == kind.fixed_spelling().front(),
  964. "Source text starts with '{0}' instead of the spelling '{1}' "
  965. "of the incoming token kind '{2}'",
  966. source_text[position], kind.fixed_spelling(), kind);
  967. TokenIndex token = LexToken(kind, position);
  968. ++position;
  969. return token;
  970. }
  971. auto Lexer::LexOpeningSymbolToken(llvm::StringRef source_text, TokenKind kind,
  972. ssize_t& position) -> LexResult {
  973. CARBON_DCHECK(kind.is_opening_symbol());
  974. CARBON_DCHECK(kind.fixed_spelling().size() == 1);
  975. CARBON_DCHECK(source_text[position] == kind.fixed_spelling().front(),
  976. "Source text starts with '{0}' instead of the spelling '{1}' "
  977. "of the incoming token kind '{2}'",
  978. source_text[position], kind.fixed_spelling(), kind);
  979. int32_t byte_offset = position;
  980. ++position;
  981. // Lex the opening symbol with a zero closing index. We'll add a payload later
  982. // when we match a closing symbol or in recovery.
  983. TokenIndex token = LexToken(kind, byte_offset);
  984. open_groups_.push_back(token);
  985. return token;
  986. }
auto Lexer::LexClosingSymbolToken(llvm::StringRef source_text, TokenKind kind,
                                  ssize_t& position) -> LexResult {
  CARBON_DCHECK(kind.is_closing_symbol());
  CARBON_DCHECK(kind.fixed_spelling().size() == 1);
  CARBON_DCHECK(source_text[position] == kind.fixed_spelling().front(),
                "Source text starts with '{0}' instead of the spelling '{1}' "
                "of the incoming token kind '{2}'",
                source_text[position], kind.fixed_spelling(), kind);

  int32_t byte_offset = position;
  ++position;

  // If there's no matching opening symbol, just track that we had an error.
  // We will diagnose and recover when we reach the end of the file. See
  // `DiagnoseAndFixMismatchedBrackets` for details.
  if (LLVM_UNLIKELY(open_groups_.empty())) {
    has_mismatched_brackets_ = true;

    // Lex without a matching index payload -- we'll add one during recovery.
    return LexToken(kind, byte_offset);
  }

  TokenIndex opening_token = open_groups_.pop_back_val();
  TokenIndex token =
      LexTokenWithPayload(kind, opening_token.index, byte_offset);

  auto& opening_token_info = buffer_.GetTokenInfo(opening_token);
  if (LLVM_UNLIKELY(opening_token_info.kind() != kind.opening_symbol())) {
    has_mismatched_brackets_ = true;
    buffer_.GetTokenInfo(token).set_opening_token_index(TokenIndex::None);
    return token;
  }
  opening_token_info.set_closing_token_index(token);
  return token;
}
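
// Lexes a multi-character symbolic token such as `->` or `==`. Spellings come
// from `token_kind.def`; the first `StartsWith` case that matches at the
// current position wins, so the `.def` file's ordering decides between
// spellings that share a prefix.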
auto Lexer::LexSymbolToken(llvm::StringRef source_text, ssize_t& position)
    -> LexResult {
  // One character symbols and grouping symbols are handled with dedicated
  // dispatch. We only lex the multi-character tokens here.
  TokenKind kind = llvm::StringSwitch<TokenKind>(source_text.substr(position))
#define CARBON_SYMBOL_TOKEN(Name, Spelling) \
  .StartsWith(Spelling, TokenKind::Name)
#define CARBON_ONE_CHAR_SYMBOL_TOKEN(TokenName, Spelling)
#define CARBON_OPENING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, ClosingName)
#define CARBON_CLOSING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, OpeningName)
#include "toolchain/lex/token_kind.def"
                       .Default(TokenKind::Error);
  if (kind == TokenKind::Error) {
    return LexError(source_text, position);
  }

  TokenIndex token = LexToken(kind, position);
  position += kind.fixed_spelling().size();
  return token;
}
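
// Attempts to lex a word as a numeric type literal such as `i32`, `u64`, or
// `f128`: a leading `i`, `u`, or `f` followed by decimal digits with no
// leading zero. Any other shape returns `NoMatch` so the word can be lexed as
// a keyword or identifier instead.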
auto Lexer::LexWordAsTypeLiteralToken(llvm::StringRef word, int32_t byte_offset)
    -> LexResult {
  if (word.size() < 2) {
    // Too short to form one of these tokens.
    return LexResult::NoMatch();
  }

  TokenKind kind;
  switch (word.front()) {
    case 'i':
      kind = TokenKind::IntTypeLiteral;
      break;
    case 'u':
      kind = TokenKind::UnsignedIntTypeLiteral;
      break;
    case 'f':
      kind = TokenKind::FloatTypeLiteral;
      break;
    default:
      return LexResult::NoMatch();
  }

  // No leading zeros allowed.
  if ('1' > word[1] || word[1] > '9') {
    return LexResult::NoMatch();
  }

  llvm::StringRef suffix = word.substr(1);

  // Type bit-widths can't usefully be huge numbers, so restrict the suffix to
  // a digit count that is guaranteed to round-trip through a normal integer
  // variable. This keeps the parse below simple.
  int64_t suffix_value;
  constexpr ssize_t DigitLimit =
      std::numeric_limits<decltype(suffix_value)>::digits10;
  if (suffix.size() > DigitLimit) {
    // See if this is not actually a type literal.
    if (!llvm::all_of(suffix, IsDecimalDigit)) {
      return LexResult::NoMatch();
    }

    // Otherwise, diagnose and produce an error token.
    CARBON_DIAGNOSTIC(TooManyTypeBitWidthDigits, Error,
                      "found a type literal with a bit width using {0} digits, "
                      "which is greater than the limit of {1}",
                      size_t, size_t);
    emitter_.Emit(word.begin() + 1, TooManyTypeBitWidthDigits, suffix.size(),
                  DigitLimit);
    return LexTokenWithPayload(TokenKind::Error, word.size(), byte_offset);
  }

  // It's tempting to do something more clever because we know the length ahead
  // of time, but we expect these to be short (1-3 digits) and profiling doesn't
  // show the loop as hot in the short cases.
  suffix_value = suffix[0] - '0';
  for (char c : suffix.drop_front()) {
    if (!IsDecimalDigit(c)) {
      return LexResult::NoMatch();
    }
    suffix_value = suffix_value * 10 + (c - '0');
  }

  // Add the bit width to our integer store and get its index. We treat it as
  // unsigned as that's less expensive and it can't be negative.
  CARBON_CHECK(suffix_value >= 0);
  auto bit_width_payload =
      buffer_.value_stores_->ints().Add(suffix_value).AsTokenPayload();

  return LexTokenWithPayload(kind, bit_width_payload, byte_offset);
}
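
// Lexes a keyword or identifier starting at `position`. Type literal words
// like `i32` are handled first, then keyword spellings from `token_kind.def`;
// anything else becomes an `Identifier` token whose text is interned in the
// identifier value store.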
auto Lexer::LexKeywordOrIdentifier(llvm::StringRef source_text,
                                   ssize_t& position) -> LexResult {
  if (static_cast<unsigned char>(source_text[position]) > 0x7F) {
    // TODO: Need to add support for Unicode lexing.
    return LexError(source_text, position);
  }
  CARBON_CHECK(
      IsIdStartByteTable[static_cast<unsigned char>(source_text[position])]);

  // Capture the position before we step past the token.
  int32_t byte_offset = position;

  // Take the valid characters off the front of the source buffer.
  llvm::StringRef identifier_text =
      ScanForIdentifierPrefix(source_text.substr(position));
  CARBON_CHECK(!identifier_text.empty(), "Must have at least one character!");
  position += identifier_text.size();

  // Check if the text is a type literal, and if so form such a literal.
  if (LexResult result =
          LexWordAsTypeLiteralToken(identifier_text, byte_offset)) {
    return result;
  }

  // Check if the text matches a keyword token, and if so use that.
  TokenKind kind = llvm::StringSwitch<TokenKind>(identifier_text)
#define CARBON_KEYWORD_TOKEN(Name, Spelling) .Case(Spelling, TokenKind::Name)
#include "toolchain/lex/token_kind.def"
                       .Default(TokenKind::Error);
  if (kind != TokenKind::Error) {
    return LexToken(kind, byte_offset);
  }

  // Otherwise we have a generic identifier.
  return LexTokenWithPayload(
      TokenKind::Identifier,
      buffer_.value_stores_->identifiers().Add(identifier_text).index,
      byte_offset);
}
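
// Lexes a token that starts with `#`. If the `#` immediately follows an `r`
// identifier token and is itself followed by identifier characters, the whole
// sequence is a raw identifier such as `r#if`, and the previous `r` token is
// rewritten in place to hold the raw identifier's text. Otherwise, lexing is
// handed off to `LexStringLiteral`, which handles string literals prefixed
// with `#`.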
auto Lexer::LexHash(llvm::StringRef source_text, ssize_t& position)
    -> LexResult {
  // For `r#`, we already lexed an `r` identifier token. Detect that case and
  // replace that token with a raw identifier. We do this to keep identifier
  // lexing as fast as possible.

  // Look for the `r` token. Note that this is always in bounds because we
  // create a start of file token.
  auto& prev_token_info = buffer_.token_infos_.back();
  // If the previous token isn't the identifier `r`, or the character after `#`
  // isn't the start of an identifier, this is not a raw identifier.
  if (prev_token_info.kind() != TokenKind::Identifier ||
      source_text[position - 1] != 'r' ||
      position + 1 == static_cast<ssize_t>(source_text.size()) ||
      !IsIdStartByteTable[static_cast<unsigned char>(
          source_text[position + 1])] ||
      prev_token_info.byte_offset() != static_cast<int32_t>(position) - 1) {
    [[clang::musttail]] return LexStringLiteral(source_text, position);
  }
  CARBON_DCHECK(buffer_.value_stores_->identifiers().Get(
                    prev_token_info.ident_id()) == "r");

  // Take the valid characters off the front of the source buffer.
  llvm::StringRef identifier_text =
      ScanForIdentifierPrefix(source_text.substr(position + 1));
  CARBON_CHECK(!identifier_text.empty(), "Must have at least one character!");
  position += 1 + identifier_text.size();

  // Replace the `r` identifier's value with the raw identifier.
  // TODO: This token doesn't carry any indicator that it's raw, so
  // diagnostics are unclear.
  prev_token_info.set_ident_id(
      buffer_.value_stores_->identifiers().Add(identifier_text));
  return LexResult(TokenIndex(buffer_.token_infos_.size() - 1));
}
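
// Lexes a run of characters that don't form any valid token into a single
// error token. The run stops at the first alphanumeric character, underscore,
// whitespace, or known symbol spelling so that lexing can resume normally.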
auto Lexer::LexError(llvm::StringRef source_text, ssize_t& position)
    -> LexResult {
  llvm::StringRef error_text =
      source_text.substr(position).take_while([](char c) {
        if (IsAlnum(c)) {
          return false;
        }
        switch (c) {
          case '_':
          case '\t':
          case '\n':
            return false;
          default:
            break;
        }
        return llvm::StringSwitch<bool>(llvm::StringRef(&c, 1))
#define CARBON_SYMBOL_TOKEN(Name, Spelling) .StartsWith(Spelling, false)
#include "toolchain/lex/token_kind.def"
            .Default(true);
      });
  if (error_text.empty()) {
    // TODO: Reimplement this to use the lexer properly. In the meantime,
    // guarantee that we eat at least one byte.
    error_text = source_text.substr(position, 1);
  }

  auto token =
      LexTokenWithPayload(TokenKind::Error, error_text.size(), position);
  CARBON_DIAGNOSTIC(UnrecognizedCharacters, Error,
                    "encountered unrecognized characters while parsing");
  emitter_.Emit(error_text.begin(), UnrecognizedCharacters);

  position += error_text.size();
  return token;
}
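
// Lexes the start of the file: emits the `FileStart` token, which also counts
// as whitespace, and records the indentation of the first line.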
auto Lexer::LexFileStart(llvm::StringRef source_text, ssize_t& position)
    -> void {
  CARBON_CHECK(position == 0);

  // Before lexing any source text, add the start-of-file token so that code
  // can assume a non-empty token buffer for the rest of lexing.
  LexToken(TokenKind::FileStart, 0);

  // The file start also represents whitespace.
  NoteWhitespace();

  // Also skip any horizontal whitespace and record the indentation of the
  // first line.
  SkipHorizontalWhitespace(source_text, position);
  auto* line_info = current_line_info();
  CARBON_CHECK(line_info->start == 0);
  line_info->indent = position;
}
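
// Lexes the end of the file: emits the `FileEnd` token. When the file ends
// with a trailing newline, the token is re-attributed to the previous line so
// diagnostics treat the newline as a terminator rather than starting a new,
// empty line.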
auto Lexer::LexFileEnd(llvm::StringRef source_text, ssize_t position) -> void {
  CARBON_CHECK(position == static_cast<ssize_t>(source_text.size()));

  // Check if the last line is empty and isn't also the first (and only) line.
  // If so, re-pin the last line to be the prior one so that diagnostics and
  // editors can treat newlines as terminators, even though we internally
  // handle them as separators in case the last line is missing a newline. We
  // do this here instead of when we see the newline to avoid adding more
  // conditions to that fast path.
  if (position == current_line_info()->start && line_index_ != 0) {
    --line_index_;
    --position;
  }

  // The end-of-file token is always considered to be whitespace.
  NoteWhitespace();

  LexToken(TokenKind::FileEnd, position);
}
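
// Finishes lexing: repairs any mismatched brackets and enforces the limit on
// the total number of tokens imposed by the width of `token_payload_`.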
auto Lexer::Finalize() -> void {
  // If we had any mismatched brackets, issue diagnostics and fix them.
  if (has_mismatched_brackets_ || !open_groups_.empty()) {
    DiagnoseAndFixMismatchedBrackets();
  }

  // Reject source files with so many tokens that we may have exceeded the
  // number of bits in `token_payload_`.
  //
  // Note that we rely on this check also catching the case where there are too
  // many identifiers to fit an `IdentifierId` into a `token_payload_`, and
  // likewise for `IntId` and so on. If we start adding any of those IDs prior
  // to lexing, we may need to also limit the number of those IDs here.
  if (buffer_.token_infos_.size() > TokenIndex::Max) {
    CARBON_DIAGNOSTIC(TooManyTokens, Error,
                      "too many tokens in source file; try splitting into "
                      "multiple source files");
    // Subtract one to leave room for the `FileEnd` token.
    token_emitter_.Emit(TokenIndex(TokenIndex::Max - 1), TooManyTokens);
    // TODO: Convert tokens after the token limit to error tokens to avoid
    // misinterpretation by consumers of the tokenized buffer.
  }
}

// A list of pending insertions to make into a tokenized buffer for error
// recovery. These are buffered so that we can perform them in linear time.
class Lexer::ErrorRecoveryBuffer {
 public:
  // `buffer` must not be null.
  explicit ErrorRecoveryBuffer(TokenizedBuffer* buffer) : buffer_(buffer) {}
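
  // Returns true if no insertions or error-token replacements have been
  // recorded.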
  auto empty() const -> bool {
    return new_tokens_.empty() && !any_error_tokens_;
  }

  // Insert a recovery token of kind `kind` before `insert_before`. Note that
  // we currently require insertions to be specified in source order, but this
  // restriction would be easy to relax.
  auto InsertBefore(TokenIndex insert_before, TokenKind kind) -> void {
    CARBON_CHECK(insert_before.index > 0,
                 "Cannot insert before the start of file token.");
    CARBON_CHECK(
        insert_before.index < static_cast<int>(buffer_->token_infos_.size()),
        "Cannot insert after the end of file token.");
    CARBON_CHECK(
        new_tokens_.empty() || new_tokens_.back().first <= insert_before,
        "Insertions performed out of order.");

    // If the `insert_before` token has leading whitespace, mark the
    // inserted token as also having leading whitespace. This avoids changing
    // whether the prior tokens had leading or trailing whitespace when
    // inserting.
    bool insert_leading_space = buffer_->HasLeadingWhitespace(insert_before);

    // Find the end of the token before the target token, and add the new token
    // there.
    TokenIndex insert_after(insert_before.index - 1);
    const auto& prev_info = buffer_->GetTokenInfo(insert_after);
    int32_t byte_offset =
        prev_info.byte_offset() + buffer_->GetTokenText(insert_after).size();
    new_tokens_.push_back(
        {insert_before, TokenInfo(kind, insert_leading_space, byte_offset)});
  }

  // Replace the given token with an error token. We do this immediately,
  // because we don't benefit from buffering it.
  auto ReplaceWithError(TokenIndex token) -> void {
    auto& token_info = buffer_->GetTokenInfo(token);
    int error_length = buffer_->GetTokenText(token).size();
    token_info.ResetAsError(error_length);
    any_error_tokens_ = true;
  }

  // Merge the recovery tokens into the token list of the tokenized buffer.
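  // The merge is a single linear pass: for each pending insertion we copy the
  // untouched run of old tokens before it, append the new token, and mark it
  // in `recovery_tokens_`, so the cost is proportional to the total number of
  // old plus new tokens.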
  auto Apply() -> void {
    auto old_tokens = std::move(buffer_->token_infos_);
    buffer_->token_infos_.clear();
    int new_size = old_tokens.size() + new_tokens_.size();
    buffer_->token_infos_.reserve(new_size);
    buffer_->recovery_tokens_.resize(new_size);

    int old_tokens_offset = 0;
    for (auto [next_offset, info] : new_tokens_) {
      buffer_->token_infos_.append(old_tokens.begin() + old_tokens_offset,
                                   old_tokens.begin() + next_offset.index);
      buffer_->AddToken(info);
      buffer_->recovery_tokens_.set(next_offset.index);
      old_tokens_offset = next_offset.index;
    }
    buffer_->token_infos_.append(old_tokens.begin() + old_tokens_offset,
                                 old_tokens.end());
  }

  // Perform bracket matching to fix cross-references between tokens. This must
  // be done after all recovery is performed and all brackets match, because
  // recovery will change token indexes.
  auto FixTokenCrossReferences() -> void {
    llvm::SmallVector<TokenIndex> open_groups;
    for (auto token : buffer_->tokens()) {
      auto kind = buffer_->GetKind(token);
      if (kind.is_opening_symbol()) {
        open_groups.push_back(token);
      } else if (kind.is_closing_symbol()) {
        CARBON_CHECK(!open_groups.empty(), "Failed to balance brackets");
        auto opening_token = open_groups.pop_back_val();
        CARBON_CHECK(
            kind ==
                buffer_->GetTokenInfo(opening_token).kind().closing_symbol(),
            "Failed to balance brackets");
        auto& opening_token_info = buffer_->GetTokenInfo(opening_token);
        auto& closing_token_info = buffer_->GetTokenInfo(token);
        opening_token_info.set_closing_token_index(token);
        closing_token_info.set_opening_token_index(opening_token);
      }
    }
  }

 private:
  TokenizedBuffer* buffer_;

  // A list of tokens to insert into the token stream to fix mismatched
  // brackets. The first element in each pair is the original token index to
  // insert the new token before.
  llvm::SmallVector<std::pair<TokenIndex, TokenizedBuffer::TokenInfo>>
      new_tokens_;

  // Whether we have changed any tokens into error tokens.
  bool any_error_tokens_ = false;
};

// Issue an UnmatchedOpening diagnostic.
static auto DiagnoseUnmatchedOpening(Diagnostics::Emitter<TokenIndex>& emitter,
                                     TokenIndex opening_token) -> void {
  CARBON_DIAGNOSTIC(UnmatchedOpening, Error,
                    "opening symbol without a corresponding closing symbol");
  emitter.Emit(opening_token, UnmatchedOpening);
}

// If brackets didn't pair or nest properly, find a set of places to insert
// brackets to fix the nesting, issue suitable diagnostics, and update the
// token list to describe the fixes.
auto Lexer::DiagnoseAndFixMismatchedBrackets() -> void {
  ErrorRecoveryBuffer fixes(&buffer_);

  // Look for mismatched brackets and decide where to add tokens to fix them.
  //
  // TODO: For now, we use a greedy algorithm for this. We could do better by
  // taking indentation into account. For example:
  //
  //   1 fn F() {
  //   2   if (thing1)
  //   3     thing2;
  //   4 }
  //   5 }
  //
  // Here, we'll match the `{` on line 1 with the `}` on line 4, and then
  // report that the `}` on line 5 is unmatched. Instead, we should notice that
  // line 1 matches better with line 5 due to indentation, and work out that
  // the missing `{` was on line 2, also based on indentation.
  open_groups_.clear();
  for (auto token : buffer_.tokens()) {
    auto kind = buffer_.GetKind(token);
    if (kind.is_opening_symbol()) {
      open_groups_.push_back(token);
      continue;
    }
    if (!kind.is_closing_symbol()) {
      continue;
    }

    // Find the innermost matching opening symbol.
    auto opening_it = llvm::find_if(
        llvm::reverse(open_groups_), [&](TokenIndex opening_token) {
          return buffer_.GetTokenInfo(opening_token).kind().closing_symbol() ==
                 kind;
        });
    if (opening_it == open_groups_.rend()) {
      CARBON_DIAGNOSTIC(
          UnmatchedClosing, Error,
          "closing symbol without a corresponding opening symbol");
      token_emitter_.Emit(token, UnmatchedClosing);
      fixes.ReplaceWithError(token);
      continue;
    }

    // All intermediate open tokens have no matching close token.
    for (auto it = open_groups_.rbegin(); it != opening_it; ++it) {
      DiagnoseUnmatchedOpening(token_emitter_, *it);
      // Add a closing bracket for the unclosed group here.
      //
      // TODO: Indicate in the diagnostic that we did this, perhaps by
      // annotating the snippet.
      auto opening_kind = buffer_.GetKind(*it);
      fixes.InsertBefore(token, opening_kind.closing_symbol());
    }
    open_groups_.erase(opening_it.base() - 1, open_groups_.end());
  }

  // Diagnose any remaining unmatched opening symbols.
  for (auto token : open_groups_) {
    // We don't have a good location to insert a close bracket. Convert the
    // opening token from a bracket to an error.
    DiagnoseUnmatchedOpening(token_emitter_, token);
    fixes.ReplaceWithError(token);
  }

  CARBON_CHECK(!fixes.empty(), "Didn't find anything to fix");
  fixes.Apply();
  fixes.FixTokenCrossReferences();
}
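
// Lexes `source` into a `TokenizedBuffer`, reporting any problems to
// `consumer` and interning values into `value_stores`.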
auto Lex(SharedValueStores& value_stores, SourceBuffer& source,
         Diagnostics::Consumer& consumer) -> TokenizedBuffer {
  return Lexer(value_stores, source, consumer).Lex();
}

}  // namespace Carbon::Lex