tokenized_buffer.cpp

// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#include "toolchain/lexer/tokenized_buffer.h"

#include <algorithm>
#include <array>
#include <cmath>
#include <iterator>
#include <optional>
#include <string>
#include <utility>
#include <variant>

#include "common/check.h"
#include "common/string_helpers.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "toolchain/lexer/character_set.h"
#include "toolchain/lexer/lex_helpers.h"
#include "toolchain/lexer/numeric_literal.h"
#include "toolchain/lexer/string_literal.h"

namespace Carbon {

// TODO: Move Overload and VariantMatch somewhere more central.

// Form an overload set from a list of functions. For example:
//
// ```
// auto overloaded = Overload{[] (int) {}, [] (float) {}};
// ```
template <typename... Fs>
struct Overload : Fs... {
  using Fs::operator()...;
};
template <typename... Fs>
Overload(Fs...) -> Overload<Fs...>;

// Pattern-match against the type of the value stored in the variant `V`. Each
// element of `fs` should be a function that takes one or more of the variant
// values in `V`.
template <typename V, typename... Fs>
auto VariantMatch(V&& v, Fs&&... fs) -> decltype(auto) {
  return std::visit(Overload{std::forward<Fs>(fs)...}, std::forward<V>(v));
}
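
// For example, a minimal sketch (illustrative, not exercised in this file),
// assuming a variant holding either an `int` or a `float`:
//
// ```
// std::variant<int, float> v = 42;
// int doubled = VariantMatch(
//     v, [](int i) { return i * 2; }, [](float f) { return int(f); });
// ```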

// Implementation of the lexer logic itself.
//
// The design is that lexing can loop over the source buffer, consuming it into
// tokens by calling into this API. This class handles the state and breaks
// down the different lexing steps that may be used. It directly updates the
// provided tokenized buffer with the lexed tokens.
class TokenizedBuffer::Lexer {
 public:
  // Symbolic result of a lexing action. This indicates whether we successfully
  // lexed a token, or whether other lexing actions should be attempted.
  //
  // While it wraps a simple boolean state, its API both helps make the
  // failures more self-documenting, and by consuming the actual token
  // constructively when one is produced, it helps ensure the correct result
  // is returned.
  class LexResult {
   public:
    // Consumes (and discards) a valid token to construct a result
    // indicating a token has been produced. Relies on implicit conversions.
    // NOLINTNEXTLINE(google-explicit-constructor)
    LexResult(Token /*discarded_token*/) : LexResult(true) {}

    // Returns a result indicating no token was produced.
    static auto NoMatch() -> LexResult { return LexResult(false); }

    // Tests whether a token was produced by the lexing routine, and
    // the lexer can continue forming tokens.
    explicit operator bool() const { return formed_token_; }

   private:
    explicit LexResult(bool formed_token) : formed_token_(formed_token) {}

    bool formed_token_;
  };
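
  // A lexing routine typically either returns a freshly added token, which
  // converts implicitly into a successful result, or reports no match. A
  // minimal sketch, with `LexSomething` as a hypothetical routine:
  //
  // ```
  // auto LexSomething(llvm::StringRef& source_text) -> LexResult {
  //   if (!source_text.startswith("something")) {
  //     return LexResult::NoMatch();
  //   }
  //   ...consume the text and form `token`...
  //   return token;
  // }
  // ```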

  Lexer(TokenizedBuffer& buffer, DiagnosticConsumer& consumer)
      : buffer_(&buffer),
        translator_(&buffer, &current_column_),
        emitter_(translator_, consumer),
        token_translator_(&buffer, &current_column_),
        token_emitter_(token_translator_, consumer),
        current_line_(buffer.AddLine(LineInfo(0))),
        current_line_info_(&buffer.GetLineInfo(current_line_)) {}

  // Perform the necessary bookkeeping to step past a newline at the current
  // line and column.
  auto HandleNewline() -> void {
    current_line_info_->length = current_column_;

    current_line_ = buffer_->AddLine(
        LineInfo(current_line_info_->start + current_column_ + 1));
    current_line_info_ = &buffer_->GetLineInfo(current_line_);
    current_column_ = 0;
    set_indent_ = false;
  }

  auto NoteWhitespace() -> void {
    if (!buffer_->token_infos_.empty()) {
      buffer_->token_infos_.back().has_trailing_space = true;
    }
  }

  auto SkipWhitespace(llvm::StringRef& source_text) -> bool {
    const char* const whitespace_start = source_text.begin();

    while (!source_text.empty()) {
      // We only support line-oriented commenting and lex comments as if they
      // were whitespace.
      if (source_text.startswith("//")) {
        // Any comment must be the only non-whitespace on the line.
        if (set_indent_) {
          CARBON_DIAGNOSTIC(TrailingComment, Error,
                            "Trailing comments are not permitted.");

          emitter_.Emit(source_text.begin(), TrailingComment);
        }

        // The introducer '//' must be followed by whitespace or EOF.
        if (source_text.size() > 2 && !IsSpace(source_text[2])) {
          CARBON_DIAGNOSTIC(NoWhitespaceAfterCommentIntroducer, Error,
                            "Whitespace is required after '//'.");

          emitter_.Emit(source_text.begin() + 2,
                        NoWhitespaceAfterCommentIntroducer);
        }

        while (!source_text.empty() && source_text.front() != '\n') {
          ++current_column_;
          source_text = source_text.drop_front();
        }
        if (source_text.empty()) {
          break;
        }
      }

      switch (source_text.front()) {
        default:
          // If we find a non-whitespace character without exhausting the
          // buffer, return true to continue lexing.
          CARBON_CHECK(!IsSpace(source_text.front()));
          if (whitespace_start != source_text.begin()) {
            NoteWhitespace();
          }
          return true;

        case '\n':
          // If this is the last character in the source, directly return here
          // to avoid creating an empty line.
          source_text = source_text.drop_front();
          if (source_text.empty()) {
            current_line_info_->length = current_column_;
            return false;
          }

          // Otherwise, add a line and set up to continue lexing.
          HandleNewline();
          continue;

        case ' ':
        case '\t':
          // Skip other forms of whitespace while tracking column.
          // TODO: This obviously needs looooots more work to handle unicode
          // whitespace as well as special handling to allow better
          // tokenization of operators. This is just a stub to check that our
          // column management works.
          ++current_column_;
          source_text = source_text.drop_front();
          continue;
      }
    }

    CARBON_CHECK(source_text.empty())
        << "Cannot reach here w/o finishing the text!";

    // Update the line length as this is also the end of a line.
    current_line_info_->length = current_column_;
    return false;
  }
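
  // For example (illustrative only): on a line reading `var x; // note`, the
  // comment introducer follows earlier tokens on the line, so TrailingComment
  // is emitted; a line reading `//note` would instead trigger
  // NoWhitespaceAfterCommentIntroducer because no space follows the
  // introducer.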

  auto LexNumericLiteral(llvm::StringRef& source_text) -> LexResult {
    std::optional<LexedNumericLiteral> literal =
        LexedNumericLiteral::Lex(source_text);
    if (!literal) {
      return LexResult::NoMatch();
    }

    int int_column = current_column_;
    int token_size = literal->text().size();
    current_column_ += token_size;
    source_text = source_text.drop_front(token_size);

    if (!set_indent_) {
      current_line_info_->indent = int_column;
      set_indent_ = true;
    }

    return VariantMatch(
        literal->ComputeValue(emitter_),
        [&](LexedNumericLiteral::IntegerValue&& value) {
          auto token = buffer_->AddToken({.kind = TokenKind::IntegerLiteral,
                                          .token_line = current_line_,
                                          .column = int_column});
          buffer_->GetTokenInfo(token).literal_index =
              buffer_->literal_int_storage_.size();
          buffer_->literal_int_storage_.push_back(std::move(value.value));
          return token;
        },
        [&](LexedNumericLiteral::RealValue&& value) {
          auto token = buffer_->AddToken({.kind = TokenKind::RealLiteral,
                                          .token_line = current_line_,
                                          .column = int_column});
          buffer_->GetTokenInfo(token).literal_index =
              buffer_->literal_int_storage_.size();
          buffer_->literal_int_storage_.push_back(std::move(value.mantissa));
          buffer_->literal_int_storage_.push_back(std::move(value.exponent));
          CARBON_CHECK(buffer_->GetRealLiteral(token).IsDecimal() ==
                       (value.radix == LexedNumericLiteral::Radix::Decimal));
          return token;
        },
        [&](LexedNumericLiteral::UnrecoverableError) {
          auto token = buffer_->AddToken({
              .kind = TokenKind::Error,
              .token_line = current_line_,
              .column = int_column,
              .error_length = token_size,
          });
          return token;
        });
  }
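
  // For example (illustrative only): `42` and `0x1F` become IntegerLiteral
  // tokens whose values land in `literal_int_storage_`, while `1.5e3` becomes
  // a RealLiteral token that stores its mantissa and exponent back to back in
  // that same storage; `literal_index` always points at the first entry.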

  auto LexStringLiteral(llvm::StringRef& source_text) -> LexResult {
    std::optional<LexedStringLiteral> literal =
        LexedStringLiteral::Lex(source_text);
    if (!literal) {
      return LexResult::NoMatch();
    }

    Line string_line = current_line_;
    int string_column = current_column_;
    int literal_size = literal->text().size();
    source_text = source_text.drop_front(literal_size);

    if (!set_indent_) {
      current_line_info_->indent = string_column;
      set_indent_ = true;
    }

    // Update line and column information.
    if (!literal->is_multi_line()) {
      current_column_ += literal_size;
    } else {
      for (char c : literal->text()) {
        if (c == '\n') {
          HandleNewline();
          // The indentation of all lines in a multi-line string literal is
          // that of the first line.
          current_line_info_->indent = string_column;
          set_indent_ = true;
        } else {
          ++current_column_;
        }
      }
    }

    if (literal->is_terminated()) {
      auto token =
          buffer_->AddToken({.kind = TokenKind::StringLiteral,
                             .token_line = string_line,
                             .column = string_column,
                             .literal_index = static_cast<int32_t>(
                                 buffer_->literal_string_storage_.size())});
      buffer_->literal_string_storage_.push_back(
          literal->ComputeValue(emitter_));
      return token;
    } else {
      CARBON_DIAGNOSTIC(UnterminatedString, Error,
                        "String is missing a terminator.");
      emitter_.Emit(literal->text().begin(), UnterminatedString);
      return buffer_->AddToken({.kind = TokenKind::Error,
                                .token_line = string_line,
                                .column = string_column,
                                .error_length = literal_size});
    }
  }
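
  // For example (illustrative only), assuming Carbon's `'''` block string
  // syntax, a literal such as:
  //
  // ```
  // var s: String = '''
  //   one
  //   two
  //   ''';
  // ```
  //
  // spans several source lines, and each line it covers is recorded with the
  // indentation of the line where the literal begins.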

  auto LexSymbolToken(llvm::StringRef& source_text) -> LexResult {
    TokenKind kind = llvm::StringSwitch<TokenKind>(source_text)
#define CARBON_SYMBOL_TOKEN(Name, Spelling) \
  .StartsWith(Spelling, TokenKind::Name)
#include "toolchain/lexer/token_kind.def"
                         .Default(TokenKind::Error);
    if (kind == TokenKind::Error) {
      return LexResult::NoMatch();
    }

    if (!set_indent_) {
      current_line_info_->indent = current_column_;
      set_indent_ = true;
    }

    CloseInvalidOpenGroups(kind);

    const char* location = source_text.begin();
    Token token = buffer_->AddToken({.kind = kind,
                                     .token_line = current_line_,
                                     .column = current_column_});
    current_column_ += kind.fixed_spelling().size();
    source_text = source_text.drop_front(kind.fixed_spelling().size());

    // Opening symbols just need to be pushed onto our queue of opening groups.
    if (kind.is_opening_symbol()) {
      open_groups_.push_back(token);
      return token;
    }

    // Only closing symbols need further special handling.
    if (!kind.is_closing_symbol()) {
      return token;
    }

    TokenInfo& closing_token_info = buffer_->GetTokenInfo(token);

    // Check that there is a matching opening symbol before we consume this as
    // a closing symbol.
    if (open_groups_.empty()) {
      closing_token_info.kind = TokenKind::Error;
      closing_token_info.error_length = kind.fixed_spelling().size();

      CARBON_DIAGNOSTIC(
          UnmatchedClosing, Error,
          "Closing symbol without a corresponding opening symbol.");
      emitter_.Emit(location, UnmatchedClosing);
      // Note that this still returns true as we do consume a symbol.
      return token;
    }

    // Finally can handle a normal closing symbol.
    Token opening_token = open_groups_.pop_back_val();
    TokenInfo& opening_token_info = buffer_->GetTokenInfo(opening_token);
    opening_token_info.closing_token = token;
    closing_token_info.opening_token = opening_token;
    return token;
  }
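
  // Because the StringSwitch above matches with `StartsWith`, maximal munch
  // depends on longer spellings appearing before their prefixes in
  // token_kind.def. For example (illustrative only), `<<` must be tried
  // before `<` so that `a << b` lexes one `<<` token rather than two `<`
  // tokens.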

  // Given a word that has already been lexed, determine whether it is a type
  // literal and if so form the corresponding token.
  auto LexWordAsTypeLiteralToken(llvm::StringRef word, int column)
      -> LexResult {
    if (word.size() < 2) {
      // Too short to form one of these tokens.
      return LexResult::NoMatch();
    }
    if (!('1' <= word[1] && word[1] <= '9')) {
      // Doesn't start with a valid initial digit.
      return LexResult::NoMatch();
    }

    std::optional<TokenKind> kind;
    switch (word.front()) {
      case 'i':
        kind = TokenKind::IntegerTypeLiteral;
        break;
      case 'u':
        kind = TokenKind::UnsignedIntegerTypeLiteral;
        break;
      case 'f':
        kind = TokenKind::FloatingPointTypeLiteral;
        break;
      default:
        return LexResult::NoMatch();
    }

    llvm::StringRef suffix = word.substr(1);
    if (!CanLexInteger(emitter_, suffix)) {
      return buffer_->AddToken(
          {.kind = TokenKind::Error,
           .token_line = current_line_,
           .column = column,
           .error_length = static_cast<int32_t>(word.size())});
    }
    llvm::APInt suffix_value;
    if (suffix.getAsInteger(10, suffix_value)) {
      return LexResult::NoMatch();
    }

    auto token = buffer_->AddToken(
        {.kind = *kind, .token_line = current_line_, .column = column});
    buffer_->GetTokenInfo(token).literal_index =
        buffer_->literal_int_storage_.size();
    buffer_->literal_int_storage_.push_back(std::move(suffix_value));
    return token;
  }
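
  // For example (illustrative only): `i32`, `u64`, and `f16` form sized type
  // literal tokens storing 32, 64, and 16, while `i0`, `i07`, and `x32` fail
  // the checks above and fall through to keyword or identifier handling.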

  // Closes all open groups that cannot remain open across the symbol `kind`.
  // Users may pass `Error` to close all open groups.
  auto CloseInvalidOpenGroups(TokenKind kind) -> void {
    if (!kind.is_closing_symbol() && kind != TokenKind::Error) {
      return;
    }

    while (!open_groups_.empty()) {
      Token opening_token = open_groups_.back();
      TokenKind opening_kind = buffer_->GetTokenInfo(opening_token).kind;
      if (kind == opening_kind.closing_symbol()) {
        return;
      }

      open_groups_.pop_back();
      CARBON_DIAGNOSTIC(
          MismatchedClosing, Error,
          "Closing symbol does not match most recent opening symbol.");
      token_emitter_.Emit(opening_token, MismatchedClosing);

      CARBON_CHECK(!buffer_->tokens().empty())
          << "Must have a prior opening token!";
      Token prev_token = buffer_->tokens().end()[-1];

      // TODO: do a smarter backwards scan for where to put the closing
      // token.
      Token closing_token = buffer_->AddToken(
          {.kind = opening_kind.closing_symbol(),
           .has_trailing_space = buffer_->HasTrailingWhitespace(prev_token),
           .is_recovery = true,
           .token_line = current_line_,
           .column = current_column_});
      TokenInfo& opening_token_info = buffer_->GetTokenInfo(opening_token);
      TokenInfo& closing_token_info = buffer_->GetTokenInfo(closing_token);
      opening_token_info.closing_token = closing_token;
      closing_token_info.opening_token = opening_token;
    }
  }
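
  // For example (illustrative only), lexing `(x]` reaches `]` while `(` is
  // still open: the `(` is diagnosed as MismatchedClosing and paired with a
  // synthesized recovery `)`, after which the `]` finds no open group and is
  // diagnosed as UnmatchedClosing.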

  auto GetOrCreateIdentifier(llvm::StringRef text) -> Identifier {
    auto insert_result = buffer_->identifier_map_.insert(
        {text, Identifier(buffer_->identifier_infos_.size())});
    if (insert_result.second) {
      buffer_->identifier_infos_.push_back({text});
    }
    return insert_result.first->second;
  }
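
  // For example (illustrative only), lexing `x + x` stores the text `x` only
  // once: the second occurrence maps to the same Identifier index through
  // `identifier_map_`.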

  auto LexKeywordOrIdentifier(llvm::StringRef& source_text) -> LexResult {
    if (!IsAlpha(source_text.front()) && source_text.front() != '_') {
      return LexResult::NoMatch();
    }

    if (!set_indent_) {
      current_line_info_->indent = current_column_;
      set_indent_ = true;
    }

    // Take the valid characters off the front of the source buffer.
    llvm::StringRef identifier_text =
        source_text.take_while([](char c) { return IsAlnum(c) || c == '_'; });
    CARBON_CHECK(!identifier_text.empty())
        << "Must have at least one character!";
    int identifier_column = current_column_;
    current_column_ += identifier_text.size();
    source_text = source_text.drop_front(identifier_text.size());

    // Check if the text is a type literal, and if so form such a literal.
    if (LexResult result =
            LexWordAsTypeLiteralToken(identifier_text, identifier_column)) {
      return result;
    }

    // Check if the text matches a keyword token, and if so use that.
    TokenKind kind = llvm::StringSwitch<TokenKind>(identifier_text)
#define CARBON_KEYWORD_TOKEN(Name, Spelling) .Case(Spelling, TokenKind::Name)
#include "toolchain/lexer/token_kind.def"
                         .Default(TokenKind::Error);
    if (kind != TokenKind::Error) {
      return buffer_->AddToken({.kind = kind,
                                .token_line = current_line_,
                                .column = identifier_column});
    }

    // Otherwise we have a generic identifier.
    return buffer_->AddToken({.kind = TokenKind::Identifier,
                              .token_line = current_line_,
                              .column = identifier_column,
                              .id = GetOrCreateIdentifier(identifier_text)});
  }
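
  // For example (illustrative only): `fn` matches a keyword token, `i32` is
  // claimed first as a type literal, and `foo_bar` falls through to a generic
  // Identifier token.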

  auto LexError(llvm::StringRef& source_text) -> LexResult {
    llvm::StringRef error_text = source_text.take_while([](char c) {
      if (IsAlnum(c)) {
        return false;
      }
      switch (c) {
        case '_':
        case '\t':
        case '\n':
          return false;
      }
      return llvm::StringSwitch<bool>(llvm::StringRef(&c, 1))
#define CARBON_SYMBOL_TOKEN(Name, Spelling) .StartsWith(Spelling, false)
#include "toolchain/lexer/token_kind.def"
          .Default(true);
    });
    if (error_text.empty()) {
      // TODO: Reimplement this to use the lexer properly. In the meantime,
      // guarantee that we eat at least one byte.
      error_text = source_text.take_front(1);
    }

    auto token = buffer_->AddToken(
        {.kind = TokenKind::Error,
         .token_line = current_line_,
         .column = current_column_,
         .error_length = static_cast<int32_t>(error_text.size())});
    CARBON_DIAGNOSTIC(UnrecognizedCharacters, Error,
                      "Encountered unrecognized characters while parsing.");
    emitter_.Emit(error_text.begin(), UnrecognizedCharacters);

    current_column_ += error_text.size();
    source_text = source_text.drop_front(error_text.size());
    return token;
  }

  auto AddEndOfFileToken() -> void {
    buffer_->AddToken({.kind = TokenKind::EndOfFile,
                       .token_line = current_line_,
                       .column = current_column_});
  }

 private:
  TokenizedBuffer* buffer_;

  SourceBufferLocationTranslator translator_;
  LexerDiagnosticEmitter emitter_;

  TokenLocationTranslator token_translator_;
  TokenDiagnosticEmitter token_emitter_;

  Line current_line_;
  LineInfo* current_line_info_;

  int current_column_ = 0;
  bool set_indent_ = false;

  llvm::SmallVector<Token> open_groups_;
};

auto TokenizedBuffer::Lex(SourceBuffer& source, DiagnosticConsumer& consumer)
    -> TokenizedBuffer {
  TokenizedBuffer buffer(source);
  ErrorTrackingDiagnosticConsumer error_tracking_consumer(consumer);
  Lexer lexer(buffer, error_tracking_consumer);

  llvm::StringRef source_text = source.text();
  while (lexer.SkipWhitespace(source_text)) {
    // Each time we find non-whitespace characters, try each kind of token we
    // support lexing, from simplest to most complex.
    Lexer::LexResult result = lexer.LexSymbolToken(source_text);
    if (!result) {
      result = lexer.LexKeywordOrIdentifier(source_text);
    }
    if (!result) {
      result = lexer.LexNumericLiteral(source_text);
    }
    if (!result) {
      result = lexer.LexStringLiteral(source_text);
    }
    if (!result) {
      result = lexer.LexError(source_text);
    }
    CARBON_CHECK(result) << "No token was lexed.";
  }

  // The end-of-file token is always considered to be whitespace.
  lexer.NoteWhitespace();
  lexer.CloseInvalidOpenGroups(TokenKind::Error);
  lexer.AddEndOfFileToken();

  if (error_tracking_consumer.seen_error()) {
    buffer.has_errors_ = true;
  }

  return buffer;
}
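
// A minimal sketch of driving this entry point (illustrative only; the
// `SourceBuffer::CreateFromFile` factory and `ConsoleDiagnosticConsumer`
// helper are assumptions about the surrounding toolchain API):
//
// ```
// auto source = SourceBuffer::CreateFromFile("example.carbon");
// auto buffer = TokenizedBuffer::Lex(*source, ConsoleDiagnosticConsumer());
// if (!buffer.has_errors()) {
//   buffer.Print(llvm::outs());
// }
// ```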

auto TokenizedBuffer::GetKind(Token token) const -> TokenKind {
  return GetTokenInfo(token).kind;
}

auto TokenizedBuffer::GetLine(Token token) const -> Line {
  return GetTokenInfo(token).token_line;
}

auto TokenizedBuffer::GetLineNumber(Token token) const -> int {
  return GetLineNumber(GetLine(token));
}

auto TokenizedBuffer::GetColumnNumber(Token token) const -> int {
  return GetTokenInfo(token).column + 1;
}

auto TokenizedBuffer::GetTokenText(Token token) const -> llvm::StringRef {
  const auto& token_info = GetTokenInfo(token);
  llvm::StringRef fixed_spelling = token_info.kind.fixed_spelling();
  if (!fixed_spelling.empty()) {
    return fixed_spelling;
  }

  if (token_info.kind == TokenKind::Error) {
    const auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    return source_->text().substr(token_start, token_info.error_length);
  }

  // Refer back to the source text to preserve oddities like radix or digit
  // separators the author included.
  if (token_info.kind == TokenKind::IntegerLiteral ||
      token_info.kind == TokenKind::RealLiteral) {
    const auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    std::optional<LexedNumericLiteral> relexed_token =
        LexedNumericLiteral::Lex(source_->text().substr(token_start));
    CARBON_CHECK(relexed_token) << "Could not reform numeric literal token.";
    return relexed_token->text();
  }

  // Refer back to the source text to find the original spelling, including
  // escape sequences etc.
  if (token_info.kind == TokenKind::StringLiteral) {
    const auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    std::optional<LexedStringLiteral> relexed_token =
        LexedStringLiteral::Lex(source_->text().substr(token_start));
    CARBON_CHECK(relexed_token) << "Could not reform string literal token.";
    return relexed_token->text();
  }

  // Refer back to the source text to avoid needing to reconstruct the
  // spelling from the size.
  if (token_info.kind.is_sized_type_literal()) {
    const auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    llvm::StringRef suffix =
        source_->text().substr(token_start + 1).take_while(IsDecimalDigit);
    return llvm::StringRef(suffix.data() - 1, suffix.size() + 1);
  }

  if (token_info.kind == TokenKind::EndOfFile) {
    return llvm::StringRef();
  }

  CARBON_CHECK(token_info.kind == TokenKind::Identifier) << token_info.kind;
  return GetIdentifierText(token_info.id);
}

auto TokenizedBuffer::GetIdentifier(Token token) const -> Identifier {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind == TokenKind::Identifier) << token_info.kind;
  return token_info.id;
}

auto TokenizedBuffer::GetIntegerLiteral(Token token) const
    -> const llvm::APInt& {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind == TokenKind::IntegerLiteral) << token_info.kind;
  return literal_int_storage_[token_info.literal_index];
}

auto TokenizedBuffer::GetRealLiteral(Token token) const -> RealLiteralValue {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind == TokenKind::RealLiteral) << token_info.kind;

  // Note that every real literal is at least three characters long, so we can
  // safely look at the second character to determine whether we have a
  // decimal or hexadecimal literal.
  const auto& line_info = GetLineInfo(token_info.token_line);
  int64_t token_start = line_info.start + token_info.column;
  char second_char = source_->text()[token_start + 1];
  bool is_decimal = second_char != 'x' && second_char != 'b';

  return RealLiteralValue(this, token_info.literal_index, is_decimal);
}

auto TokenizedBuffer::GetStringLiteral(Token token) const -> llvm::StringRef {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind == TokenKind::StringLiteral) << token_info.kind;
  return literal_string_storage_[token_info.literal_index];
}

auto TokenizedBuffer::GetTypeLiteralSize(Token token) const
    -> const llvm::APInt& {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind.is_sized_type_literal()) << token_info.kind;
  return literal_int_storage_[token_info.literal_index];
}

auto TokenizedBuffer::GetMatchedClosingToken(Token opening_token) const
    -> Token {
  const auto& opening_token_info = GetTokenInfo(opening_token);
  CARBON_CHECK(opening_token_info.kind.is_opening_symbol())
      << opening_token_info.kind;
  return opening_token_info.closing_token;
}

auto TokenizedBuffer::GetMatchedOpeningToken(Token closing_token) const
    -> Token {
  const auto& closing_token_info = GetTokenInfo(closing_token);
  CARBON_CHECK(closing_token_info.kind.is_closing_symbol())
      << closing_token_info.kind;
  return closing_token_info.opening_token;
}

auto TokenizedBuffer::HasLeadingWhitespace(Token token) const -> bool {
  auto it = TokenIterator(token);
  return it == tokens().begin() || GetTokenInfo(*(it - 1)).has_trailing_space;
}

auto TokenizedBuffer::HasTrailingWhitespace(Token token) const -> bool {
  return GetTokenInfo(token).has_trailing_space;
}

auto TokenizedBuffer::IsRecoveryToken(Token token) const -> bool {
  return GetTokenInfo(token).is_recovery;
}

auto TokenizedBuffer::GetLineNumber(Line line) const -> int {
  return line.index + 1;
}

auto TokenizedBuffer::GetIndentColumnNumber(Line line) const -> int {
  return GetLineInfo(line).indent + 1;
}

auto TokenizedBuffer::GetIdentifierText(Identifier identifier) const
    -> llvm::StringRef {
  return identifier_infos_[identifier.index].text;
}

auto TokenizedBuffer::PrintWidths::Widen(const PrintWidths& widths) -> void {
  index = std::max(widths.index, index);
  kind = std::max(widths.kind, kind);
  column = std::max(widths.column, column);
  line = std::max(widths.line, line);
  indent = std::max(widths.indent, indent);
}

// Compute the printed width of a number. When numbers are printed in decimal,
// the number of digits needed is one more than the log-base-10 of the
// value. We handle a value of `zero` explicitly.
//
// This routine requires its argument to be *non-negative*.
static auto ComputeDecimalPrintedWidth(int number) -> int {
  CARBON_CHECK(number >= 0) << "Negative numbers are not supported.";
  if (number == 0) {
    return 1;
  }

  return static_cast<int>(std::log10(number)) + 1;
}
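
// Worked examples (illustrative only): `ComputeDecimalPrintedWidth(0)` is 1
// via the explicit zero case; for 9, floor(log10(9)) = 0 gives width 1; for
// 10, floor(log10(10)) = 1 gives width 2; for 999, floor(log10(999)) = 2
// gives width 3.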

auto TokenizedBuffer::GetTokenPrintWidths(Token token) const -> PrintWidths {
  PrintWidths widths = {};
  widths.index = ComputeDecimalPrintedWidth(token_infos_.size());
  widths.kind = GetKind(token).name().size();
  widths.line = ComputeDecimalPrintedWidth(GetLineNumber(token));
  widths.column = ComputeDecimalPrintedWidth(GetColumnNumber(token));
  widths.indent =
      ComputeDecimalPrintedWidth(GetIndentColumnNumber(GetLine(token)));
  return widths;
}

auto TokenizedBuffer::Print(llvm::raw_ostream& output_stream) const -> void {
  if (tokens().begin() == tokens().end()) {
    return;
  }

  PrintWidths widths = {};
  widths.index = ComputeDecimalPrintedWidth(token_infos_.size());
  for (Token token : tokens()) {
    widths.Widen(GetTokenPrintWidths(token));
  }

  output_stream << "[\n";
  for (Token token : tokens()) {
    PrintToken(output_stream, token, widths);
    output_stream << "\n";
  }
  output_stream << "]\n";
}

auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream,
                                 Token token) const -> void {
  PrintToken(output_stream, token, {});
}

auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream, Token token,
                                 PrintWidths widths) const -> void {
  widths.Widen(GetTokenPrintWidths(token));
  int token_index = token.index;
  const auto& token_info = GetTokenInfo(token);
  llvm::StringRef token_text = GetTokenText(token);

  // Output the main chunk using one format string. We have to do the
  // justification manually in order to use the dynamically computed widths
  // and get the quotes included.
  output_stream << llvm::formatv(
      "{ index: {0}, kind: {1}, line: {2}, column: {3}, indent: {4}, "
      "spelling: '{5}'",
      llvm::format_decimal(token_index, widths.index),
      llvm::right_justify(llvm::formatv("'{0}'", token_info.kind.name()).str(),
                          widths.kind + 2),
      llvm::format_decimal(GetLineNumber(token_info.token_line), widths.line),
      llvm::format_decimal(GetColumnNumber(token), widths.column),
      llvm::format_decimal(GetIndentColumnNumber(token_info.token_line),
                           widths.indent),
      token_text);

  switch (token_info.kind) {
    case TokenKind::Identifier:
      output_stream << ", identifier: " << GetIdentifier(token).index;
      break;
    case TokenKind::IntegerLiteral:
      output_stream << ", value: `";
      GetIntegerLiteral(token).print(output_stream, /*isSigned=*/false);
      output_stream << "`";
      break;
    case TokenKind::RealLiteral:
      output_stream << ", value: `" << GetRealLiteral(token) << "`";
      break;
    case TokenKind::StringLiteral:
      output_stream << ", value: `" << GetStringLiteral(token) << "`";
      break;
    default:
      if (token_info.kind.is_opening_symbol()) {
        output_stream << ", closing_token: "
                      << GetMatchedClosingToken(token).index;
      } else if (token_info.kind.is_closing_symbol()) {
        output_stream << ", opening_token: "
                      << GetMatchedOpeningToken(token).index;
      }
      break;
  }

  if (token_info.has_trailing_space) {
    output_stream << ", has_trailing_space: true";
  }
  if (token_info.is_recovery) {
    output_stream << ", recovery: true";
  }

  output_stream << " },";
}
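
// For example (illustrative only), an identifier token printed by this
// routine looks roughly like:
//
// ```
// { index: 0, kind: 'Identifier', line: 1, column: 1, indent: 1,
//   spelling: 'foo', identifier: 0 },
// ```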

auto TokenizedBuffer::GetLineInfo(Line line) -> LineInfo& {
  return line_infos_[line.index];
}

auto TokenizedBuffer::GetLineInfo(Line line) const -> const LineInfo& {
  return line_infos_[line.index];
}

auto TokenizedBuffer::AddLine(LineInfo info) -> Line {
  line_infos_.push_back(info);
  return Line(static_cast<int>(line_infos_.size()) - 1);
}

auto TokenizedBuffer::GetTokenInfo(Token token) -> TokenInfo& {
  return token_infos_[token.index];
}

auto TokenizedBuffer::GetTokenInfo(Token token) const -> const TokenInfo& {
  return token_infos_[token.index];
}

auto TokenizedBuffer::AddToken(TokenInfo info) -> Token {
  token_infos_.push_back(info);
  expected_parse_tree_size_ += info.kind.expected_parse_tree_size();
  return Token(static_cast<int>(token_infos_.size()) - 1);
}

auto TokenizedBuffer::TokenIterator::Print(llvm::raw_ostream& output) const
    -> void {
  output << token_.index;
}

auto TokenizedBuffer::SourceBufferLocationTranslator::GetLocation(
    const char* loc) -> DiagnosticLocation {
  CARBON_CHECK(StringRefContainsPointer(buffer_->source_->text(), loc))
      << "location not within buffer";
  int64_t offset = loc - buffer_->source_->text().begin();

  // Find the first line starting after the given location. Note that we can't
  // inspect `line.length` here because it is not necessarily correct for the
  // final line during lexing (but will be correct later for the parse tree).
  const auto* line_it = std::partition_point(
      buffer_->line_infos_.begin(), buffer_->line_infos_.end(),
      [offset](const LineInfo& line) { return line.start <= offset; });
  bool incomplete_line_info = last_line_lexed_to_column_ != nullptr &&
                              line_it == buffer_->line_infos_.end();

  // Step back one line to find the line containing the given position.
  CARBON_CHECK(line_it != buffer_->line_infos_.begin())
      << "location precedes the start of the first line";
  --line_it;
  int line_number = line_it - buffer_->line_infos_.begin();
  int column_number = offset - line_it->start;

  llvm::StringRef line;
  // We might still be lexing the last line. If so, check to see if there are
  // any newline characters between the position we've finished lexing up to
  // and the given location.
  if (incomplete_line_info && column_number > *last_line_lexed_to_column_) {
    column_number = *last_line_lexed_to_column_;
    int64_t start = line_it->start;
    for (int64_t i = line_it->start + *last_line_lexed_to_column_; i != offset;
         ++i) {
      if (buffer_->source_->text()[i] == '\n') {
        // The next line starts just past this newline.
        start = i + 1;
        ++line_number;
        column_number = 0;
      } else {
        ++column_number;
      }
    }
    line = buffer_->source_->text().substr(start).take_until(
        [](char c) { return c == '\n'; });
  } else if (line_it->length < 0) {
    line =
        buffer_->source_->text().substr(line_it->start).take_until([](char c) {
          return c == '\n';
        });
  } else {
    line = buffer_->source_->text().substr(line_it->start, line_it->length);
  }

  return {.file_name = buffer_->source_->filename(),
          .line = line,
          .line_number = line_number + 1,
          .column_number = column_number + 1};
}
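
// For example (illustrative only): with lines starting at offsets 0, 10, and
// 25, a location at offset 12 makes `partition_point` return the line
// starting at 25 (the first line whose start exceeds 12); stepping back one
// entry yields the containing line at offset 10 and zero-based column
// 12 - 10 = 2.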

auto TokenizedBuffer::TokenLocationTranslator::GetLocation(Token token)
    -> DiagnosticLocation {
  // Map the token location into a position within the source buffer.
  const auto& token_info = buffer_->GetTokenInfo(token);
  const auto& line_info = buffer_->GetLineInfo(token_info.token_line);
  const char* token_start =
      buffer_->source_->text().begin() + line_info.start + token_info.column;

  // Find the corresponding file location.
  // TODO: Should we somehow indicate in the diagnostic location if this token
  // is a recovery token that doesn't correspond to the original source?
  return SourceBufferLocationTranslator(buffer_, last_line_lexed_to_column_)
      .GetLocation(token_start);
}

}  // namespace Carbon