// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#include "toolchain/lexer/tokenized_buffer.h"

#include <algorithm>
#include <array>
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <limits>
#include <string>
#include <variant>

#include "common/check.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "toolchain/lexer/character_set.h"
#include "toolchain/lexer/numeric_literal.h"
#include "toolchain/lexer/string_literal.h"

namespace Carbon {

struct TrailingComment : DiagnosticBase<TrailingComment> {
  static constexpr llvm::StringLiteral ShortName = "syntax-comments";
  static constexpr llvm::StringLiteral Message =
      "Trailing comments are not permitted.";
};

struct NoWhitespaceAfterCommentIntroducer
    : DiagnosticBase<NoWhitespaceAfterCommentIntroducer> {
  static constexpr llvm::StringLiteral ShortName = "syntax-comments";
  static constexpr llvm::StringLiteral Message =
      "Whitespace is required after '//'.";
};

struct UnmatchedClosing : DiagnosticBase<UnmatchedClosing> {
  static constexpr llvm::StringLiteral ShortName = "syntax-balanced-delimiters";
  static constexpr llvm::StringLiteral Message =
      "Closing symbol without a corresponding opening symbol.";
};

struct MismatchedClosing : DiagnosticBase<MismatchedClosing> {
  static constexpr llvm::StringLiteral ShortName = "syntax-balanced-delimiters";
  static constexpr llvm::StringLiteral Message =
      "Closing symbol does not match most recent opening symbol.";
};

struct UnrecognizedCharacters : DiagnosticBase<UnrecognizedCharacters> {
  static constexpr llvm::StringLiteral ShortName =
      "syntax-unrecognized-characters";
  static constexpr llvm::StringLiteral Message =
      "Encountered unrecognized characters while parsing.";
};

// TODO: Move Overload and VariantMatch somewhere more central.

// Form an overload set from a list of functions. For example:
//
// ```
// auto overloaded = Overload{[] (int) {}, [] (float) {}};
// ```
template <typename... Fs>
struct Overload : Fs... {
  using Fs::operator()...;
};
template <typename... Fs>
Overload(Fs...) -> Overload<Fs...>;

// Pattern-match against the type of the value stored in the variant `V`. Each
// element of `fs` should be a function that takes one or more of the variant
// values in `V`.
template <typename V, typename... Fs>
auto VariantMatch(V&& v, Fs&&... fs) -> decltype(auto) {
  return std::visit(Overload{std::forward<Fs>(fs)...}, std::forward<V>(v));
}
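
// As a usage sketch (illustrative only, not taken from elsewhere in the
// toolchain): given a `std::variant<int, float> v`, the following dispatches
// on whichever alternative is currently active:
//
// ```
// VariantMatch(std::move(v),
//              [](int i) { /* handle the int case */ },
//              [](float f) { /* handle the float case */ });
// ```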

// Implementation of the lexer logic itself.
//
// The design is that lexing can loop over the source buffer, consuming it into
// tokens by calling into this API. This class handles the state and breaks down
// the different lexing steps that may be used. It directly updates the provided
// tokenized buffer with the lexed tokens.
class TokenizedBuffer::Lexer {
 public:
  // Symbolic result of a lexing action. This indicates whether we successfully
  // lexed a token, or whether other lexing actions should be attempted.
  //
  // While it wraps a simple boolean state, its API both helps make the
  // failures more self-documenting, and by consuming the actual token
  // constructively when one is produced, it helps ensure the correct result
  // is returned.
  class LexResult {
   public:
    // Consumes (and discards) a valid token to construct a result
    // indicating a token has been produced. Relies on implicit conversions.
    // NOLINTNEXTLINE(google-explicit-constructor)
    LexResult(Token) : LexResult(true) {}

    // Returns a result indicating no token was produced.
    static auto NoMatch() -> LexResult { return LexResult(false); }

    // Tests whether a token was produced by the lexing routine, and
    // the lexer can continue forming tokens.
    explicit operator bool() const { return formed_token_; }

   private:
    explicit LexResult(bool formed_token) : formed_token_(formed_token) {}

    bool formed_token_;
  };
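
  // A sketch of the protocol the lexing routines below follow (the routine
  // name here is hypothetical): return the freshly added token, which
  // implicitly converts to a successful LexResult, or return
  // LexResult::NoMatch() when the routine doesn't apply:
  //
  // ```
  // auto LexSomething(llvm::StringRef& source_text) -> LexResult {
  //   if (/* `source_text` doesn't start with a "something" */) {
  //     return LexResult::NoMatch();
  //   }
  //   return buffer_.AddToken(/* ... */);
  // }
  // ```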

  Lexer(TokenizedBuffer& buffer, DiagnosticConsumer& consumer)
      : buffer_(buffer),
        translator_(buffer, &current_column_),
        emitter_(translator_, consumer),
        token_translator_(buffer, &current_column_),
        token_emitter_(token_translator_, consumer),
        current_line_(buffer.AddLine({0, 0, 0})),
        current_line_info_(&buffer.GetLineInfo(current_line_)) {}

  // Perform the necessary bookkeeping to step past a newline at the current
  // line and column.
  auto HandleNewline() -> void {
    current_line_info_->length = current_column_;

    current_line_ = buffer_.AddLine(
        {current_line_info_->start + current_column_ + 1, 0, 0});
    current_line_info_ = &buffer_.GetLineInfo(current_line_);
    current_column_ = 0;
    set_indent_ = false;
  }

  auto NoteWhitespace() -> void {
    if (!buffer_.token_infos_.empty()) {
      buffer_.token_infos_.back().has_trailing_space = true;
    }
  }

  auto SkipWhitespace(llvm::StringRef& source_text) -> bool {
    const char* const whitespace_start = source_text.begin();

    while (!source_text.empty()) {
      // We only support line-oriented commenting and lex comments as if they
      // were whitespace.
      if (source_text.startswith("//")) {
        // Any comment must be the only non-whitespace on the line.
        if (set_indent_) {
          emitter_.EmitError<TrailingComment>(source_text.begin());
        }
        // The introducer '//' must be followed by whitespace or EOF.
        if (source_text.size() > 2 && !IsSpace(source_text[2])) {
          emitter_.EmitError<NoWhitespaceAfterCommentIntroducer>(
              source_text.begin() + 2);
        }
        while (!source_text.empty() && source_text.front() != '\n') {
          ++current_column_;
          source_text = source_text.drop_front();
        }
        if (source_text.empty()) {
          break;
        }
      }

      switch (source_text.front()) {
        default:
          // If we find a non-whitespace character without exhausting the
          // buffer, return true to continue lexing.
          assert(!IsSpace(source_text.front()));
          if (whitespace_start != source_text.begin()) {
            NoteWhitespace();
          }
          return true;

        case '\n':
          // If this is the last character in the source, directly return here
          // to avoid creating an empty line.
          source_text = source_text.drop_front();
          if (source_text.empty()) {
            current_line_info_->length = current_column_;
            return false;
          }

          // Otherwise, add a line and set up to continue lexing.
          HandleNewline();
          continue;

        case ' ':
        case '\t':
          // Skip other forms of whitespace while tracking column.
          // FIXME: This obviously needs looooots more work to handle unicode
          // whitespace as well as special handling to allow better tokenization
          // of operators. This is just a stub to check that our column
          // management works.
          ++current_column_;
          source_text = source_text.drop_front();
          continue;
      }
    }

    CHECK(source_text.empty()) << "Cannot reach here w/o finishing the text!";

    // Update the line length as this is also the end of a line.
    current_line_info_->length = current_column_;
    return false;
  }
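
  // Note on the contract above: SkipWhitespace() returns true when it stops
  // at a non-whitespace character that still needs lexing, and false once the
  // source text is exhausted. Lex() below relies on this to use it directly
  // as its loop condition.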

  auto LexNumericLiteral(llvm::StringRef& source_text) -> LexResult {
    llvm::Optional<LexedNumericLiteral> literal =
        LexedNumericLiteral::Lex(source_text);
    if (!literal) {
      return LexResult::NoMatch();
    }

    int int_column = current_column_;
    int token_size = literal->Text().size();
    current_column_ += token_size;
    source_text = source_text.drop_front(token_size);

    if (!set_indent_) {
      current_line_info_->indent = int_column;
      set_indent_ = true;
    }

    return VariantMatch(
        literal->ComputeValue(emitter_),
        [&](LexedNumericLiteral::IntegerValue&& value) {
          auto token = buffer_.AddToken({.kind = TokenKind::IntegerLiteral(),
                                         .token_line = current_line_,
                                         .column = int_column});
          buffer_.GetTokenInfo(token).literal_index =
              buffer_.literal_int_storage_.size();
          buffer_.literal_int_storage_.push_back(std::move(value.value));
          return token;
        },
        [&](LexedNumericLiteral::RealValue&& value) {
          auto token = buffer_.AddToken({.kind = TokenKind::RealLiteral(),
                                         .token_line = current_line_,
                                         .column = int_column});
          buffer_.GetTokenInfo(token).literal_index =
              buffer_.literal_int_storage_.size();
          buffer_.literal_int_storage_.push_back(std::move(value.mantissa));
          buffer_.literal_int_storage_.push_back(std::move(value.exponent));
          assert(buffer_.GetRealLiteral(token).IsDecimal() ==
                 (value.radix == 10));
          return token;
        },
        [&](LexedNumericLiteral::UnrecoverableError) {
          auto token = buffer_.AddToken({
              .kind = TokenKind::Error(),
              .token_line = current_line_,
              .column = int_column,
              .error_length = token_size,
          });
          return token;
        });
  }
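
  // Note on storage, as encoded above: an integer literal consumes one slot
  // of literal_int_storage_, while a real literal consumes two consecutive
  // slots (the mantissa followed by the exponent); both token kinds record
  // the index of their first slot in literal_index.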

  auto LexStringLiteral(llvm::StringRef& source_text) -> LexResult {
    llvm::Optional<LexedStringLiteral> literal =
        LexedStringLiteral::Lex(source_text);
    if (!literal) {
      return LexResult::NoMatch();
    }

    Line string_line = current_line_;
    int string_column = current_column_;
    int literal_size = literal->Text().size();
    source_text = source_text.drop_front(literal_size);

    if (!set_indent_) {
      current_line_info_->indent = string_column;
      set_indent_ = true;
    }

    // Update line and column information.
    if (!literal->IsMultiLine()) {
      current_column_ += literal_size;
    } else {
      for (char c : literal->Text()) {
        if (c == '\n') {
          HandleNewline();
          // The indentation of all lines in a multi-line string literal is
          // that of the first line.
          current_line_info_->indent = string_column;
          set_indent_ = true;
        } else {
          ++current_column_;
        }
      }
    }

    auto token = buffer_.AddToken({.kind = TokenKind::StringLiteral(),
                                   .token_line = string_line,
                                   .column = string_column});
    buffer_.GetTokenInfo(token).literal_index =
        buffer_.literal_string_storage_.size();
    buffer_.literal_string_storage_.push_back(literal->ComputeValue(emitter_));
    return token;
  }

  auto LexSymbolToken(llvm::StringRef& source_text) -> LexResult {
    TokenKind kind = llvm::StringSwitch<TokenKind>(source_text)
#define CARBON_SYMBOL_TOKEN(Name, Spelling) \
  .StartsWith(Spelling, TokenKind::Name())
#include "toolchain/lexer/token_registry.def"
                         .Default(TokenKind::Error());
    if (kind == TokenKind::Error()) {
      return LexResult::NoMatch();
    }

    if (!set_indent_) {
      current_line_info_->indent = current_column_;
      set_indent_ = true;
    }

    CloseInvalidOpenGroups(kind);

    const char* location = source_text.begin();
    Token token = buffer_.AddToken(
        {.kind = kind, .token_line = current_line_, .column = current_column_});
    current_column_ += kind.GetFixedSpelling().size();
    source_text = source_text.drop_front(kind.GetFixedSpelling().size());

    // Opening symbols just need to be pushed onto our queue of opening groups.
    if (kind.IsOpeningSymbol()) {
      open_groups_.push_back(token);
      return token;
    }

    // Only closing symbols need further special handling.
    if (!kind.IsClosingSymbol()) {
      return token;
    }

    TokenInfo& closing_token_info = buffer_.GetTokenInfo(token);

    // Check that there is a matching opening symbol before we consume this as
    // a closing symbol.
    if (open_groups_.empty()) {
      closing_token_info.kind = TokenKind::Error();
      closing_token_info.error_length = kind.GetFixedSpelling().size();

      emitter_.EmitError<UnmatchedClosing>(location);
      // Note that this still returns true as we do consume a symbol.
      return token;
    }

    // Finally can handle a normal closing symbol.
    Token opening_token = open_groups_.pop_back_val();
    TokenInfo& opening_token_info = buffer_.GetTokenInfo(opening_token);
    opening_token_info.closing_token = token;
    closing_token_info.opening_token = opening_token;
    return token;
  }

  // Given a word that has already been lexed, determine whether it is a type
  // literal and if so form the corresponding token.
  auto LexWordAsTypeLiteralToken(llvm::StringRef word, int column)
      -> LexResult {
    if (word.size() < 2) {
      // Too short to form one of these tokens.
      return LexResult::NoMatch();
    }
    if (!('1' <= word[1] && word[1] <= '9')) {
      // Doesn't start with a valid initial digit.
      return LexResult::NoMatch();
    }

    llvm::Optional<TokenKind> kind;
    switch (word.front()) {
      case 'i':
        kind = TokenKind::IntegerTypeLiteral();
        break;
      case 'u':
        kind = TokenKind::UnsignedIntegerTypeLiteral();
        break;
      case 'f':
        kind = TokenKind::FloatingPointTypeLiteral();
        break;
      default:
        return LexResult::NoMatch();
    }

    llvm::StringRef suffix = word.substr(1);
    llvm::APInt suffix_value;
    if (suffix.getAsInteger(10, suffix_value)) {
      return LexResult::NoMatch();
    }

    auto token = buffer_.AddToken(
        {.kind = *kind, .token_line = current_line_, .column = column});
    buffer_.GetTokenInfo(token).literal_index =
        buffer_.literal_int_storage_.size();
    buffer_.literal_int_storage_.push_back(std::move(suffix_value));
    return token;
  }
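
  // For example, under the checks above `i32`, `u64`, and `f128` form type
  // literal tokens, while `i` (too short), `i0` (the first digit must be
  // 1-9), and `x32` (unknown prefix) fall through and are lexed as plain
  // identifiers instead.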

  // Closes all open groups that cannot remain open across the symbol `kind`.
  // Users may pass `Error` to close all open groups.
  auto CloseInvalidOpenGroups(TokenKind kind) -> void {
    if (!kind.IsClosingSymbol() && kind != TokenKind::Error()) {
      return;
    }

    while (!open_groups_.empty()) {
      Token opening_token = open_groups_.back();
      TokenKind opening_kind = buffer_.GetTokenInfo(opening_token).kind;
      if (kind == opening_kind.GetClosingSymbol()) {
        return;
      }

      open_groups_.pop_back();
      token_emitter_.EmitError<MismatchedClosing>(opening_token);

      CHECK(!buffer_.Tokens().empty()) << "Must have a prior opening token!";
      Token prev_token = buffer_.Tokens().end()[-1];

      // TODO: do a smarter backwards scan for where to put the closing
      // token.
      Token closing_token = buffer_.AddToken(
          {.kind = opening_kind.GetClosingSymbol(),
           .has_trailing_space = buffer_.HasTrailingWhitespace(prev_token),
           .is_recovery = true,
           .token_line = current_line_,
           .column = current_column_});
      TokenInfo& opening_token_info = buffer_.GetTokenInfo(opening_token);
      TokenInfo& closing_token_info = buffer_.GetTokenInfo(closing_token);
      opening_token_info.closing_token = closing_token;
      closing_token_info.opening_token = opening_token;
    }
  }
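
  // A worked example of this recovery: lexing `(foo[bar)` reaches `)` with
  // `(` and `[` still open. `)` does not close `[`, so the `[` is diagnosed
  // as mismatched and a recovery `]` token is inserted; the loop then sees
  // the matching `(` and stops, letting `)` pair with it normally.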

  auto GetOrCreateIdentifier(llvm::StringRef text) -> Identifier {
    auto insert_result = buffer_.identifier_map_.insert(
        {text, Identifier(buffer_.identifier_infos_.size())});
    if (insert_result.second) {
      buffer_.identifier_infos_.push_back({text});
    }
    return insert_result.first->second;
  }

  auto LexKeywordOrIdentifier(llvm::StringRef& source_text) -> LexResult {
    if (!IsAlpha(source_text.front()) && source_text.front() != '_') {
      return LexResult::NoMatch();
    }

    if (!set_indent_) {
      current_line_info_->indent = current_column_;
      set_indent_ = true;
    }

    // Take the valid characters off the front of the source buffer.
    llvm::StringRef identifier_text =
        source_text.take_while([](char c) { return IsAlnum(c) || c == '_'; });
    CHECK(!identifier_text.empty()) << "Must have at least one character!";
    int identifier_column = current_column_;
    current_column_ += identifier_text.size();
    source_text = source_text.drop_front(identifier_text.size());

    // Check if the text is a type literal, and if so form such a literal.
    if (LexResult result =
            LexWordAsTypeLiteralToken(identifier_text, identifier_column)) {
      return result;
    }

    // Check if the text matches a keyword token, and if so use that.
    TokenKind kind = llvm::StringSwitch<TokenKind>(identifier_text)
#define CARBON_KEYWORD_TOKEN(Name, Spelling) .Case(Spelling, TokenKind::Name())
#include "toolchain/lexer/token_registry.def"
                         .Default(TokenKind::Error());
    if (kind != TokenKind::Error()) {
      return buffer_.AddToken({.kind = kind,
                               .token_line = current_line_,
                               .column = identifier_column});
    }

    // Otherwise we have a generic identifier.
    return buffer_.AddToken({.kind = TokenKind::Identifier(),
                             .token_line = current_line_,
                             .column = identifier_column,
                             .id = GetOrCreateIdentifier(identifier_text)});
  }

  auto LexError(llvm::StringRef& source_text) -> LexResult {
    llvm::StringRef error_text = source_text.take_while([](char c) {
      if (IsAlnum(c)) {
        return false;
      }
      switch (c) {
        case '_':
        case '\t':
        case '\n':
          return false;
      }
      return llvm::StringSwitch<bool>(llvm::StringRef(&c, 1))
#define CARBON_SYMBOL_TOKEN(Name, Spelling) .StartsWith(Spelling, false)
#include "toolchain/lexer/token_registry.def"
          .Default(true);
    });
    if (error_text.empty()) {
      // TODO: Reimplement this to use the lexer properly. In the meantime,
      // guarantee that we eat at least one byte.
      error_text = source_text.take_front(1);
    }

    // Longer errors get to be two tokens.
    error_text = error_text.substr(0, std::numeric_limits<int32_t>::max());
    auto token = buffer_.AddToken(
        {.kind = TokenKind::Error(),
         .token_line = current_line_,
         .column = current_column_,
         .error_length = static_cast<int32_t>(error_text.size())});
    emitter_.EmitError<UnrecognizedCharacters>(error_text.begin());

    current_column_ += error_text.size();
    source_text = source_text.drop_front(error_text.size());
    return token;
  }

  auto AddEndOfFileToken() -> void {
    buffer_.AddToken({.kind = TokenKind::EndOfFile(),
                      .token_line = current_line_,
                      .column = current_column_});
  }

 private:
  TokenizedBuffer& buffer_;
  SourceBufferLocationTranslator translator_;
  LexerDiagnosticEmitter emitter_;
  TokenLocationTranslator token_translator_;
  TokenDiagnosticEmitter token_emitter_;
  Line current_line_;
  LineInfo* current_line_info_;
  int current_column_ = 0;
  bool set_indent_ = false;
  llvm::SmallVector<Token, 8> open_groups_;
};

auto TokenizedBuffer::Lex(SourceBuffer& source, DiagnosticConsumer& consumer)
    -> TokenizedBuffer {
  TokenizedBuffer buffer(source);
  ErrorTrackingDiagnosticConsumer error_tracking_consumer(consumer);
  Lexer lexer(buffer, error_tracking_consumer);

  llvm::StringRef source_text = source.Text();
  while (lexer.SkipWhitespace(source_text)) {
    // Each time we find non-whitespace characters, try each kind of token we
    // support lexing, from simplest to most complex.
    Lexer::LexResult result = lexer.LexSymbolToken(source_text);
    if (!result) {
      result = lexer.LexKeywordOrIdentifier(source_text);
    }
    if (!result) {
      result = lexer.LexNumericLiteral(source_text);
    }
    if (!result) {
      result = lexer.LexStringLiteral(source_text);
    }
    if (!result) {
      result = lexer.LexError(source_text);
    }
    CHECK(result) << "No token was lexed.";
  }

  // The end-of-file token is always considered to be whitespace.
  lexer.NoteWhitespace();
  lexer.CloseInvalidOpenGroups(TokenKind::Error());
  lexer.AddEndOfFileToken();

  if (error_tracking_consumer.SeenError()) {
    buffer.has_errors_ = true;
  }
  return buffer;
}
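
// A sketch of how this entry point is typically driven (hypothetical caller;
// the setup of the source buffer and consumer is illustrative, not taken from
// the toolchain):
//
// ```
// auto source = /* build a SourceBuffer for the input file */;
// auto consumer = /* some DiagnosticConsumer, e.g. printing to stderr */;
// TokenizedBuffer tokens = TokenizedBuffer::Lex(source, consumer);
// for (auto token : tokens.Tokens()) {
//   tokens.PrintToken(llvm::errs(), token);
// }
// ```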

auto TokenizedBuffer::GetKind(Token token) const -> TokenKind {
  return GetTokenInfo(token).kind;
}

auto TokenizedBuffer::GetLine(Token token) const -> Line {
  return GetTokenInfo(token).token_line;
}

auto TokenizedBuffer::GetLineNumber(Token token) const -> int {
  return GetLineNumber(GetLine(token));
}

auto TokenizedBuffer::GetColumnNumber(Token token) const -> int {
  return GetTokenInfo(token).column + 1;
}

auto TokenizedBuffer::GetTokenText(Token token) const -> llvm::StringRef {
  auto& token_info = GetTokenInfo(token);
  llvm::StringRef fixed_spelling = token_info.kind.GetFixedSpelling();
  if (!fixed_spelling.empty()) {
    return fixed_spelling;
  }

  if (token_info.kind == TokenKind::Error()) {
    auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    return source_->Text().substr(token_start, token_info.error_length);
  }

  // Refer back to the source text to preserve oddities like radix or digit
  // separators the author included.
  if (token_info.kind == TokenKind::IntegerLiteral() ||
      token_info.kind == TokenKind::RealLiteral()) {
    auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    llvm::Optional<LexedNumericLiteral> relexed_token =
        LexedNumericLiteral::Lex(source_->Text().substr(token_start));
    CHECK(relexed_token) << "Could not reform numeric literal token.";
    return relexed_token->Text();
  }

  // Refer back to the source text to find the original spelling, including
  // escape sequences etc.
  if (token_info.kind == TokenKind::StringLiteral()) {
    auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    llvm::Optional<LexedStringLiteral> relexed_token =
        LexedStringLiteral::Lex(source_->Text().substr(token_start));
    CHECK(relexed_token) << "Could not reform string literal token.";
    return relexed_token->Text();
  }

  // Refer back to the source text to avoid needing to reconstruct the
  // spelling from the size.
  if (token_info.kind.IsSizedTypeLiteral()) {
    auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    llvm::StringRef suffix =
        source_->Text().substr(token_start + 1).take_while(IsDecimalDigit);
    return llvm::StringRef(suffix.data() - 1, suffix.size() + 1);
  }

  if (token_info.kind == TokenKind::EndOfFile()) {
    return llvm::StringRef();
  }

  CHECK(token_info.kind == TokenKind::Identifier())
      << "Only identifiers have stored text!";
  return GetIdentifierText(token_info.id);
}

auto TokenizedBuffer::GetIdentifier(Token token) const -> Identifier {
  auto& token_info = GetTokenInfo(token);
  CHECK(token_info.kind == TokenKind::Identifier())
      << "The token must be an identifier!";
  return token_info.id;
}

auto TokenizedBuffer::GetIntegerLiteral(Token token) const
    -> const llvm::APInt& {
  auto& token_info = GetTokenInfo(token);
  CHECK(token_info.kind == TokenKind::IntegerLiteral())
      << "The token must be an integer literal!";
  return literal_int_storage_[token_info.literal_index];
}

auto TokenizedBuffer::GetRealLiteral(Token token) const -> RealLiteralValue {
  auto& token_info = GetTokenInfo(token);
  CHECK(token_info.kind == TokenKind::RealLiteral())
      << "The token must be a real literal!";

  // Note that every real literal is at least three characters long, so we can
  // safely look at the second character to determine whether we have a decimal
  // or hexadecimal literal.
  auto& line_info = GetLineInfo(token_info.token_line);
  int64_t token_start = line_info.start + token_info.column;
  char second_char = source_->Text()[token_start + 1];
  bool is_decimal = second_char != 'x' && second_char != 'b';

  return RealLiteralValue(this, token_info.literal_index, is_decimal);
}

auto TokenizedBuffer::GetStringLiteral(Token token) const -> llvm::StringRef {
  auto& token_info = GetTokenInfo(token);
  CHECK(token_info.kind == TokenKind::StringLiteral())
      << "The token must be a string literal!";
  return literal_string_storage_[token_info.literal_index];
}

auto TokenizedBuffer::GetTypeLiteralSize(Token token) const
    -> const llvm::APInt& {
  auto& token_info = GetTokenInfo(token);
  CHECK(token_info.kind.IsSizedTypeLiteral())
      << "The token must be a sized type literal!";
  return literal_int_storage_[token_info.literal_index];
}

auto TokenizedBuffer::GetMatchedClosingToken(Token opening_token) const
    -> Token {
  auto& opening_token_info = GetTokenInfo(opening_token);
  CHECK(opening_token_info.kind.IsOpeningSymbol())
      << "The token must be an opening group symbol!";
  return opening_token_info.closing_token;
}

auto TokenizedBuffer::GetMatchedOpeningToken(Token closing_token) const
    -> Token {
  auto& closing_token_info = GetTokenInfo(closing_token);
  CHECK(closing_token_info.kind.IsClosingSymbol())
      << "The token must be a closing group symbol!";
  return closing_token_info.opening_token;
}
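
// Note: leading whitespace is derived rather than stored. A token has leading
// whitespace when it is the first token in the buffer or when the previous
// token recorded trailing whitespace via NoteWhitespace() during lexing.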

auto TokenizedBuffer::HasLeadingWhitespace(Token token) const -> bool {
  auto it = TokenIterator(token);
  return it == Tokens().begin() || GetTokenInfo(*(it - 1)).has_trailing_space;
}

auto TokenizedBuffer::HasTrailingWhitespace(Token token) const -> bool {
  return GetTokenInfo(token).has_trailing_space;
}

auto TokenizedBuffer::IsRecoveryToken(Token token) const -> bool {
  return GetTokenInfo(token).is_recovery;
}

auto TokenizedBuffer::GetLineNumber(Line line) const -> int {
  return line.index_ + 1;
}

auto TokenizedBuffer::GetIndentColumnNumber(Line line) const -> int {
  return GetLineInfo(line).indent + 1;
}

auto TokenizedBuffer::GetIdentifierText(Identifier identifier) const
    -> llvm::StringRef {
  return identifier_infos_[identifier.index_].text;
}

auto TokenizedBuffer::PrintWidths::Widen(const PrintWidths& widths) -> void {
  index = std::max(widths.index, index);
  kind = std::max(widths.kind, kind);
  column = std::max(widths.column, column);
  line = std::max(widths.line, line);
  indent = std::max(widths.indent, indent);
}

// Compute the printed width of a number. When numbers are printed in decimal,
// the number of digits needed is one more than the log-base-10 of the value.
// We handle a value of `zero` explicitly.
//
// This routine requires its argument to be *non-negative*.
static auto ComputeDecimalPrintedWidth(int number) -> int {
  CHECK(number >= 0) << "Negative numbers are not supported.";
  if (number == 0) {
    return 1;
  }

  return static_cast<int>(std::log10(number)) + 1;
}
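
// For example, ComputeDecimalPrintedWidth(0) == 1,
// ComputeDecimalPrintedWidth(9) == 1, ComputeDecimalPrintedWidth(10) == 2,
// and ComputeDecimalPrintedWidth(999) == 3.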

auto TokenizedBuffer::GetTokenPrintWidths(Token token) const -> PrintWidths {
  PrintWidths widths = {};
  widths.index = ComputeDecimalPrintedWidth(token_infos_.size());
  widths.kind = GetKind(token).Name().size();
  widths.line = ComputeDecimalPrintedWidth(GetLineNumber(token));
  widths.column = ComputeDecimalPrintedWidth(GetColumnNumber(token));
  widths.indent =
      ComputeDecimalPrintedWidth(GetIndentColumnNumber(GetLine(token)));
  return widths;
}

auto TokenizedBuffer::Print(llvm::raw_ostream& output_stream) const -> void {
  if (Tokens().begin() == Tokens().end()) {
    return;
  }

  PrintWidths widths = {};
  widths.index = ComputeDecimalPrintedWidth(token_infos_.size());
  for (Token token : Tokens()) {
    widths.Widen(GetTokenPrintWidths(token));
  }

  for (Token token : Tokens()) {
    PrintToken(output_stream, token, widths);
    output_stream << "\n";
  }
}

auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream,
                                 Token token) const -> void {
  PrintToken(output_stream, token, {});
}

auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream, Token token,
                                 PrintWidths widths) const -> void {
  widths.Widen(GetTokenPrintWidths(token));
  int token_index = token.index_;
  auto& token_info = GetTokenInfo(token);
  llvm::StringRef token_text = GetTokenText(token);

  // Output the main chunk using one format string. We have to do the
  // justification manually in order to use the dynamically computed widths
  // and get the quotes included.
  output_stream << llvm::formatv(
      "token: { index: {0}, kind: {1}, line: {2}, column: {3}, indent: {4}, "
      "spelling: '{5}'",
      llvm::format_decimal(token_index, widths.index),
      llvm::right_justify(
          (llvm::Twine("'") + token_info.kind.Name() + "'").str(),
          widths.kind + 2),
      llvm::format_decimal(GetLineNumber(token_info.token_line), widths.line),
      llvm::format_decimal(GetColumnNumber(token), widths.column),
      llvm::format_decimal(GetIndentColumnNumber(token_info.token_line),
                           widths.indent),
      token_text);

  if (token_info.kind == TokenKind::Identifier()) {
    output_stream << ", identifier: " << GetIdentifier(token).index_;
  } else if (token_info.kind.IsOpeningSymbol()) {
    output_stream << ", closing_token: "
                  << GetMatchedClosingToken(token).index_;
  } else if (token_info.kind.IsClosingSymbol()) {
    output_stream << ", opening_token: "
                  << GetMatchedOpeningToken(token).index_;
  } else if (token_info.kind == TokenKind::StringLiteral()) {
    output_stream << ", value: `" << GetStringLiteral(token) << "`";
  }
  // TODO: Include value for numeric literals.

  if (token_info.has_trailing_space) {
    output_stream << ", has_trailing_space: true";
  }
  if (token_info.is_recovery) {
    output_stream << ", recovery: true";
  }

  output_stream << " }";
}

auto TokenizedBuffer::GetLineInfo(Line line) -> LineInfo& {
  return line_infos_[line.index_];
}

auto TokenizedBuffer::GetLineInfo(Line line) const -> const LineInfo& {
  return line_infos_[line.index_];
}

auto TokenizedBuffer::AddLine(LineInfo info) -> Line {
  line_infos_.push_back(info);
  return Line(static_cast<int>(line_infos_.size()) - 1);
}

auto TokenizedBuffer::GetTokenInfo(Token token) -> TokenInfo& {
  return token_infos_[token.index_];
}

auto TokenizedBuffer::GetTokenInfo(Token token) const -> const TokenInfo& {
  return token_infos_[token.index_];
}

auto TokenizedBuffer::AddToken(TokenInfo info) -> Token {
  token_infos_.push_back(info);
  return Token(static_cast<int>(token_infos_.size()) - 1);
}

auto TokenizedBuffer::TokenIterator::Print(llvm::raw_ostream& output) const
    -> void {
  output << token_.index_;
}

auto TokenizedBuffer::SourceBufferLocationTranslator::GetLocation(
    const char* loc) -> Diagnostic::Location {
  assert(llvm::is_sorted(std::array{buffer_->source_->Text().begin(), loc,
                                    buffer_->source_->Text().end()}) &&
         "location not within buffer");
  int64_t offset = loc - buffer_->source_->Text().begin();

  // Find the first line starting after the given location. Note that we can't
  // inspect `line.length` here because it is not necessarily correct for the
  // final line during lexing (but will be correct later for the parse tree).
  auto line_it = std::partition_point(
      buffer_->line_infos_.begin(), buffer_->line_infos_.end(),
      [offset](const LineInfo& line) { return line.start <= offset; });
  bool incomplete_line_info = last_line_lexed_to_column_ != nullptr &&
                              line_it == buffer_->line_infos_.end();

  // Step back one line to find the line containing the given position.
  CHECK(line_it != buffer_->line_infos_.begin())
      << "location precedes the start of the first line";
  --line_it;
  int line_number = line_it - buffer_->line_infos_.begin();
  int column_number = offset - line_it->start;

  // We might still be lexing the last line. If so, check to see if there are
  // any newline characters between the position we've finished lexing up to
  // and the given location.
  if (incomplete_line_info && column_number > *last_line_lexed_to_column_) {
    column_number = *last_line_lexed_to_column_;
    for (int64_t i = line_it->start + *last_line_lexed_to_column_; i != offset;
         ++i) {
      if (buffer_->source_->Text()[i] == '\n') {
        ++line_number;
        column_number = 0;
      } else {
        ++column_number;
      }
    }
  }

  return {.file_name = buffer_->source_->Filename().str(),
          .line_number = line_number + 1,
          .column_number = column_number + 1};
}

auto TokenizedBuffer::TokenLocationTranslator::GetLocation(Token token)
    -> Diagnostic::Location {
  // Map the token location into a position within the source buffer.
  auto& token_info = buffer_->GetTokenInfo(token);
  auto& line_info = buffer_->GetLineInfo(token_info.token_line);
  const char* token_start =
      buffer_->source_->Text().begin() + line_info.start + token_info.column;

  // Find the corresponding file location.
  // TODO: Should we somehow indicate in the diagnostic location if this token
  // is a recovery token that doesn't correspond to the original source?
  return SourceBufferLocationTranslator(*buffer_, last_line_lexed_to_column_)
      .GetLocation(token_start);
}

}  // namespace Carbon