// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#include "lexer/tokenized_buffer.h"

#include <algorithm>
#include <array>
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <limits>
#include <string>

#include "lexer/character_set.h"
#include "lexer/numeric_literal.h"
#include "lexer/string_literal.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"

namespace Carbon {

struct TrailingComment : SimpleDiagnostic<TrailingComment> {
  static constexpr llvm::StringLiteral ShortName = "syntax-comments";
  static constexpr llvm::StringLiteral Message =
      "Trailing comments are not permitted.";
};

struct NoWhitespaceAfterCommentIntroducer
    : SimpleDiagnostic<NoWhitespaceAfterCommentIntroducer> {
  static constexpr llvm::StringLiteral ShortName = "syntax-comments";
  static constexpr llvm::StringLiteral Message =
      "Whitespace is required after '//'.";
};

struct UnmatchedClosing : SimpleDiagnostic<UnmatchedClosing> {
  static constexpr llvm::StringLiteral ShortName = "syntax-balanced-delimiters";
  static constexpr llvm::StringLiteral Message =
      "Closing symbol without a corresponding opening symbol.";
};

struct MismatchedClosing : SimpleDiagnostic<MismatchedClosing> {
  static constexpr llvm::StringLiteral ShortName = "syntax-balanced-delimiters";
  static constexpr llvm::StringLiteral Message =
      "Closing symbol does not match most recent opening symbol.";
};

struct UnrecognizedCharacters : SimpleDiagnostic<UnrecognizedCharacters> {
  static constexpr llvm::StringLiteral ShortName =
      "syntax-unrecognized-characters";
  static constexpr llvm::StringLiteral Message =
      "Encountered unrecognized characters while parsing.";
};

// Implementation of the lexer logic itself.
//
// The design is that lexing can loop over the source buffer, consuming it into
// tokens by calling into this API. This class handles the state and breaks
// down the different lexing steps that may be used. It directly updates the
// provided tokenized buffer with the lexed tokens.
class TokenizedBuffer::Lexer {
  TokenizedBuffer& buffer;

  SourceBufferLocationTranslator translator;
  LexerDiagnosticEmitter emitter;

  TokenLocationTranslator token_translator;
  TokenDiagnosticEmitter token_emitter;

  Line current_line;
  LineInfo* current_line_info;

  int current_column = 0;
  bool set_indent = false;

  llvm::SmallVector<Token, 8> open_groups;

 public:
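  // Sets up the lexer to write into `buffer`, reporting diagnostics to
  // `consumer`. The buffer is primed with an initial empty line so the column
  // and indent bookkeeping below always has a current line to update.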
  Lexer(TokenizedBuffer& buffer, DiagnosticConsumer& consumer)
      : buffer(buffer),
        translator(buffer),
        emitter(translator, consumer),
        token_translator(buffer),
        token_emitter(token_translator, consumer),
        current_line(buffer.AddLine({0, 0, 0})),
        current_line_info(&buffer.GetLineInfo(current_line)) {}

  // Symbolic result of a lexing action. This indicates whether we successfully
  // lexed a token, or whether other lexing actions should be attempted.
  //
  // While it wraps a simple boolean state, its API helps make failures more
  // self-documenting, and by consuming the actual token when one is produced,
  // it helps ensure the correct result is returned.
  class LexResult {
    bool formed_token;

    explicit LexResult(bool formed_token) : formed_token(formed_token) {}

   public:
    // Consumes (and discards) a valid token to construct a result indicating
    // that a token has been produced.
    LexResult(Token) : LexResult(true) {}

    // Returns a result indicating no token was produced.
    static LexResult NoMatch() { return LexResult(false); }

    // Tests whether a token was produced by the lexing routine and whether
    // the lexer can continue forming tokens.
    explicit operator bool() const { return formed_token; }
  };

  // Perform the necessary bookkeeping to step past a newline at the current
  // line and column.
  auto HandleNewline() -> void {
    current_line_info->length = current_column;
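    // The next line starts immediately past the newline character itself.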
    current_line =
        buffer.AddLine({current_line_info->start + current_column + 1, 0, 0});
    current_line_info = &buffer.GetLineInfo(current_line);
    current_column = 0;
    set_indent = false;
  }

  auto SkipWhitespace(llvm::StringRef& source_text) -> bool {
    while (!source_text.empty()) {
      // We only support line-oriented commenting and lex comments as if they
      // were whitespace.
      if (source_text.startswith("//")) {
        // Any comment must be the only non-whitespace on the line.
        if (set_indent) {
          emitter.EmitError<TrailingComment>(source_text.begin());
        }
        // The introducer '//' must be followed by whitespace or EOF.
        if (source_text.size() > 2 && !IsSpace(source_text[2])) {
          emitter.EmitError<NoWhitespaceAfterCommentIntroducer>(
              source_text.begin() + 2);
        }
        while (!source_text.empty() && source_text.front() != '\n') {
          ++current_column;
          source_text = source_text.drop_front();
        }
        if (source_text.empty()) {
          break;
        }
      }

      switch (source_text.front()) {
        default:
          // If we find a non-whitespace character without exhausting the
          // buffer, return true to continue lexing.
          assert(!IsSpace(source_text.front()));
          return true;

        case '\n':
          // If this is the last character in the source, directly return here
          // to avoid creating an empty line.
          source_text = source_text.drop_front();
          if (source_text.empty()) {
            current_line_info->length = current_column;
            return false;
          }

          // Otherwise, add a line and set up to continue lexing.
          HandleNewline();
          continue;

        case ' ':
        case '\t':
          // Skip other forms of whitespace while tracking column.
          // FIXME: This obviously needs lots more work to handle unicode
          // whitespace as well as special handling to allow better
          // tokenization of operators. This is just a stub to check that our
          // column management works.
          ++current_column;
          source_text = source_text.drop_front();
          continue;
      }
    }

    assert(source_text.empty() && "Cannot reach here w/o finishing the text!");

    // Update the line length as this is also the end of a line.
    current_line_info->length = current_column;
    return false;
  }

  auto LexNumericLiteral(llvm::StringRef& source_text) -> LexResult {
    llvm::Optional<LexedNumericLiteral> literal =
        LexedNumericLiteral::Lex(source_text);
    if (!literal) {
      return LexResult::NoMatch();
    }

    int int_column = current_column;
    int token_size = literal->Text().size();
    current_column += token_size;
    source_text = source_text.drop_front(token_size);

    if (!set_indent) {
      current_line_info->indent = int_column;
      set_indent = true;
    }

    LexedNumericLiteral::Parser literal_parser(emitter, *literal);

    if (!literal_parser.Check()) {
      auto token = buffer.AddToken({
          .kind = TokenKind::Error(),
          .token_line = current_line,
          .column = int_column,
          .error_length = token_size,
      });
      return token;
    }
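
    // Integer literals store one value in `literal_int_storage`; real
    // literals store two consecutive values, the mantissa followed by the
    // exponent. `literal_index` records where that storage begins.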
    if (literal_parser.IsInteger()) {
      auto token = buffer.AddToken({.kind = TokenKind::IntegerLiteral(),
                                    .token_line = current_line,
                                    .column = int_column});
      buffer.GetTokenInfo(token).literal_index =
          buffer.literal_int_storage.size();
      buffer.literal_int_storage.push_back(literal_parser.GetMantissa());
      return token;
    } else {
      auto token = buffer.AddToken({.kind = TokenKind::RealLiteral(),
                                    .token_line = current_line,
                                    .column = int_column});
      buffer.GetTokenInfo(token).literal_index =
          buffer.literal_int_storage.size();
      buffer.literal_int_storage.push_back(literal_parser.GetMantissa());
      buffer.literal_int_storage.push_back(literal_parser.GetExponent());
      return token;
    }
  }

  auto LexStringLiteral(llvm::StringRef& source_text) -> LexResult {
    llvm::Optional<LexedStringLiteral> literal =
        LexedStringLiteral::Lex(source_text);
    if (!literal) {
      return LexResult::NoMatch();
    }

    Line string_line = current_line;
    int string_column = current_column;
    int literal_size = literal->Text().size();
    source_text = source_text.drop_front(literal_size);

    if (!set_indent) {
      current_line_info->indent = string_column;
      set_indent = true;
    }

    // Update line and column information.
    if (!literal->IsMultiLine()) {
      current_column += literal_size;
    } else {
      for (char c : literal->Text()) {
        if (c == '\n') {
          HandleNewline();
          // The indentation of all lines in a multi-line string literal is
          // that of the first line.
          current_line_info->indent = string_column;
          set_indent = true;
        } else {
          ++current_column;
        }
      }
    }

    auto token = buffer.AddToken({.kind = TokenKind::StringLiteral(),
                                  .token_line = string_line,
                                  .column = string_column});
    buffer.GetTokenInfo(token).literal_index =
        buffer.literal_string_storage.size();
    buffer.literal_string_storage.push_back(literal->ComputeValue(emitter));
    return token;
  }

  auto LexSymbolToken(llvm::StringRef& source_text) -> LexResult {
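    // Match the front of the source against every registered symbol spelling.
    // Each CARBON_SYMBOL_TOKEN entry in token_registry.def expands to a
    // `.StartsWith` case; `StringSwitch` takes the first match, so this
    // relies on the registry listing longer spellings before their prefixes.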
    TokenKind kind = llvm::StringSwitch<TokenKind>(source_text)
#define CARBON_SYMBOL_TOKEN(Name, Spelling) \
  .StartsWith(Spelling, TokenKind::Name())
#include "lexer/token_registry.def"
                         .Default(TokenKind::Error());
    if (kind == TokenKind::Error()) {
      return LexResult::NoMatch();
    }

    if (!set_indent) {
      current_line_info->indent = current_column;
      set_indent = true;
    }

    CloseInvalidOpenGroups(kind);

    const char* location = source_text.begin();
    Token token = buffer.AddToken(
        {.kind = kind, .token_line = current_line, .column = current_column});
    current_column += kind.GetFixedSpelling().size();
    source_text = source_text.drop_front(kind.GetFixedSpelling().size());

    // Opening symbols just need to be pushed onto our queue of opening groups.
    if (kind.IsOpeningSymbol()) {
      open_groups.push_back(token);
      return token;
    }

    // Only closing symbols need further special handling.
    if (!kind.IsClosingSymbol()) {
      return token;
    }

    TokenInfo& closing_token_info = buffer.GetTokenInfo(token);

    // Check that there is a matching opening symbol before we consume this as
    // a closing symbol.
    if (open_groups.empty()) {
      closing_token_info.kind = TokenKind::Error();
      closing_token_info.error_length = kind.GetFixedSpelling().size();

      emitter.EmitError<UnmatchedClosing>(location);
      // Note that this still returns true as we do consume a symbol.
      return token;
    }

    // Finally can handle a normal closing symbol.
    Token opening_token = open_groups.pop_back_val();
    TokenInfo& opening_token_info = buffer.GetTokenInfo(opening_token);
    opening_token_info.closing_token = token;
    closing_token_info.opening_token = opening_token;
    return token;
  }

  // Closes all open groups that cannot remain open across the symbol `kind`.
  // Users may pass `Error` to close all open groups.
  auto CloseInvalidOpenGroups(TokenKind kind) -> void {
    if (!kind.IsClosingSymbol() && kind != TokenKind::Error()) {
      return;
    }

    while (!open_groups.empty()) {
      Token opening_token = open_groups.back();
      TokenKind opening_kind = buffer.GetTokenInfo(opening_token).kind;
      if (kind == opening_kind.GetClosingSymbol()) {
        return;
      }

      open_groups.pop_back();
      token_emitter.EmitError<MismatchedClosing>(opening_token);

      // TODO: do a smarter backwards scan for where to put the closing
      // token.
      Token closing_token =
          buffer.AddToken({.kind = opening_kind.GetClosingSymbol(),
                           .is_recovery = true,
                           .token_line = current_line,
                           .column = current_column});
      TokenInfo& opening_token_info = buffer.GetTokenInfo(opening_token);
      TokenInfo& closing_token_info = buffer.GetTokenInfo(closing_token);
      opening_token_info.closing_token = closing_token;
      closing_token_info.opening_token = opening_token;
    }
  }

  auto GetOrCreateIdentifier(llvm::StringRef text) -> Identifier {
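    // Intern the identifier text. If the spelling is new, the map insert
    // succeeds and a matching IdentifierInfo is appended, keeping the
    // Identifier's index in sync with `identifier_infos`.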
    auto insert_result = buffer.identifier_map.insert(
        {text, Identifier(buffer.identifier_infos.size())});
    if (insert_result.second) {
      buffer.identifier_infos.push_back({text});
    }
    return insert_result.first->second;
  }

  auto LexKeywordOrIdentifier(llvm::StringRef& source_text) -> LexResult {
    if (!IsAlpha(source_text.front()) && source_text.front() != '_') {
      return LexResult::NoMatch();
    }

    if (!set_indent) {
      current_line_info->indent = current_column;
      set_indent = true;
    }

    // Take the valid characters off the front of the source buffer.
    llvm::StringRef identifier_text =
        source_text.take_while([](char c) { return IsAlnum(c) || c == '_'; });
    assert(!identifier_text.empty() && "Must have at least one character!");
    int identifier_column = current_column;
    current_column += identifier_text.size();
    source_text = source_text.drop_front(identifier_text.size());

    // Check if the text matches a keyword token, and if so use that.
    TokenKind kind = llvm::StringSwitch<TokenKind>(identifier_text)
#define CARBON_KEYWORD_TOKEN(Name, Spelling) .Case(Spelling, TokenKind::Name())
#include "lexer/token_registry.def"
                         .Default(TokenKind::Error());
    if (kind != TokenKind::Error()) {
      return buffer.AddToken({.kind = kind,
                              .token_line = current_line,
                              .column = identifier_column});
    }

    // Otherwise we have a generic identifier.
    return buffer.AddToken({.kind = TokenKind::Identifier(),
                            .token_line = current_line,
                            .column = identifier_column,
                            .id = GetOrCreateIdentifier(identifier_text)});
  }

  auto LexError(llvm::StringRef& source_text) -> LexResult {
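    // Gather as many characters as possible into one error token: stop at
    // anything that could begin a valid token (alphanumerics, '_', or a
    // symbol spelling) and at tabs and newlines, so a run of stray bytes
    // produces a single diagnostic rather than one per character.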
    llvm::StringRef error_text = source_text.take_while([](char c) {
      if (IsAlnum(c)) {
        return false;
      }
      switch (c) {
        case '_':
        case '\t':
        case '\n':
          return false;
      }
      return llvm::StringSwitch<bool>(llvm::StringRef(&c, 1))
#define CARBON_SYMBOL_TOKEN(Name, Spelling) .StartsWith(Spelling, false)
#include "lexer/token_registry.def"
          .Default(true);
    });
    if (error_text.empty()) {
      // TODO: Reimplement this to use the lexer properly. In the meantime,
      // guarantee that we eat at least one byte.
      error_text = source_text.take_front(1);
    }

    // `error_length` is an int32_t, so truncate any longer run; the remainder
    // becomes a second error token on the next pass over the source.
    error_text = error_text.substr(0, std::numeric_limits<int32_t>::max());
    auto token = buffer.AddToken(
        {.kind = TokenKind::Error(),
         .token_line = current_line,
         .column = current_column,
         .error_length = static_cast<int32_t>(error_text.size())});
    emitter.EmitError<UnrecognizedCharacters>(error_text.begin());

    current_column += error_text.size();
    source_text = source_text.drop_front(error_text.size());
    return token;
  }
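
  // Caps the token stream with an explicit end-of-file token at the current
  // (final) position.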
  auto AddEndOfFileToken() -> void {
    buffer.AddToken({.kind = TokenKind::EndOfFile(),
                     .token_line = current_line,
                     .column = current_column});
  }
};

auto TokenizedBuffer::Lex(SourceBuffer& source, DiagnosticConsumer& consumer)
    -> TokenizedBuffer {
  TokenizedBuffer buffer(source);
  ErrorTrackingDiagnosticConsumer error_tracking_consumer(consumer);
  Lexer lexer(buffer, error_tracking_consumer);

  llvm::StringRef source_text = source.Text();
  while (lexer.SkipWhitespace(source_text)) {
    // Each time we find non-whitespace characters, try each kind of token we
    // support lexing, from simplest to most complex.
    Lexer::LexResult result = lexer.LexSymbolToken(source_text);
    if (!result) {
      result = lexer.LexKeywordOrIdentifier(source_text);
    }
    if (!result) {
      result = lexer.LexNumericLiteral(source_text);
    }
    if (!result) {
      result = lexer.LexStringLiteral(source_text);
    }
    if (!result) {
      result = lexer.LexError(source_text);
    }
    assert(result && "No token was lexed.");
  }
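
  // The source is exhausted; any groups still open can never be matched, so
  // close them with recovery tokens, then cap the stream with an EOF token.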
  lexer.CloseInvalidOpenGroups(TokenKind::Error());
  lexer.AddEndOfFileToken();

  if (error_tracking_consumer.SeenError()) {
    buffer.has_errors = true;
  }

  return buffer;
}

auto TokenizedBuffer::GetKind(Token token) const -> TokenKind {
  return GetTokenInfo(token).kind;
}

auto TokenizedBuffer::GetLine(Token token) const -> Line {
  return GetTokenInfo(token).token_line;
}

auto TokenizedBuffer::GetLineNumber(Token token) const -> int {
  return GetLineNumber(GetLine(token));
}
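
// Columns are stored zero-based; this reports them one-based for display.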
auto TokenizedBuffer::GetColumnNumber(Token token) const -> int {
  return GetTokenInfo(token).column + 1;
}

auto TokenizedBuffer::GetTokenText(Token token) const -> llvm::StringRef {
  auto& token_info = GetTokenInfo(token);
  llvm::StringRef fixed_spelling = token_info.kind.GetFixedSpelling();
  if (!fixed_spelling.empty()) {
    return fixed_spelling;
  }

  if (token_info.kind == TokenKind::Error()) {
    auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    return source->Text().substr(token_start, token_info.error_length);
  }

  // Refer back to the source text to preserve oddities like radix or digit
  // separators the author included.
  if (token_info.kind == TokenKind::IntegerLiteral() ||
      token_info.kind == TokenKind::RealLiteral()) {
    auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    llvm::Optional<LexedNumericLiteral> relexed_token =
        LexedNumericLiteral::Lex(source->Text().substr(token_start));
    assert(relexed_token && "Could not reform numeric literal token.");
    return relexed_token->Text();
  }

  // Refer back to the source text to find the original spelling, including
  // escape sequences etc.
  if (token_info.kind == TokenKind::StringLiteral()) {
    auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    llvm::Optional<LexedStringLiteral> relexed_token =
        LexedStringLiteral::Lex(source->Text().substr(token_start));
    assert(relexed_token && "Could not reform string literal token.");
    return relexed_token->Text();
  }

  if (token_info.kind == TokenKind::EndOfFile()) {
    return llvm::StringRef();
  }

  assert(token_info.kind == TokenKind::Identifier() &&
         "Only identifiers have stored text!");
  return GetIdentifierText(token_info.id);
}

auto TokenizedBuffer::GetIdentifier(Token token) const -> Identifier {
  auto& token_info = GetTokenInfo(token);
  assert(token_info.kind == TokenKind::Identifier() &&
         "The token must be an identifier!");
  return token_info.id;
}

auto TokenizedBuffer::GetIntegerLiteral(Token token) const
    -> const llvm::APInt& {
  auto& token_info = GetTokenInfo(token);
  assert(token_info.kind == TokenKind::IntegerLiteral() &&
         "The token must be an integer literal!");
  return literal_int_storage[token_info.literal_index];
}

auto TokenizedBuffer::GetRealLiteral(Token token) const -> RealLiteralValue {
  auto& token_info = GetTokenInfo(token);
  assert(token_info.kind == TokenKind::RealLiteral() &&
         "The token must be a real literal!");

  // Note that every real literal is at least three characters long, so we can
  // safely look at the second character to determine whether we have a
  // decimal literal or a hexadecimal/binary one.
  auto& line_info = GetLineInfo(token_info.token_line);
  int64_t token_start = line_info.start + token_info.column;
  char second_char = source->Text()[token_start + 1];
  bool is_decimal = second_char != 'x' && second_char != 'b';

  return RealLiteralValue(this, token_info.literal_index, is_decimal);
}

auto TokenizedBuffer::GetStringLiteral(Token token) const -> llvm::StringRef {
  auto& token_info = GetTokenInfo(token);
  assert(token_info.kind == TokenKind::StringLiteral() &&
         "The token must be a string literal!");
  return literal_string_storage[token_info.literal_index];
}

auto TokenizedBuffer::GetMatchedClosingToken(Token opening_token) const
    -> Token {
  auto& opening_token_info = GetTokenInfo(opening_token);
  assert(opening_token_info.kind.IsOpeningSymbol() &&
         "The token must be an opening group symbol!");
  return opening_token_info.closing_token;
}

auto TokenizedBuffer::GetMatchedOpeningToken(Token closing_token) const
    -> Token {
  auto& closing_token_info = GetTokenInfo(closing_token);
  assert(closing_token_info.kind.IsClosingSymbol() &&
         "The token must be a closing group symbol!");
  return closing_token_info.opening_token;
}

auto TokenizedBuffer::IsRecoveryToken(Token token) const -> bool {
  return GetTokenInfo(token).is_recovery;
}

auto TokenizedBuffer::GetLineNumber(Line line) const -> int {
  return line.index + 1;
}

auto TokenizedBuffer::GetIndentColumnNumber(Line line) const -> int {
  return GetLineInfo(line).indent + 1;
}

auto TokenizedBuffer::GetIdentifierText(Identifier identifier) const
    -> llvm::StringRef {
  return identifier_infos[identifier.index].text;
}

auto TokenizedBuffer::PrintWidths::Widen(const PrintWidths& widths) -> void {
  index = std::max(widths.index, index);
  kind = std::max(widths.kind, kind);
  column = std::max(widths.column, column);
  line = std::max(widths.line, line);
  indent = std::max(widths.indent, indent);
}

// Compute the printed width of a number. When numbers are printed in decimal,
// the number of digits needed is one more than the floor of the log-base-10
// of the value. We handle a value of zero explicitly.
//
// This routine requires its argument to be *non-negative*.
static auto ComputeDecimalPrintedWidth(int number) -> int {
  assert(number >= 0 && "Negative numbers are not supported.");
  if (number == 0) {
    return 1;
  }

  return static_cast<int>(std::log10(number)) + 1;
}

auto TokenizedBuffer::GetTokenPrintWidths(Token token) const -> PrintWidths {
  PrintWidths widths = {};
  widths.index = ComputeDecimalPrintedWidth(token_infos.size());
  widths.kind = GetKind(token).Name().size();
  widths.line = ComputeDecimalPrintedWidth(GetLineNumber(token));
  widths.column = ComputeDecimalPrintedWidth(GetColumnNumber(token));
  widths.indent =
      ComputeDecimalPrintedWidth(GetIndentColumnNumber(GetLine(token)));
  return widths;
}
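
// Prints a dump of the token stream, one token per line, padding each field
// to the widest value seen so the columns line up.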
auto TokenizedBuffer::Print(llvm::raw_ostream& output_stream) const -> void {
  if (Tokens().begin() == Tokens().end()) {
    return;
  }

  PrintWidths widths = {};
  widths.index = ComputeDecimalPrintedWidth(token_infos.size());
  for (Token token : Tokens()) {
    widths.Widen(GetTokenPrintWidths(token));
  }

  for (Token token : Tokens()) {
    PrintToken(output_stream, token, widths);
    output_stream << "\n";
  }
}

auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream,
                                 Token token) const -> void {
  PrintToken(output_stream, token, {});
}

auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream, Token token,
                                 PrintWidths widths) const -> void {
  widths.Widen(GetTokenPrintWidths(token));
  int token_index = token.index;
  auto& token_info = GetTokenInfo(token);
  llvm::StringRef token_text = GetTokenText(token);

  // Output the main chunk using one format string. We have to do the
  // justification manually in order to use the dynamically computed widths
  // and get the quotes included.
  output_stream << llvm::formatv(
      "token: { index: {0}, kind: {1}, line: {2}, column: {3}, indent: {4}, "
      "spelling: '{5}'",
      llvm::format_decimal(token_index, widths.index),
      llvm::right_justify(
          (llvm::Twine("'") + token_info.kind.Name() + "'").str(),
          widths.kind + 2),
      llvm::format_decimal(GetLineNumber(token_info.token_line), widths.line),
      llvm::format_decimal(GetColumnNumber(token), widths.column),
      llvm::format_decimal(GetIndentColumnNumber(token_info.token_line),
                           widths.indent),
      token_text);

  if (token_info.kind == TokenKind::Identifier()) {
    output_stream << ", identifier: " << GetIdentifier(token).index;
  } else if (token_info.kind.IsOpeningSymbol()) {
    output_stream << ", closing_token: " << GetMatchedClosingToken(token).index;
  } else if (token_info.kind.IsClosingSymbol()) {
    output_stream << ", opening_token: " << GetMatchedOpeningToken(token).index;
  } else if (token_info.kind == TokenKind::StringLiteral()) {
    output_stream << ", value: `" << GetStringLiteral(token) << "`";
  }
  // TODO: Include value for numeric literals.

  if (token_info.is_recovery) {
    output_stream << ", recovery: true";
  }

  output_stream << " }";
}

auto TokenizedBuffer::GetLineInfo(Line line) -> LineInfo& {
  return line_infos[line.index];
}

auto TokenizedBuffer::GetLineInfo(Line line) const -> const LineInfo& {
  return line_infos[line.index];
}

auto TokenizedBuffer::AddLine(LineInfo info) -> Line {
  line_infos.push_back(info);
  return Line(static_cast<int>(line_infos.size()) - 1);
}

auto TokenizedBuffer::GetTokenInfo(Token token) -> TokenInfo& {
  return token_infos[token.index];
}

auto TokenizedBuffer::GetTokenInfo(Token token) const -> const TokenInfo& {
  return token_infos[token.index];
}

auto TokenizedBuffer::AddToken(TokenInfo info) -> Token {
  token_infos.push_back(info);
  return Token(static_cast<int>(token_infos.size()) - 1);
}
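
// Maps a raw pointer into the source text to a file/line/column location,
// binary-searching the lines lexed so far for the line containing `loc`.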
auto TokenizedBuffer::SourceBufferLocationTranslator::GetLocation(
    const char* loc) -> Diagnostic::Location {
  assert(llvm::is_sorted(std::array{buffer_->source->Text().begin(), loc,
                                    buffer_->source->Text().end()}) &&
         "location not within buffer");
  int64_t offset = loc - buffer_->source->Text().begin();

  // Find the first line starting after the given location. Note that we can't
  // inspect `line.length` here because it is not necessarily correct for the
  // final line.
  auto line_it = std::partition_point(
      buffer_->line_infos.begin(), buffer_->line_infos.end(),
      [offset](const LineInfo& line) { return line.start <= offset; });
  bool incomplete_line_info = line_it == buffer_->line_infos.end();

  // Step back one line to find the line containing the given position.
  assert(line_it != buffer_->line_infos.begin() &&
         "location precedes the start of the first line");
  --line_it;
  int line_number = line_it - buffer_->line_infos.begin();
  int column_number = offset - line_it->start;

  // We might still be lexing the last line. If so, check to see if there are
  // any newline characters between the start of this line and the given
  // location.
  if (incomplete_line_info) {
    column_number = 0;
    for (int64_t i = line_it->start; i != offset; ++i) {
      if (buffer_->source->Text()[i] == '\n') {
        ++line_number;
        column_number = 0;
      } else {
        ++column_number;
      }
    }
  }

  return {.file_name = buffer_->source->Filename().str(),
          .line_number = line_number + 1,
          .column_number = column_number + 1};
}

auto TokenizedBuffer::TokenLocationTranslator::GetLocation(Token token)
    -> Diagnostic::Location {
  // Map the token location into a position within the source buffer.
  auto& token_info = buffer_->GetTokenInfo(token);
  auto& line_info = buffer_->GetLineInfo(token_info.token_line);
  const char* token_start =
      buffer_->source->Text().begin() + line_info.start + token_info.column;

  // Find the corresponding file location.
  // TODO: Should we somehow indicate in the diagnostic location if this token
  // is a recovery token that doesn't correspond to the original source?
  return SourceBufferLocationTranslator(*buffer_).GetLocation(token_start);
}

}  // namespace Carbon