// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#include "lexer/tokenized_buffer.h"

#include <algorithm>
#include <cmath>
#include <limits>
#include <string>

#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"

namespace Carbon {
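
// Returns the longest run of leading digit characters in `source_text`. Used
// both when lexing an integer literal and when recovering an integer
// literal's original spelling from the source.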
static auto TakeLeadingIntegerLiteral(llvm::StringRef source_text)
    -> llvm::StringRef {
  return source_text.take_while([](char c) { return llvm::isDigit(c); });
}
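
// Diagnostic descriptions for the errors this lexer can emit. Each provides a
// short name, a message, and a `Format` hook used by the diagnostic emitter.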
struct UnmatchedClosing {
  static constexpr llvm::StringLiteral ShortName =
      "syntax-balanced-delimiters";
  static constexpr llvm::StringLiteral Message =
      "Closing symbol without a corresponding opening symbol.";

  struct Substitutions {};
  static auto Format(const Substitutions&) -> std::string {
    return Message.str();
  }
};

struct MismatchedClosing {
  static constexpr llvm::StringLiteral ShortName =
      "syntax-balanced-delimiters";
  static constexpr llvm::StringLiteral Message =
      "Closing symbol does not match most recent opening symbol.";

  struct Substitutions {};
  static auto Format(const Substitutions&) -> std::string {
    return Message.str();
  }
};

struct UnrecognizedCharacters {
  static constexpr llvm::StringLiteral ShortName =
      "syntax-unrecognized-characters";
  static constexpr llvm::StringLiteral Message =
      "Encountered unrecognized characters while parsing.";

  struct Substitutions {};
  static auto Format(const Substitutions&) -> std::string {
    return Message.str();
  }
};

// Implementation of the lexer logic itself.
//
// The design is that lexing can loop over the source buffer, consuming it into
// tokens by calling into this API. This class handles the state and breaks
// down the different lexing steps that may be used. It directly updates the
// provided tokenized buffer with the lexed tokens.
class TokenizedBuffer::Lexer {
  TokenizedBuffer& buffer;
  DiagnosticEmitter& emitter;

  Line current_line;
  LineInfo* current_line_info;

  int current_column = 0;
  bool set_indent = false;

  llvm::SmallVector<Token, 8> open_groups;

 public:
  Lexer(TokenizedBuffer& buffer, DiagnosticEmitter& emitter)
      : buffer(buffer),
        emitter(emitter),
        current_line(buffer.AddLine({0, 0, 0})),
        current_line_info(&buffer.GetLineInfo(current_line)) {}
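
  // Skips over whitespace and line comments, keeping the line and column
  // tracking up to date. Comments are treated as whitespace, except that
  // `///` documentation comments also produce a `DocComment` token. Returns
  // true if non-whitespace text remains to be lexed, and false once
  // `source_text` has been exhausted.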
  auto SkipWhitespace(llvm::StringRef& source_text) -> bool {
    while (!source_text.empty()) {
      // We only support line-oriented commenting and lex comments as-if they
      // were whitespace. Any comment must be the only non-whitespace on the
      // line.
      if (source_text.startswith("//") && !set_indent) {
        // Check if the comment has a special starting sequence of three
        // slashes followed by a space. This represents a documentation
        // comment that is preserved as a token in the buffer. When parsing,
        // these comments will only be accepted in specific parts of the
        // grammar and will be associated with the parsed constructs as
        // structure documentation. All other comments are simply treated as
        // whitespace.
        if (source_text.startswith("///")) {
          current_line_info->indent = current_column;
          set_indent = true;
          buffer.AddToken({.kind = TokenKind::DocComment(),
                           .token_line = current_line,
                           .column = current_column});
        }
        while (!source_text.empty() && source_text.front() != '\n') {
          ++current_column;
          source_text = source_text.drop_front();
        }
        if (source_text.empty()) {
          break;
        }
      }

      switch (source_text.front()) {
        default:
          // If we find a non-whitespace character without exhausting the
          // buffer, return true to continue lexing.
          return true;

        case '\n':
          // New lines are special in order to track line structure.
          current_line_info->length = current_column;
          // If this is the last character in the source, directly return here
          // to avoid creating an empty line.
          source_text = source_text.drop_front();
          if (source_text.empty()) {
            return false;
          }
          // Otherwise, add a line and set up to continue lexing.
          current_line = buffer.AddLine(
              {current_line_info->start + current_column + 1, 0, 0});
          current_line_info = &buffer.GetLineInfo(current_line);
          current_column = 0;
          set_indent = false;
          continue;

        case ' ':
        case '\t':
          // Skip other forms of whitespace while tracking column.
          // FIXME: This obviously needs looooots more work to handle unicode
          // whitespace as well as special handling to allow better
          // tokenization of operators. This is just a stub to check that our
          // column management works.
          ++current_column;
          source_text = source_text.drop_front();
          continue;
      }
    }

    assert(source_text.empty() && "Cannot reach here w/o finishing the text!");
    // Update the line length as this is also the end of a line.
    current_line_info->length = current_column;
    return false;
  }
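
  // Lexes an integer literal if the text begins with one. Returns true and
  // consumes the literal's text on success; the parsed value is appended to
  // the buffer's integer literal storage and referenced from the token.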
  auto LexIntegerLiteral(llvm::StringRef& source_text) -> bool {
    llvm::StringRef int_text = TakeLeadingIntegerLiteral(source_text);
    if (int_text.empty()) {
      return false;
    }
    llvm::APInt int_value;
    if (int_text.getAsInteger(/*Radix=*/0, int_value)) {
      return false;
    }

    int int_column = current_column;
    current_column += int_text.size();
    source_text = source_text.drop_front(int_text.size());

    if (!set_indent) {
      current_line_info->indent = int_column;
      set_indent = true;
    }

    auto token = buffer.AddToken({.kind = TokenKind::IntegerLiteral(),
                                  .token_line = current_line,
                                  .column = int_column});
    buffer.GetTokenInfo(token).literal_index = buffer.int_literals.size();
    buffer.int_literals.push_back(std::move(int_value));
    return true;
  }
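
  // Lexes a symbol token (operator or grouping delimiter) if the text begins
  // with one of the spellings in the token registry. Opening symbols are
  // pushed onto `open_groups`; closing symbols are matched against that
  // stack, with error recovery when no matching opening symbol exists.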
  auto LexSymbolToken(llvm::StringRef& source_text) -> bool {
    TokenKind kind = llvm::StringSwitch<TokenKind>(source_text)
#define CARBON_SYMBOL_TOKEN(Name, Spelling) \
  .StartsWith(Spelling, TokenKind::Name())
#include "lexer/token_registry.def"
                         .Default(TokenKind::Error());
    if (kind == TokenKind::Error()) {
      return false;
    }

    if (!set_indent) {
      current_line_info->indent = current_column;
      set_indent = true;
    }

    CloseInvalidOpenGroups(kind);

    Token token = buffer.AddToken(
        {.kind = kind, .token_line = current_line, .column = current_column});
    current_column += kind.GetFixedSpelling().size();
    source_text = source_text.drop_front(kind.GetFixedSpelling().size());

    // Opening symbols just need to be pushed onto our queue of opening groups.
    if (kind.IsOpeningSymbol()) {
      open_groups.push_back(token);
      return true;
    }

    // Only closing symbols need further special handling.
    if (!kind.IsClosingSymbol()) {
      return true;
    }

    TokenInfo& closing_token_info = buffer.GetTokenInfo(token);

    // Check that there is a matching opening symbol before we consume this as
    // a closing symbol.
    if (open_groups.empty()) {
      closing_token_info.kind = TokenKind::Error();
      closing_token_info.error_length = kind.GetFixedSpelling().size();
      buffer.has_errors = true;

      emitter.EmitError<UnmatchedClosing>(
          [](UnmatchedClosing::Substitutions&) {});
      // Note that this still returns true as we do consume a symbol.
      return true;
    }

    // Finally can handle a normal closing symbol.
    Token opening_token = open_groups.pop_back_val();
    TokenInfo& opening_token_info = buffer.GetTokenInfo(opening_token);
    opening_token_info.closing_token = token;
    closing_token_info.opening_token = opening_token;
    return true;
  }

  // Closes all open groups that cannot remain open across the symbol `kind`.
  // Users may pass `Error` to close all open groups.
  auto CloseInvalidOpenGroups(TokenKind kind) -> void {
    if (!kind.IsClosingSymbol() && kind != TokenKind::Error()) {
      return;
    }

    while (!open_groups.empty()) {
      Token opening_token = open_groups.back();
      TokenKind opening_kind = buffer.GetTokenInfo(opening_token).kind;
      if (kind == opening_kind.GetClosingSymbol()) {
        return;
      }

      open_groups.pop_back();
      buffer.has_errors = true;
      emitter.EmitError<MismatchedClosing>(
          [](MismatchedClosing::Substitutions&) {});

      // TODO: do a smarter backwards scan for where to put the closing
      // token.
      Token closing_token =
          buffer.AddToken({.kind = opening_kind.GetClosingSymbol(),
                           .is_recovery = true,
                           .token_line = current_line,
                           .column = current_column});
      TokenInfo& opening_token_info = buffer.GetTokenInfo(opening_token);
      TokenInfo& closing_token_info = buffer.GetTokenInfo(closing_token);
      opening_token_info.closing_token = closing_token;
      closing_token_info.opening_token = opening_token;
    }
  }
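
  // Returns the identifier handle for `text`, adding it to the buffer's
  // identifier table the first time it is seen.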
  auto GetOrCreateIdentifier(llvm::StringRef text) -> Identifier {
    auto insert_result = buffer.identifier_map.insert(
        {text, Identifier(buffer.identifier_infos.size())});
    if (insert_result.second) {
      buffer.identifier_infos.push_back({text});
    }
    return insert_result.first->second;
  }
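
  // Lexes a keyword or identifier if the text begins with a letter or
  // underscore. Keywords from the token registry get their own token kind;
  // anything else becomes an `Identifier` token backed by the identifier
  // table.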
  auto LexKeywordOrIdentifier(llvm::StringRef& source_text) -> bool {
    if (!llvm::isAlpha(source_text.front()) && source_text.front() != '_') {
      return false;
    }

    if (!set_indent) {
      current_line_info->indent = current_column;
      set_indent = true;
    }

    // Take the valid characters off the front of the source buffer.
    llvm::StringRef identifier_text = source_text.take_while(
        [](char c) { return llvm::isAlnum(c) || c == '_'; });
    assert(!identifier_text.empty() && "Must have at least one character!");
    int identifier_column = current_column;
    current_column += identifier_text.size();
    source_text = source_text.drop_front(identifier_text.size());

    // Check if the text matches a keyword token, and if so use that.
    TokenKind kind = llvm::StringSwitch<TokenKind>(identifier_text)
#define CARBON_KEYWORD_TOKEN(Name, Spelling) .Case(Spelling, TokenKind::Name())
#include "lexer/token_registry.def"
                         .Default(TokenKind::Error());
    if (kind != TokenKind::Error()) {
      buffer.AddToken({.kind = kind,
                       .token_line = current_line,
                       .column = identifier_column});
      return true;
    }

    // Otherwise we have a generic identifier.
    buffer.AddToken({.kind = TokenKind::Identifier(),
                     .token_line = current_line,
                     .column = identifier_column,
                     .id = GetOrCreateIdentifier(identifier_text)});
    return true;
  }
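
  // Consumes a run of characters that cannot start any other token and turns
  // it into a single error token, so that lexing can continue past
  // unrecognized input.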
  auto LexError(llvm::StringRef& source_text) -> void {
    llvm::StringRef error_text = source_text.take_while([](char c) {
      if (llvm::isAlnum(c)) {
        return false;
      }
      switch (c) {
        case '_':
        case '\t':
        case '\n':
          return false;
      }
      return llvm::StringSwitch<bool>(llvm::StringRef(&c, 1))
#define CARBON_SYMBOL_TOKEN(Name, Spelling) .StartsWith(Spelling, false)
#include "lexer/token_registry.def"
          .Default(true);
    });
    if (error_text.empty()) {
      // TODO: Reimplement this to use the lexer properly. In the meantime,
      // guarantee that we eat at least one byte.
      error_text = source_text.take_front(1);
    }

    // Longer errors get to be two tokens.
    error_text = error_text.substr(0, std::numeric_limits<int32_t>::max());
    auto token = buffer.AddToken(
        {.kind = TokenKind::Error(),
         .token_line = current_line,
         .column = current_column,
         .error_length = static_cast<int32_t>(error_text.size())});
    // TODO: #19 - Need to convert to the diagnostics library.
    llvm::errs() << "ERROR: Line " << buffer.GetLineNumber(token)
                 << ", Column " << buffer.GetColumnNumber(token)
                 << ": Unrecognized characters!\n";

    current_column += error_text.size();
    source_text = source_text.drop_front(error_text.size());
    buffer.has_errors = true;
  }
};
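
// Lexes the entire source buffer: repeatedly skips whitespace and then tries
// each kind of token, recording an error token when nothing matches. Any
// groups still open at the end of the input are closed with recovery tokens.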
auto TokenizedBuffer::Lex(SourceBuffer& source, DiagnosticEmitter& emitter)
    -> TokenizedBuffer {
  TokenizedBuffer buffer(source);
  Lexer lexer(buffer, emitter);
  llvm::StringRef source_text = source.Text();
  while (lexer.SkipWhitespace(source_text)) {
    // Each time we find non-whitespace characters, try each kind of token we
    // support lexing, from simplest to most complex.
    if (lexer.LexSymbolToken(source_text)) {
      continue;
    }
    if (lexer.LexKeywordOrIdentifier(source_text)) {
      continue;
    }
    if (lexer.LexIntegerLiteral(source_text)) {
      continue;
    }
    lexer.LexError(source_text);
  }
  lexer.CloseInvalidOpenGroups(TokenKind::Error());
  return buffer;
}

auto TokenizedBuffer::GetKind(Token token) const -> TokenKind {
  return GetTokenInfo(token).kind;
}

auto TokenizedBuffer::GetLine(Token token) const -> Line {
  return GetTokenInfo(token).token_line;
}

auto TokenizedBuffer::GetLineNumber(Token token) const -> int {
  return GetLineNumber(GetLine(token));
}

auto TokenizedBuffer::GetColumnNumber(Token token) const -> int {
  return GetTokenInfo(token).column + 1;
}

auto TokenizedBuffer::GetTokenText(Token token) const -> llvm::StringRef {
  auto& token_info = GetTokenInfo(token);
  llvm::StringRef fixed_spelling = token_info.kind.GetFixedSpelling();
  if (!fixed_spelling.empty()) {
    return fixed_spelling;
  }

  if (token_info.kind == TokenKind::Error()) {
    auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    return source->Text().substr(token_start, token_info.error_length);
  }

  // Documentation comment tokens refer back to the source text.
  if (token_info.kind == TokenKind::DocComment()) {
    auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    int64_t token_stop = line_info.start + line_info.length;
    return source->Text().slice(token_start, token_stop);
  }

  // Refer back to the source text to preserve oddities like radix or leading
  // 0's the author had.
  if (token_info.kind == TokenKind::IntegerLiteral()) {
    auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    return TakeLeadingIntegerLiteral(source->Text().substr(token_start));
  }

  assert(token_info.kind == TokenKind::Identifier() &&
         "Only identifiers have stored text!");
  return GetIdentifierText(token_info.id);
}

auto TokenizedBuffer::GetIdentifier(Token token) const -> Identifier {
  auto& token_info = GetTokenInfo(token);
  assert(token_info.kind == TokenKind::Identifier() &&
         "The token must be an identifier!");
  return token_info.id;
}

auto TokenizedBuffer::GetIntegerLiteral(Token token) const -> llvm::APInt {
  auto& token_info = GetTokenInfo(token);
  assert(token_info.kind == TokenKind::IntegerLiteral() &&
         "The token must be an integer literal!");
  return int_literals[token_info.literal_index];
}

auto TokenizedBuffer::GetMatchedClosingToken(Token opening_token) const
    -> Token {
  auto& opening_token_info = GetTokenInfo(opening_token);
  assert(opening_token_info.kind.IsOpeningSymbol() &&
         "The token must be an opening group symbol!");
  return opening_token_info.closing_token;
}

auto TokenizedBuffer::GetMatchedOpeningToken(Token closing_token) const
    -> Token {
  auto& closing_token_info = GetTokenInfo(closing_token);
  assert(closing_token_info.kind.IsClosingSymbol() &&
         "The token must be a closing group symbol!");
  return closing_token_info.opening_token;
}

auto TokenizedBuffer::IsRecoveryToken(Token token) const -> bool {
  return GetTokenInfo(token).is_recovery;
}

auto TokenizedBuffer::GetLineNumber(Line line) const -> int {
  return line.index + 1;
}

auto TokenizedBuffer::GetIndentColumnNumber(Line line) const -> int {
  return GetLineInfo(line).indent + 1;
}

auto TokenizedBuffer::GetIdentifierText(Identifier identifier) const
    -> llvm::StringRef {
  return identifier_infos[identifier.index].text;
}
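
// Widens each field of this PrintWidths to at least the corresponding width
// in `widths`, so that a set of tokens can be printed with aligned columns.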
auto TokenizedBuffer::PrintWidths::Widen(const PrintWidths& widths) -> void {
  index = std::max(widths.index, index);
  kind = std::max(widths.kind, kind);
  column = std::max(widths.column, column);
  line = std::max(widths.line, line);
  indent = std::max(widths.indent, indent);
}

// Compute the printed width of a number. When numbers are printed in decimal,
// the number of digits needed is one more than the log-base-10 of the value.
// We handle a value of `zero` explicitly.
//
// This routine requires its argument to be *non-negative*.
static auto ComputeDecimalPrintedWidth(int number) -> int {
  assert(number >= 0 && "Negative numbers are not supported.");
  if (number == 0) {
    return 1;
  }

  return static_cast<int>(std::log10(number)) + 1;
}
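
// Computes the widths needed to print each field of `token`, using the total
// token count for the index width so that all tokens in the buffer line up.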
auto TokenizedBuffer::GetTokenPrintWidths(Token token) const -> PrintWidths {
  PrintWidths widths = {};
  widths.index = ComputeDecimalPrintedWidth(token_infos.size());
  widths.kind = GetKind(token).Name().size();
  widths.line = ComputeDecimalPrintedWidth(GetLineNumber(token));
  widths.column = ComputeDecimalPrintedWidth(GetColumnNumber(token));
  widths.indent =
      ComputeDecimalPrintedWidth(GetIndentColumnNumber(GetLine(token)));
  return widths;
}
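
// Prints the whole token stream, one token per line, widening the field
// widths across all tokens first so the output columns are aligned.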
auto TokenizedBuffer::Print(llvm::raw_ostream& output_stream) const -> void {
  if (Tokens().begin() == Tokens().end()) {
    return;
  }

  PrintWidths widths = {};
  widths.index = ComputeDecimalPrintedWidth(token_infos.size());
  for (Token token : Tokens()) {
    widths.Widen(GetTokenPrintWidths(token));
  }

  for (Token token : Tokens()) {
    PrintToken(output_stream, token, widths);
    output_stream << "\n";
  }
}

auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream,
                                 Token token) const -> void {
  PrintToken(output_stream, token, {});
}

auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream, Token token,
                                 PrintWidths widths) const -> void {
  widths.Widen(GetTokenPrintWidths(token));
  int token_index = token.index;
  auto& token_info = GetTokenInfo(token);
  llvm::StringRef token_text = GetTokenText(token);

  // Output the main chunk using one format string. We have to do the
  // justification manually in order to use the dynamically computed widths
  // and get the quotes included.
  output_stream << llvm::formatv(
      "token: { index: {0}, kind: {1}, line: {2}, column: {3}, indent: {4}, "
      "spelling: '{5}'",
      llvm::format_decimal(token_index, widths.index),
      llvm::right_justify(
          (llvm::Twine("'") + token_info.kind.Name() + "'").str(),
          widths.kind + 2),
      llvm::format_decimal(GetLineNumber(token_info.token_line), widths.line),
      llvm::format_decimal(GetColumnNumber(token), widths.column),
      llvm::format_decimal(GetIndentColumnNumber(token_info.token_line),
                           widths.indent),
      token_text);

  if (token_info.kind == TokenKind::Identifier()) {
    output_stream << ", identifier: " << GetIdentifier(token).index;
  } else if (token_info.kind.IsOpeningSymbol()) {
    output_stream << ", closing_token: "
                  << GetMatchedClosingToken(token).index;
  } else if (token_info.kind.IsClosingSymbol()) {
    output_stream << ", opening_token: "
                  << GetMatchedOpeningToken(token).index;
  }

  if (token_info.is_recovery) {
    output_stream << ", recovery: true";
  }

  output_stream << " }";
}

auto TokenizedBuffer::GetLineInfo(Line line) -> LineInfo& {
  return line_infos[line.index];
}

auto TokenizedBuffer::GetLineInfo(Line line) const -> const LineInfo& {
  return line_infos[line.index];
}

auto TokenizedBuffer::AddLine(LineInfo info) -> Line {
  line_infos.push_back(info);
  return Line(static_cast<int>(line_infos.size()) - 1);
}

auto TokenizedBuffer::GetTokenInfo(Token token) -> TokenInfo& {
  return token_infos[token.index];
}

auto TokenizedBuffer::GetTokenInfo(Token token) const -> const TokenInfo& {
  return token_infos[token.index];
}

auto TokenizedBuffer::AddToken(TokenInfo info) -> Token {
  token_infos.push_back(info);
  return Token(static_cast<int>(token_infos.size()) - 1);
}

}  // namespace Carbon