parser_context.cpp 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468
  1. // Part of the Carbon Language project, under the Apache License v2.0 with LLVM
  2. // Exceptions. See /LICENSE for license information.
  3. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. #include "toolchain/parser/parser_context.h"
  5. #include <cstdlib>
  6. #include <memory>
  7. #include <optional>
  8. #include "common/check.h"
  9. #include "toolchain/lexer/token_kind.h"
  10. #include "toolchain/lexer/tokenized_buffer.h"
  11. #include "toolchain/parser/parse_node_kind.h"
  12. #include "toolchain/parser/parse_tree.h"
  13. namespace Carbon {
// A relative location for characters in errors.
//
// Describes where missing or disallowed whitespace sits relative to an
// operator token in operator-fixity diagnostics ("around"/"after"/"before").
// NOTE: enumerator order is not otherwise significant here, but keep it
// stable since the values are formatted via the operator<< below.
enum class RelativeLocation : int8_t {
  Around,
  After,
  Before,
};
  20. // Adapts RelativeLocation for use with formatv.
  21. static auto operator<<(llvm::raw_ostream& out, RelativeLocation loc)
  22. -> llvm::raw_ostream& {
  23. switch (loc) {
  24. case RelativeLocation::Around:
  25. out << "around";
  26. break;
  27. case RelativeLocation::After:
  28. out << "after";
  29. break;
  30. case RelativeLocation::Before:
  31. out << "before";
  32. break;
  33. }
  34. return out;
  35. }
// Initializes parsing state over a tokenized buffer. Stores pointers to the
// tree being built, the token stream, and the diagnostic emitter, and places
// the cursor at the first token.
ParserContext::ParserContext(ParseTree& tree, TokenizedBuffer& tokens,
                             TokenDiagnosticEmitter& emitter,
                             llvm::raw_ostream* vlog_stream)
    : tree_(&tree),
      tokens_(&tokens),
      emitter_(&emitter),
      vlog_stream_(vlog_stream),
      position_(tokens_->tokens().begin()),
      end_(tokens_->tokens().end()) {
  CARBON_CHECK(position_ != end_) << "Empty TokenizedBuffer";
  // Back `end_` up so it refers to the EndOfFile token itself rather than
  // one-past-the-end; the parser treats EndOfFile as the sentinel position.
  --end_;
  CARBON_CHECK(tokens_->GetKind(*end_) == TokenKind::EndOfFile)
      << "TokenizedBuffer should end with EndOfFile, ended with "
      << tokens_->GetKind(*end_);
}
  51. auto ParserContext::AddLeafNode(ParseNodeKind kind,
  52. TokenizedBuffer::Token token, bool has_error)
  53. -> void {
  54. tree_->node_impls_.push_back(
  55. ParseTree::NodeImpl(kind, has_error, token, /*subtree_size=*/1));
  56. if (has_error) {
  57. tree_->has_errors_ = true;
  58. }
  59. }
  60. auto ParserContext::AddNode(ParseNodeKind kind, TokenizedBuffer::Token token,
  61. int subtree_start, bool has_error) -> void {
  62. int subtree_size = tree_->size() - subtree_start + 1;
  63. tree_->node_impls_.push_back(
  64. ParseTree::NodeImpl(kind, has_error, token, subtree_size));
  65. if (has_error) {
  66. tree_->has_errors_ = true;
  67. }
  68. }
// Consumes an open paren and adds a `start_kind` leaf node for it, returning
// the consumed token. On a missing `(`, emits a diagnostic keyed off
// `default_token`'s kind, adds an errored leaf node on `default_token`
// instead, and returns nullopt without consuming anything.
auto ParserContext::ConsumeAndAddOpenParen(TokenizedBuffer::Token default_token,
                                           ParseNodeKind start_kind)
    -> std::optional<TokenizedBuffer::Token> {
  if (auto open_paren = ConsumeIf(TokenKind::OpenParen)) {
    AddLeafNode(start_kind, *open_paren, /*has_error=*/false);
    return open_paren;
  } else {
    CARBON_DIAGNOSTIC(ExpectedParenAfter, Error, "Expected `(` after `{0}`.",
                      TokenKind);
    emitter_->Emit(*position_, ExpectedParenAfter,
                   tokens().GetKind(default_token));
    AddLeafNode(start_kind, default_token, /*has_error=*/true);
    return std::nullopt;
  }
}
// Adds a `close_kind` node to match `expected_open`. Three cases:
// 1. `expected_open` isn't actually an opening symbol (an earlier error):
//    add an errored node on the state's token without consuming.
// 2. The matching closing symbol is next: consume it and add a node, carrying
//    forward any error already recorded on the state.
// 3. Unexpected tokens intervene: diagnose, skip to the lexer-matched closing
//    token, and add an errored node for it.
auto ParserContext::ConsumeAndAddCloseSymbol(
    TokenizedBuffer::Token expected_open, StateStackEntry state,
    ParseNodeKind close_kind) -> void {
  TokenKind open_token_kind = tokens().GetKind(expected_open);
  if (!open_token_kind.is_opening_symbol()) {
    AddNode(close_kind, state.token, state.subtree_start, /*has_error=*/true);
  } else if (auto close_token = ConsumeIf(open_token_kind.closing_symbol())) {
    AddNode(close_kind, *close_token, state.subtree_start, state.has_error);
  } else {
    // TODO: Include the location of the matching opening delimiter in the
    // diagnostic.
    CARBON_DIAGNOSTIC(ExpectedCloseSymbol, Error,
                      "Unexpected tokens before `{0}`.", llvm::StringRef);
    emitter_->Emit(*position_, ExpectedCloseSymbol,
                   open_token_kind.closing_symbol().fixed_spelling());
    SkipTo(tokens().GetMatchedClosingToken(expected_open));
    AddNode(close_kind, Consume(), state.subtree_start, /*has_error=*/true);
  }
}
  103. auto ParserContext::ConsumeAndAddLeafNodeIf(TokenKind token_kind,
  104. ParseNodeKind node_kind) -> bool {
  105. auto token = ConsumeIf(token_kind);
  106. if (!token) {
  107. return false;
  108. }
  109. AddLeafNode(node_kind, *token);
  110. return true;
  111. }
// Consumes the current token, asserting that it has the expected kind. Only
// for call sites that have already established what the next token must be.
auto ParserContext::ConsumeChecked(TokenKind kind) -> TokenizedBuffer::Token {
  CARBON_CHECK(PositionIs(kind))
      << "Required " << kind << ", found " << PositionKind();
  return Consume();
}
  117. auto ParserContext::ConsumeIf(TokenKind kind)
  118. -> std::optional<TokenizedBuffer::Token> {
  119. if (!PositionIs(kind)) {
  120. return std::nullopt;
  121. }
  122. return Consume();
  123. }
// If `keyword_token` is next, consumes it and pushes `keyword_state` with
// top-level expression precedence on both sides. Does nothing when the
// keyword is absent.
auto ParserContext::ConsumeIfPatternKeyword(TokenKind keyword_token,
                                            ParserState keyword_state,
                                            int subtree_start) -> void {
  if (auto token = ConsumeIf(keyword_token)) {
    PushState(ParserContext::StateStackEntry(
        keyword_state, PrecedenceGroup::ForTopLevelExpression(),
        PrecedenceGroup::ForTopLevelExpression(), *token, subtree_start));
  }
}
  133. auto ParserContext::FindNextOf(std::initializer_list<TokenKind> desired_kinds)
  134. -> std::optional<TokenizedBuffer::Token> {
  135. auto new_position = position_;
  136. while (true) {
  137. TokenizedBuffer::Token token = *new_position;
  138. TokenKind kind = tokens().GetKind(token);
  139. if (kind.IsOneOf(desired_kinds)) {
  140. return token;
  141. }
  142. // Step to the next token at the current bracketing level.
  143. if (kind.is_closing_symbol() || kind == TokenKind::EndOfFile) {
  144. // There are no more tokens at this level.
  145. return std::nullopt;
  146. } else if (kind.is_opening_symbol()) {
  147. new_position = TokenizedBuffer::TokenIterator(
  148. tokens().GetMatchedClosingToken(token));
  149. // Advance past the closing token.
  150. ++new_position;
  151. } else {
  152. ++new_position;
  153. }
  154. }
  155. }
  156. auto ParserContext::SkipMatchingGroup() -> bool {
  157. if (!PositionKind().is_opening_symbol()) {
  158. return false;
  159. }
  160. SkipTo(tokens().GetMatchedClosingToken(*position_));
  161. ++position_;
  162. return true;
  163. }
// Error recovery: skips forward looking for the likely end of the construct
// rooted at `skip_root`. Returns the `;` that ends it when one is found;
// returns nullopt when recovery instead stops at an unmatched `}` or when the
// scan leaves the root's line/indentation region.
auto ParserContext::SkipPastLikelyEnd(TokenizedBuffer::Token skip_root)
    -> std::optional<TokenizedBuffer::Token> {
  if (position_ == end_) {
    return std::nullopt;
  }
  TokenizedBuffer::Line root_line = tokens().GetLine(skip_root);
  int root_line_indent = tokens().GetIndentColumnNumber(root_line);
  // We will keep scanning through tokens on the same line as the root or
  // lines with greater indentation than root's line.
  auto is_same_line_or_indent_greater_than_root =
      [&](TokenizedBuffer::Token t) {
        TokenizedBuffer::Line l = tokens().GetLine(t);
        if (l == root_line) {
          return true;
        }
        return tokens().GetIndentColumnNumber(l) > root_line_indent;
      };
  do {
    if (PositionIs(TokenKind::CloseCurlyBrace)) {
      // Immediately bail out if we hit an unmatched close curly, this will
      // pop us up a level of the syntax grouping.
      return std::nullopt;
    }
    // We assume that a semicolon is always intended to be the end of the
    // current construct.
    if (auto semi = ConsumeIf(TokenKind::Semi)) {
      return semi;
    }
    // Skip over any matching group of tokens().
    if (SkipMatchingGroup()) {
      continue;
    }
    // Otherwise just step forward one token.
    ++position_;
  } while (position_ != end_ &&
           is_same_line_or_indent_greater_than_root(*position_));
  return std::nullopt;
}
// Moves the position directly to token `t`. Skipping is forward-only, and may
// not skip past the EndOfFile token.
auto ParserContext::SkipTo(TokenizedBuffer::Token t) -> void {
  CARBON_CHECK(t >= *position_) << "Tried to skip backwards from " << position_
                                << " to " << TokenizedBuffer::TokenIterator(t);
  position_ = TokenizedBuffer::TokenIterator(t);
  CARBON_CHECK(position_ != end_) << "Skipped past EOF.";
}
  208. // Determines whether the given token is considered to be the start of an
  209. // operand according to the rules for infix operator parsing.
  210. static auto IsAssumedStartOfOperand(TokenKind kind) -> bool {
  211. return kind.IsOneOf({TokenKind::OpenParen, TokenKind::Identifier,
  212. TokenKind::IntegerLiteral, TokenKind::RealLiteral,
  213. TokenKind::StringLiteral});
  214. }
  215. // Determines whether the given token is considered to be the end of an
  216. // operand according to the rules for infix operator parsing.
  217. static auto IsAssumedEndOfOperand(TokenKind kind) -> bool {
  218. return kind.IsOneOf({TokenKind::CloseParen, TokenKind::CloseCurlyBrace,
  219. TokenKind::CloseSquareBracket, TokenKind::Identifier,
  220. TokenKind::IntegerLiteral, TokenKind::RealLiteral,
  221. TokenKind::StringLiteral});
  222. }
  223. // Determines whether the given token could possibly be the start of an
  224. // operand. This is conservatively correct, and will never incorrectly return
  225. // `false`, but can incorrectly return `true`.
  226. static auto IsPossibleStartOfOperand(TokenKind kind) -> bool {
  227. return !kind.IsOneOf({TokenKind::CloseParen, TokenKind::CloseCurlyBrace,
  228. TokenKind::CloseSquareBracket, TokenKind::Comma,
  229. TokenKind::Semi, TokenKind::Colon});
  230. }
// Determines whether the operator token at the current position is lexically
// valid as an infix operator, based on surrounding whitespace and the
// adjacent token kinds.
auto ParserContext::IsLexicallyValidInfixOperator() -> bool {
  CARBON_CHECK(position_ != end_) << "Expected an operator token.";
  bool leading_space = tokens().HasLeadingWhitespace(*position_);
  bool trailing_space = tokens().HasTrailingWhitespace(*position_);
  // If there's whitespace on both sides, it's an infix operator.
  if (leading_space && trailing_space) {
    return true;
  }
  // If there's whitespace on exactly one side, it's not an infix operator.
  if (leading_space || trailing_space) {
    return false;
  }
  // Otherwise, for an infix operator, the preceding token must be any close
  // bracket, identifier, or literal and the next token must be an open paren,
  // identifier, or literal.
  // NOTE: `position_ + 1` is safe here because `position_ != end_` was
  // checked above, so the EndOfFile token is still ahead of us.
  if (position_ == tokens().tokens().begin() ||
      !IsAssumedEndOfOperand(tokens().GetKind(*(position_ - 1))) ||
      !IsAssumedStartOfOperand(tokens().GetKind(*(position_ + 1)))) {
    return false;
  }
  return true;
}
// Determines whether an operator token trailing an expression should be
// parsed as infix (true) or postfix (false). Returns false at EndOfFile.
auto ParserContext::IsTrailingOperatorInfix() -> bool {
  if (position_ == end_) {
    return false;
  }
  // An operator that follows the infix operator rules is parsed as
  // infix, unless the next token means that it can't possibly be.
  if (IsLexicallyValidInfixOperator() &&
      IsPossibleStartOfOperand(tokens().GetKind(*(position_ + 1)))) {
    return true;
  }
  // A trailing operator with leading whitespace that's not valid as infix is
  // not valid at all. If the next token looks like the start of an operand,
  // then parse as infix, otherwise as postfix. Either way we'll produce a
  // diagnostic later on.
  if (tokens().HasLeadingWhitespace(*position_) &&
      IsAssumedStartOfOperand(tokens().GetKind(*(position_ + 1)))) {
    return true;
  }
  return false;
}
// Emits whitespace-consistency diagnostics for the operator token at the
// current position, given the fixity the parser decided on. Infix operators
// must have whitespace on both sides or neither (with suitable neighbors);
// prefix/postfix operators must have no whitespace between themselves and
// their operand. Only symbolic operators are checked.
auto ParserContext::DiagnoseOperatorFixity(OperatorFixity fixity) -> void {
  if (!PositionKind().is_symbol()) {
    // Whitespace-based fixity rules only apply to symbolic operators.
    return;
  }
  if (fixity == OperatorFixity::Infix) {
    // Infix operators must satisfy the infix operator rules.
    if (!IsLexicallyValidInfixOperator()) {
      CARBON_DIAGNOSTIC(BinaryOperatorRequiresWhitespace, Error,
                        "Whitespace missing {0} binary operator.",
                        RelativeLocation);
      // Report which side(s) lack whitespace: whitespace before only ->
      // "after" is missing; after only -> "before"; neither -> "around".
      emitter_->Emit(*position_, BinaryOperatorRequiresWhitespace,
                     tokens().HasLeadingWhitespace(*position_)
                         ? RelativeLocation::After
                         : (tokens().HasTrailingWhitespace(*position_)
                                ? RelativeLocation::Before
                                : RelativeLocation::Around));
    }
  } else {
    bool prefix = fixity == OperatorFixity::Prefix;
    // Whitespace is not permitted between a symbolic pre/postfix operator and
    // its operand.
    if ((prefix ? tokens().HasTrailingWhitespace(*position_)
                : tokens().HasLeadingWhitespace(*position_))) {
      CARBON_DIAGNOSTIC(UnaryOperatorHasWhitespace, Error,
                        "Whitespace is not allowed {0} this unary operator.",
                        RelativeLocation);
      emitter_->Emit(
          *position_, UnaryOperatorHasWhitespace,
          prefix ? RelativeLocation::After : RelativeLocation::Before);
    } else if (IsLexicallyValidInfixOperator()) {
      // Pre/postfix operators must not satisfy the infix operator rules.
      CARBON_DIAGNOSTIC(UnaryOperatorRequiresWhitespace, Error,
                        "Whitespace is required {0} this unary operator.",
                        RelativeLocation);
      emitter_->Emit(
          *position_, UnaryOperatorRequiresWhitespace,
          prefix ? RelativeLocation::Before : RelativeLocation::After);
    }
  }
}
// Handles the token after a list element: expects either `,` (adding a
// `comma_kind` node) or `close_kind`. On anything else, diagnoses once per
// element and recovers by skipping to the next `,` or close. Returns which
// list position was reached.
auto ParserContext::ConsumeListToken(ParseNodeKind comma_kind,
                                     TokenKind close_kind,
                                     bool already_has_error) -> ListTokenKind {
  if (!PositionIs(TokenKind::Comma) && !PositionIs(close_kind)) {
    // Don't error a second time on the same element.
    if (!already_has_error) {
      CARBON_DIAGNOSTIC(UnexpectedTokenAfterListElement, Error,
                        "Expected `,` or `{0}`.", TokenKind);
      emitter_->Emit(*position_, UnexpectedTokenAfterListElement, close_kind);
      ReturnErrorOnState();
    }
    // Recover from the invalid token.
    auto end_of_element = FindNextOf({TokenKind::Comma, close_kind});
    // The lexer guarantees that parentheses are balanced.
    CARBON_CHECK(end_of_element)
        << "missing matching `" << close_kind.opening_symbol() << "` for `"
        << close_kind << "`";
    SkipTo(*end_of_element);
  }
  if (PositionIs(close_kind)) {
    return ListTokenKind::Close;
  } else {
    // Consume the comma; a close right after it means the list ends here.
    AddLeafNode(comma_kind, Consume());
    return PositionIs(close_kind) ? ListTokenKind::CommaClose
                                  : ListTokenKind::Comma;
  }
}
// Walks the state stack to determine what kind of scope the current
// declaration appears in: a class, interface, or named constraint body, or
// the file level.
auto ParserContext::GetDeclarationContext() -> DeclarationContext {
  // i == 0 is the file-level DeclarationScopeLoop. Additionally, i == 1 can be
  // skipped because it will never be a DeclarationScopeLoop.
  for (int i = state_stack_.size() - 1; i > 1; --i) {
    // The declaration context is always the state _above_ a
    // DeclarationScopeLoop.
    if (state_stack_[i].state == ParserState::DeclarationScopeLoop) {
      switch (state_stack_[i - 1].state) {
        case ParserState::TypeDefinitionFinishAsClass:
          return DeclarationContext::Class;
        case ParserState::TypeDefinitionFinishAsInterface:
          return DeclarationContext::Interface;
        case ParserState::TypeDefinitionFinishAsNamedConstraint:
          return DeclarationContext::NamedConstraint;
        default:
          llvm_unreachable("Missing handling for a declaration scope");
      }
    }
  }
  // No nested scope loop found: we must be directly inside the file-level
  // DeclarationScopeLoop at the bottom of the stack.
  CARBON_CHECK(!state_stack_.empty() &&
               state_stack_[0].state == ParserState::DeclarationScopeLoop);
  return DeclarationContext::File;
}
  364. auto ParserContext::RecoverFromDeclarationError(StateStackEntry state,
  365. ParseNodeKind parse_node_kind,
  366. bool skip_past_likely_end)
  367. -> void {
  368. auto token = state.token;
  369. if (skip_past_likely_end) {
  370. if (auto semi = SkipPastLikelyEnd(token)) {
  371. token = *semi;
  372. }
  373. }
  374. AddNode(parse_node_kind, token, state.subtree_start,
  375. /*has_error=*/true);
  376. }
// Emits the diagnostic for a declaration of kind `expected_kind` that is
// missing its terminating `;`.
auto ParserContext::EmitExpectedDeclarationSemi(TokenKind expected_kind)
    -> void {
  CARBON_DIAGNOSTIC(ExpectedDeclarationSemi, Error,
                    "`{0}` declarations must end with a `;`.", TokenKind);
  emitter().Emit(*position(), ExpectedDeclarationSemi, expected_kind);
}
// Emits the diagnostic for a declaration of kind `expected_kind` that has
// neither a terminating `;` nor a `{ ... }` definition body.
auto ParserContext::EmitExpectedDeclarationSemiOrDefinition(
    TokenKind expected_kind) -> void {
  // `{{` in the format string is an escaped literal `{`.
  CARBON_DIAGNOSTIC(ExpectedDeclarationSemiOrDefinition, Error,
                    "`{0}` declarations must either end with a `;` or "
                    "have a `{{ ... }` block for a definition.",
                    TokenKind);
  emitter().Emit(*position(), ExpectedDeclarationSemiOrDefinition,
                 expected_kind);
}
  392. auto ParserContext::PrintForStackDump(llvm::raw_ostream& output) const -> void {
  393. output << "Parser stack:\n";
  394. for (int i = 0; i < static_cast<int>(state_stack_.size()); ++i) {
  395. const auto& entry = state_stack_[i];
  396. output << "\t" << i << ".\t" << entry.state;
  397. PrintTokenForStackDump(output, entry.token);
  398. }
  399. output << "\tcursor\tposition_";
  400. PrintTokenForStackDump(output, *position_);
  401. }
  402. auto ParserContext::PrintTokenForStackDump(llvm::raw_ostream& output,
  403. TokenizedBuffer::Token token) const
  404. -> void {
  405. output << " @ " << tokens_->GetLineNumber(tokens_->GetLine(token)) << ":"
  406. << tokens_->GetColumnNumber(token) << ": token " << token << " : "
  407. << tokens_->GetKind(token) << "\n";
  408. }
  409. } // namespace Carbon