context.cpp 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489
  1. // Part of the Carbon Language project, under the Apache License v2.0 with LLVM
  2. // Exceptions. See /LICENSE for license information.
  3. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. #include "toolchain/parse/context.h"
  5. #include <optional>
  6. #include "common/check.h"
  7. #include "common/ostream.h"
  8. #include "llvm/ADT/STLExtras.h"
  9. #include "toolchain/lex/token_kind.h"
  10. #include "toolchain/lex/tokenized_buffer.h"
  11. #include "toolchain/parse/node_kind.h"
  12. #include "toolchain/parse/tree.h"
namespace Carbon::Parse {

// A relative location for characters in errors. Rendered as "around",
// "after", or "before" in diagnostic text by the formatv adapter below.
enum class RelativeLocation : int8_t {
  Around,
  After,
  Before,
};

}  // namespace Carbon::Parse
  21. namespace llvm {
  22. // Adapts RelativeLocation for use with formatv.
  23. template <>
  24. struct format_provider<Carbon::Parse::RelativeLocation> {
  25. using RelativeLocation = Carbon::Parse::RelativeLocation;
  26. static void format(const RelativeLocation& loc, raw_ostream& out,
  27. StringRef /*style*/) {
  28. switch (loc) {
  29. case RelativeLocation::Around:
  30. out << "around";
  31. break;
  32. case RelativeLocation::After:
  33. out << "after";
  34. break;
  35. case RelativeLocation::Before:
  36. out << "before";
  37. break;
  38. }
  39. }
  40. };
  41. } // namespace llvm
namespace Carbon::Parse {

// Initializes the parse context over a tokenized buffer. The token stream is
// required to be non-empty and to end with a FileEnd token; `end_` is moved
// back so it points at the FileEnd token rather than one-past-the-end.
Context::Context(Tree& tree, Lex::TokenizedBuffer& tokens,
                 Lex::TokenDiagnosticEmitter& emitter,
                 llvm::raw_ostream* vlog_stream)
    : tree_(&tree),
      tokens_(&tokens),
      emitter_(&emitter),
      vlog_stream_(vlog_stream),
      position_(tokens_->tokens().begin()),
      end_(tokens_->tokens().end()) {
  CARBON_CHECK(position_ != end_) << "Empty TokenizedBuffer";
  // Point `end_` at the trailing FileEnd token instead of past-the-end.
  --end_;
  CARBON_CHECK(tokens_->GetKind(*end_) == Lex::TokenKind::FileEnd)
      << "TokenizedBuffer should end with FileEnd, ended with "
      << tokens_->GetKind(*end_);
}
  58. auto Context::AddLeafNode(NodeKind kind, Lex::TokenIndex token, bool has_error)
  59. -> void {
  60. CheckNodeMatchesLexerToken(kind, tokens_->GetKind(token), has_error);
  61. tree_->node_impls_.push_back(
  62. Tree::NodeImpl(kind, has_error, token, /*subtree_size=*/1));
  63. if (has_error) {
  64. tree_->has_errors_ = true;
  65. }
  66. }
  67. auto Context::AddNode(NodeKind kind, Lex::TokenIndex token, int subtree_start,
  68. bool has_error) -> void {
  69. CheckNodeMatchesLexerToken(kind, tokens_->GetKind(token), has_error);
  70. int subtree_size = tree_->size() - subtree_start + 1;
  71. tree_->node_impls_.push_back(
  72. Tree::NodeImpl(kind, has_error, token, subtree_size));
  73. if (has_error) {
  74. tree_->has_errors_ = true;
  75. }
  76. }
  77. auto Context::ReplacePlaceholderNode(int32_t position, NodeKind kind,
  78. Lex::TokenIndex token, bool has_error)
  79. -> void {
  80. CARBON_CHECK(position >= 0 && position < tree_->size())
  81. << "position: " << position << " size: " << tree_->size();
  82. auto* node_impl = &tree_->node_impls_[position];
  83. CARBON_CHECK(node_impl->subtree_size == 1);
  84. CARBON_CHECK(node_impl->kind == NodeKind::Placeholder);
  85. node_impl->kind = kind;
  86. node_impl->has_error = has_error;
  87. node_impl->token = token;
  88. if (has_error) {
  89. tree_->has_errors_ = true;
  90. }
  91. }
  92. auto Context::ConsumeAndAddOpenParen(Lex::TokenIndex default_token,
  93. NodeKind start_kind)
  94. -> std::optional<Lex::TokenIndex> {
  95. if (auto open_paren = ConsumeIf(Lex::TokenKind::OpenParen)) {
  96. AddLeafNode(start_kind, *open_paren, /*has_error=*/false);
  97. return open_paren;
  98. } else {
  99. CARBON_DIAGNOSTIC(ExpectedParenAfter, Error, "Expected `(` after `{0}`.",
  100. Lex::TokenKind);
  101. emitter_->Emit(*position_, ExpectedParenAfter,
  102. tokens().GetKind(default_token));
  103. AddLeafNode(start_kind, default_token, /*has_error=*/true);
  104. return std::nullopt;
  105. }
  106. }
// Adds the node that closes a bracketed construct. `expected_open` is the
// token expected to be the opening symbol; `state` carries the construct's
// subtree start and accumulated error flag.
auto Context::ConsumeAndAddCloseSymbol(Lex::TokenIndex expected_open,
                                       StateStackEntry state,
                                       NodeKind close_kind) -> void {
  Lex::TokenKind open_token_kind = tokens().GetKind(expected_open);
  if (!open_token_kind.is_opening_symbol()) {
    // The "open" token wasn't actually an opening symbol, so there is no
    // close to look for; produce an error node at the state's token.
    AddNode(close_kind, state.token, state.subtree_start, /*has_error=*/true);
  } else if (auto close_token = ConsumeIf(open_token_kind.closing_symbol())) {
    // The matching close is the next token, as expected.
    AddNode(close_kind, *close_token, state.subtree_start, state.has_error);
  } else {
    // TODO: Include the location of the matching opening delimiter in the
    // diagnostic.
    CARBON_DIAGNOSTIC(ExpectedCloseSymbol, Error,
                      "Unexpected tokens before `{0}`.", llvm::StringLiteral);
    emitter_->Emit(*position_, ExpectedCloseSymbol,
                   open_token_kind.closing_symbol().fixed_spelling());
    // Skip to the lexer-matched close and consume it as the error node.
    SkipTo(tokens().GetMatchedClosingToken(expected_open));
    AddNode(close_kind, Consume(), state.subtree_start, /*has_error=*/true);
  }
}
  126. auto Context::ConsumeAndAddLeafNodeIf(Lex::TokenKind token_kind,
  127. NodeKind node_kind) -> bool {
  128. auto token = ConsumeIf(token_kind);
  129. if (!token) {
  130. return false;
  131. }
  132. AddLeafNode(node_kind, *token);
  133. return true;
  134. }
// Consumes and returns the current token, asserting it has the required kind.
auto Context::ConsumeChecked(Lex::TokenKind kind) -> Lex::TokenIndex {
  CARBON_CHECK(PositionIs(kind))
      << "Required " << kind << ", found " << PositionKind();
  return Consume();
}
  140. auto Context::ConsumeIf(Lex::TokenKind kind) -> std::optional<Lex::TokenIndex> {
  141. if (!PositionIs(kind)) {
  142. return std::nullopt;
  143. }
  144. return Consume();
  145. }
  146. auto Context::ConsumeIfBindingPatternKeyword(Lex::TokenKind keyword_token,
  147. State keyword_state,
  148. int subtree_start) -> void {
  149. if (auto token = ConsumeIf(keyword_token)) {
  150. PushState(Context::StateStackEntry(
  151. keyword_state, PrecedenceGroup::ForTopLevelExpr(),
  152. PrecedenceGroup::ForTopLevelExpr(), *token, subtree_start));
  153. }
  154. }
  155. auto Context::FindNextOf(std::initializer_list<Lex::TokenKind> desired_kinds)
  156. -> std::optional<Lex::TokenIndex> {
  157. auto new_position = position_;
  158. while (true) {
  159. Lex::TokenIndex token = *new_position;
  160. Lex::TokenKind kind = tokens().GetKind(token);
  161. if (kind.IsOneOf(desired_kinds)) {
  162. return token;
  163. }
  164. // Step to the next token at the current bracketing level.
  165. if (kind.is_closing_symbol() || kind == Lex::TokenKind::FileEnd) {
  166. // There are no more tokens at this level.
  167. return std::nullopt;
  168. } else if (kind.is_opening_symbol()) {
  169. new_position = Lex::TokenIterator(tokens().GetMatchedClosingToken(token));
  170. // Advance past the closing token.
  171. ++new_position;
  172. } else {
  173. ++new_position;
  174. }
  175. }
  176. }
  177. auto Context::SkipMatchingGroup() -> bool {
  178. if (!PositionKind().is_opening_symbol()) {
  179. return false;
  180. }
  181. SkipTo(tokens().GetMatchedClosingToken(*position_));
  182. ++position_;
  183. return true;
  184. }
// Skips forward to what looks like the end of the construct rooted at
// `skip_root`, using line/indentation heuristics. Returns the terminating
// semicolon's token if one was consumed, and nullopt otherwise (unmatched
// close curly, or the heuristic scan ran out).
auto Context::SkipPastLikelyEnd(Lex::TokenIndex skip_root)
    -> std::optional<Lex::TokenIndex> {
  if (position_ == end_) {
    return std::nullopt;
  }
  Lex::LineIndex root_line = tokens().GetLine(skip_root);
  int root_line_indent = tokens().GetIndentColumnNumber(root_line);
  // We will keep scanning through tokens on the same line as the root or
  // lines with greater indentation than root's line.
  auto is_same_line_or_indent_greater_than_root = [&](Lex::TokenIndex t) {
    Lex::LineIndex l = tokens().GetLine(t);
    if (l == root_line) {
      return true;
    }
    return tokens().GetIndentColumnNumber(l) > root_line_indent;
  };
  do {
    if (PositionIs(Lex::TokenKind::CloseCurlyBrace)) {
      // Immediately bail out if we hit an unmatched close curly, this will
      // pop us up a level of the syntax grouping.
      return std::nullopt;
    }
    // We assume that a semicolon is always intended to be the end of the
    // current construct.
    if (auto semi = ConsumeIf(Lex::TokenKind::Semi)) {
      return semi;
    }
    // Skip over any matching group of tokens().
    if (SkipMatchingGroup()) {
      continue;
    }
    // Otherwise just step forward one token.
    ++position_;
  } while (position_ != end_ &&
           is_same_line_or_indent_greater_than_root(*position_));
  return std::nullopt;
}
// Moves the position forward to the given token. Skipping backwards, or onto
// the FileEnd token, is a programming error.
auto Context::SkipTo(Lex::TokenIndex t) -> void {
  CARBON_CHECK(t >= *position_) << "Tried to skip backwards from " << position_
                                << " to " << Lex::TokenIterator(t);
  position_ = Lex::TokenIterator(t);
  CARBON_CHECK(position_ != end_) << "Skipped past EOF.";
}
  228. // Determines whether the given token is considered to be the start of an
  229. // operand according to the rules for infix operator parsing.
  230. static auto IsAssumedStartOfOperand(Lex::TokenKind kind) -> bool {
  231. return kind.IsOneOf({Lex::TokenKind::OpenParen, Lex::TokenKind::Identifier,
  232. Lex::TokenKind::IntLiteral, Lex::TokenKind::RealLiteral,
  233. Lex::TokenKind::StringLiteral});
  234. }
  235. // Determines whether the given token is considered to be the end of an
  236. // operand according to the rules for infix operator parsing.
  237. static auto IsAssumedEndOfOperand(Lex::TokenKind kind) -> bool {
  238. return kind.IsOneOf(
  239. {Lex::TokenKind::CloseParen, Lex::TokenKind::CloseCurlyBrace,
  240. Lex::TokenKind::CloseSquareBracket, Lex::TokenKind::Identifier,
  241. Lex::TokenKind::IntLiteral, Lex::TokenKind::RealLiteral,
  242. Lex::TokenKind::StringLiteral});
  243. }
  244. // Determines whether the given token could possibly be the start of an
  245. // operand. This is conservatively correct, and will never incorrectly return
  246. // `false`, but can incorrectly return `true`.
  247. static auto IsPossibleStartOfOperand(Lex::TokenKind kind) -> bool {
  248. return !kind.IsOneOf(
  249. {Lex::TokenKind::CloseParen, Lex::TokenKind::CloseCurlyBrace,
  250. Lex::TokenKind::CloseSquareBracket, Lex::TokenKind::Comma,
  251. Lex::TokenKind::Semi, Lex::TokenKind::Colon});
  252. }
  253. auto Context::IsLexicallyValidInfixOperator() -> bool {
  254. CARBON_CHECK(position_ != end_) << "Expected an operator token.";
  255. bool leading_space = tokens().HasLeadingWhitespace(*position_);
  256. bool trailing_space = tokens().HasTrailingWhitespace(*position_);
  257. // If there's whitespace on both sides, it's an infix operator.
  258. if (leading_space && trailing_space) {
  259. return true;
  260. }
  261. // If there's whitespace on exactly one side, it's not an infix operator.
  262. if (leading_space || trailing_space) {
  263. return false;
  264. }
  265. // Otherwise, for an infix operator, the preceding token must be any close
  266. // bracket, identifier, or literal and the next token must be an open paren,
  267. // identifier, or literal.
  268. if (position_ == tokens().tokens().begin() ||
  269. !IsAssumedEndOfOperand(tokens().GetKind(*(position_ - 1))) ||
  270. !IsAssumedStartOfOperand(tokens().GetKind(*(position_ + 1)))) {
  271. return false;
  272. }
  273. return true;
  274. }
// Determines whether a trailing operator at the current position should be
// parsed as infix rather than postfix.
auto Context::IsTrailingOperatorInfix() -> bool {
  if (position_ == end_) {
    return false;
  }
  // An operator that follows the infix operator rules is parsed as
  // infix, unless the next token means that it can't possibly be.
  if (IsLexicallyValidInfixOperator() &&
      IsPossibleStartOfOperand(tokens().GetKind(*(position_ + 1)))) {
    return true;
  }
  // A trailing operator with leading whitespace that's not valid as infix is
  // not valid at all. If the next token looks like the start of an operand,
  // then parse as infix, otherwise as postfix. Either way we'll produce a
  // diagnostic later on.
  if (tokens().HasLeadingWhitespace(*position_) &&
      IsAssumedStartOfOperand(tokens().GetKind(*(position_ + 1)))) {
    return true;
  }
  return false;
}
// Diagnoses whitespace around the symbolic operator at the current position
// when it doesn't match the whitespace rules for the given fixity. Keyword
// operators are exempt from these rules.
auto Context::DiagnoseOperatorFixity(OperatorFixity fixity) -> void {
  if (!PositionKind().is_symbol()) {
    // Whitespace-based fixity rules only apply to symbolic operators.
    return;
  }
  if (fixity == OperatorFixity::Infix) {
    // Infix operators must satisfy the infix operator rules.
    if (!IsLexicallyValidInfixOperator()) {
      CARBON_DIAGNOSTIC(BinaryOperatorRequiresWhitespace, Error,
                        "Whitespace missing {0} binary operator.",
                        RelativeLocation);
      // Report which side(s) lack whitespace: After when only leading space
      // is present, Before when only trailing, Around when neither.
      emitter_->Emit(*position_, BinaryOperatorRequiresWhitespace,
                     tokens().HasLeadingWhitespace(*position_)
                         ? RelativeLocation::After
                         : (tokens().HasTrailingWhitespace(*position_)
                                ? RelativeLocation::Before
                                : RelativeLocation::Around));
    }
  } else {
    bool prefix = fixity == OperatorFixity::Prefix;
    // Whitespace is not permitted between a symbolic pre/postfix operator and
    // its operand.
    if ((prefix ? tokens().HasTrailingWhitespace(*position_)
                : tokens().HasLeadingWhitespace(*position_))) {
      CARBON_DIAGNOSTIC(UnaryOperatorHasWhitespace, Error,
                        "Whitespace is not allowed {0} this unary operator.",
                        RelativeLocation);
      emitter_->Emit(
          *position_, UnaryOperatorHasWhitespace,
          prefix ? RelativeLocation::After : RelativeLocation::Before);
    } else if (IsLexicallyValidInfixOperator()) {
      // Pre/postfix operators must not satisfy the infix operator rules.
      CARBON_DIAGNOSTIC(UnaryOperatorRequiresWhitespace, Error,
                        "Whitespace is required {0} this unary operator.",
                        RelativeLocation);
      emitter_->Emit(
          *position_, UnaryOperatorRequiresWhitespace,
          prefix ? RelativeLocation::Before : RelativeLocation::After);
    }
  }
}
// Handles the token after a list element: consumes a `,` (adding a node of
// `comma_kind`) or detects the list's `close_kind` token, recovering from
// anything else by skipping to the next `,` or close.
auto Context::ConsumeListToken(NodeKind comma_kind, Lex::TokenKind close_kind,
                               bool already_has_error) -> ListTokenKind {
  if (!PositionIs(Lex::TokenKind::Comma) && !PositionIs(close_kind)) {
    // Don't error a second time on the same element.
    if (!already_has_error) {
      CARBON_DIAGNOSTIC(UnexpectedTokenAfterListElement, Error,
                        "Expected `,` or `{0}`.", Lex::TokenKind);
      emitter_->Emit(*position_, UnexpectedTokenAfterListElement, close_kind);
      ReturnErrorOnState();
    }
    // Recover from the invalid token.
    auto end_of_element = FindNextOf({Lex::TokenKind::Comma, close_kind});
    // The lexer guarantees that parentheses are balanced.
    CARBON_CHECK(end_of_element)
        << "missing matching `" << close_kind.opening_symbol() << "` for `"
        << close_kind << "`";
    SkipTo(*end_of_element);
  }
  if (PositionIs(close_kind)) {
    return ListTokenKind::Close;
  } else {
    // Consume the comma; CommaClose indicates a trailing comma.
    AddLeafNode(comma_kind, Consume());
    return PositionIs(close_kind) ? ListTokenKind::CommaClose
                                  : ListTokenKind::Comma;
  }
}
// Walks the state stack from the top to determine the innermost declaration
// context: the type definition enclosing the nearest DeclScopeLoop, or File
// when the only DeclScopeLoop is the file-level one at the bottom.
auto Context::GetDeclContext() -> DeclContext {
  // i == 0 is the file-level DeclScopeLoop. Additionally, i == 1 can be
  // skipped because it will never be a DeclScopeLoop.
  for (int i = state_stack_.size() - 1; i > 1; --i) {
    // The declaration context is always the state _above_ a
    // DeclScopeLoop.
    if (state_stack_[i].state == State::DeclScopeLoop) {
      switch (state_stack_[i - 1].state) {
        case State::TypeDefinitionFinishAsClass:
          return DeclContext::Class;
        case State::TypeDefinitionFinishAsInterface:
          return DeclContext::Interface;
        case State::TypeDefinitionFinishAsNamedConstraint:
          return DeclContext::NamedConstraint;
        default:
          llvm_unreachable("Missing handling for a declaration scope");
      }
    }
  }
  // No nested DeclScopeLoop found; the bottom of the stack must be the
  // file-level one.
  CARBON_CHECK(!state_stack_.empty() &&
               state_stack_[0].state == State::DeclScopeLoop);
  return DeclContext::File;
}
  385. auto Context::RecoverFromDeclError(StateStackEntry state,
  386. NodeKind parse_node_kind,
  387. bool skip_past_likely_end) -> void {
  388. auto token = state.token;
  389. if (skip_past_likely_end) {
  390. if (auto semi = SkipPastLikelyEnd(token)) {
  391. token = *semi;
  392. }
  393. }
  394. AddNode(parse_node_kind, token, state.subtree_start,
  395. /*has_error=*/true);
  396. }
// Emits a diagnostic that a `;` was expected to end a declaration introduced
// by `expected_kind`.
auto Context::EmitExpectedDeclSemi(Lex::TokenKind expected_kind) -> void {
  CARBON_DIAGNOSTIC(ExpectedDeclSemi, Error,
                    "`{0}` declarations must end with a `;`.", Lex::TokenKind);
  emitter().Emit(*position(), ExpectedDeclSemi, expected_kind);
}
// Emits a diagnostic that a declaration introduced by `expected_kind` must
// end with either a `;` or a `{ ... }` definition block.
auto Context::EmitExpectedDeclSemiOrDefinition(Lex::TokenKind expected_kind)
    -> void {
  CARBON_DIAGNOSTIC(ExpectedDeclSemiOrDefinition, Error,
                    "`{0}` declarations must either end with a `;` or "
                    "have a `{{ ... }` block for a definition.",
                    Lex::TokenKind);
  emitter().Emit(*position(), ExpectedDeclSemiOrDefinition, expected_kind);
}
  410. auto Context::PrintForStackDump(llvm::raw_ostream& output) const -> void {
  411. output << "Parser stack:\n";
  412. for (auto [i, entry] : llvm::enumerate(state_stack_)) {
  413. output << "\t" << i << ".\t" << entry.state;
  414. PrintTokenForStackDump(output, entry.token);
  415. }
  416. output << "\tcursor\tposition_";
  417. PrintTokenForStackDump(output, *position_);
  418. }
  419. auto Context::PrintTokenForStackDump(llvm::raw_ostream& output,
  420. Lex::TokenIndex token) const -> void {
  421. output << " @ " << tokens_->GetLineNumber(tokens_->GetLine(token)) << ":"
  422. << tokens_->GetColumnNumber(token) << ": token " << token << " : "
  423. << tokens_->GetKind(token) << "\n";
  424. }
  425. } // namespace Carbon::Parse