Просмотр исходного кода

Rename Lex::{Token,Line} -> Lex::{Token,Line}Index. (#3433)

As discussed [on
discord](https://discord.com/channels/655572317891461132/655578254970716160/1178878128714678282)
and today's toolchain discussion.
Richard Smith 2 года назад
Родитель
Commit
eae630a3db

+ 22 - 22
toolchain/lex/lex.cpp

@@ -58,7 +58,7 @@ class [[clang::internal_linkage]] Lexer {
     // Consumes (and discard) a valid token to construct a result
     // indicating a token has been produced. Relies on implicit conversions.
     // NOLINTNEXTLINE(google-explicit-constructor)
-    LexResult(Token /*discarded_token*/) : LexResult(true) {}
+    LexResult(TokenIndex /*discarded_token*/) : LexResult(true) {}
 
     // Returns a result indicating no token was produced.
     static auto NoMatch() -> LexResult { return LexResult(false); }
@@ -89,7 +89,7 @@ class [[clang::internal_linkage]] Lexer {
   // But because it can, the compiler will flatten this otherwise.
   [[gnu::noinline]] auto CreateLines(llvm::StringRef source_text) -> void;
 
-  auto current_line() -> Line { return Line(line_index_); }
+  auto current_line() -> LineIndex { return LineIndex(line_index_); }
 
   auto current_line_info() -> TokenizedBuffer::LineInfo* {
     return &buffer_.line_infos_[line_index_];
@@ -125,7 +125,7 @@ class [[clang::internal_linkage]] Lexer {
       -> LexResult;
 
   auto LexOneCharSymbolToken(llvm::StringRef source_text, TokenKind kind,
-                             ssize_t& position) -> Token;
+                             ssize_t& position) -> TokenIndex;
 
   auto LexOpeningSymbolToken(llvm::StringRef source_text, TokenKind kind,
                              ssize_t& position) -> LexResult;
@@ -170,7 +170,7 @@ class [[clang::internal_linkage]] Lexer {
 
   ssize_t line_index_;
 
-  llvm::SmallVector<Token> open_groups_;
+  llvm::SmallVector<TokenIndex> open_groups_;
 
   ErrorTrackingDiagnosticConsumer consumer_;
 
@@ -937,7 +937,7 @@ auto Lexer::LexStringLiteral(llvm::StringRef source_text, ssize_t& position)
     return LexError(source_text, position);
   }
 
-  Line string_line = current_line();
+  LineIndex string_line = current_line();
   int string_column = ComputeColumn(position);
   ssize_t literal_size = literal->text().size();
   position += literal_size;
@@ -975,7 +975,7 @@ auto Lexer::LexStringLiteral(llvm::StringRef source_text, ssize_t& position)
 }
 
 auto Lexer::LexOneCharSymbolToken(llvm::StringRef source_text, TokenKind kind,
-                                  ssize_t& position) -> Token {
+                                  ssize_t& position) -> TokenIndex {
   // Verify in a debug build that the incoming token kind is correct.
   CARBON_DCHECK(kind != TokenKind::Error);
   CARBON_DCHECK(kind.fixed_spelling().size() == 1);
@@ -984,16 +984,16 @@ auto Lexer::LexOneCharSymbolToken(llvm::StringRef source_text, TokenKind kind,
       << "' instead of the spelling '" << kind.fixed_spelling()
       << "' of the incoming token kind '" << kind << "'";
 
-  Token token = buffer_.AddToken({.kind = kind,
-                                  .token_line = current_line(),
-                                  .column = ComputeColumn(position)});
+  TokenIndex token = buffer_.AddToken({.kind = kind,
+                                       .token_line = current_line(),
+                                       .column = ComputeColumn(position)});
   ++position;
   return token;
 }
 
 auto Lexer::LexOpeningSymbolToken(llvm::StringRef source_text, TokenKind kind,
                                   ssize_t& position) -> LexResult {
-  Token token = LexOneCharSymbolToken(source_text, kind, position);
+  TokenIndex token = LexOneCharSymbolToken(source_text, kind, position);
   open_groups_.push_back(token);
   return token;
 }
@@ -1004,10 +1004,10 @@ auto Lexer::LexClosingSymbolToken(llvm::StringRef source_text, TokenKind kind,
     CARBON_DIAGNOSTIC(UnmatchedClosing, Error,
                       "Closing symbol without a corresponding opening symbol.");
     emitter_.Emit(source_text.begin() + position, UnmatchedClosing);
-    Token token = buffer_.AddToken({.kind = TokenKind::Error,
-                                    .token_line = current_line(),
-                                    .column = ComputeColumn(position),
-                                    .error_length = 1});
+    TokenIndex token = buffer_.AddToken({.kind = TokenKind::Error,
+                                         .token_line = current_line(),
+                                         .column = ComputeColumn(position),
+                                         .error_length = 1});
     ++position;
     return token;
   };
@@ -1017,7 +1017,7 @@ auto Lexer::LexClosingSymbolToken(llvm::StringRef source_text, TokenKind kind,
     return unmatched_error();
   }
 
-  Token opening_token = open_groups_.back();
+  TokenIndex opening_token = open_groups_.back();
   // Close any invalid open groups first.
   if (LLVM_UNLIKELY(buffer_.GetTokenInfo(opening_token).kind !=
                     kind.opening_symbol())) {
@@ -1033,7 +1033,7 @@ auto Lexer::LexClosingSymbolToken(llvm::StringRef source_text, TokenKind kind,
   open_groups_.pop_back();
 
   // Now that the groups are all matched up, lex the actual token.
-  Token token = LexOneCharSymbolToken(source_text, kind, position);
+  TokenIndex token = LexOneCharSymbolToken(source_text, kind, position);
 
   // Note that it is important to get fresh token infos here as lexing the
   // open token would invalidate any pointers.
@@ -1059,9 +1059,9 @@ auto Lexer::LexSymbolToken(llvm::StringRef source_text, ssize_t& position)
     return LexError(source_text, position);
   }
 
-  Token token = buffer_.AddToken({.kind = kind,
-                                  .token_line = current_line(),
-                                  .column = ComputeColumn(position)});
+  TokenIndex token = buffer_.AddToken({.kind = kind,
+                                       .token_line = current_line(),
+                                       .column = ComputeColumn(position)});
   position += kind.fixed_spelling().size();
   return token;
 }
@@ -1119,7 +1119,7 @@ auto Lexer::CloseInvalidOpenGroups(TokenKind kind, ssize_t position) -> void {
   int column = ComputeColumn(position);
 
   do {
-    Token opening_token = open_groups_.back();
+    TokenIndex opening_token = open_groups_.back();
     TokenKind opening_kind = buffer_.GetTokenInfo(opening_token).kind;
     if (kind == opening_kind.closing_symbol()) {
       return;
@@ -1133,11 +1133,11 @@ auto Lexer::CloseInvalidOpenGroups(TokenKind kind, ssize_t position) -> void {
 
     CARBON_CHECK(!buffer_.tokens().empty())
         << "Must have a prior opening token!";
-    Token prev_token = buffer_.tokens().end()[-1];
+    TokenIndex prev_token = buffer_.tokens().end()[-1];
 
     // TODO: do a smarter backwards scan for where to put the closing
     // token.
-    Token closing_token = buffer_.AddToken(
+    TokenIndex closing_token = buffer_.AddToken(
         {.kind = opening_kind.closing_symbol(),
          .has_trailing_space = buffer_.HasTrailingWhitespace(prev_token),
          .is_recovery = true,

+ 1 - 1
toolchain/lex/token_kind.def

@@ -21,7 +21,7 @@
 //   - CARBON_KEYWORD_TOKEN(Name, Spelling)
 //     Defines a keyword which has the provided spelling, such as `if`.
 //     Spellings must be unique.
-//   - CARBON_TOKEN_WITH_VIRTUAL_NODE(Token)
+//   - CARBON_TOKEN_WITH_VIRTUAL_NODE(TokenIndex)
 //     Wrapped around one of the above _TOKEN macros, indicates that this
 //     token has one additional virtual node in the parse tree.
 //

+ 42 - 38
toolchain/lex/tokenized_buffer.cpp

@@ -19,23 +19,23 @@
 
 namespace Carbon::Lex {
 
-auto TokenizedBuffer::GetKind(Token token) const -> TokenKind {
+auto TokenizedBuffer::GetKind(TokenIndex token) const -> TokenKind {
   return GetTokenInfo(token).kind;
 }
 
-auto TokenizedBuffer::GetLine(Token token) const -> Line {
+auto TokenizedBuffer::GetLine(TokenIndex token) const -> LineIndex {
   return GetTokenInfo(token).token_line;
 }
 
-auto TokenizedBuffer::GetLineNumber(Token token) const -> int {
+auto TokenizedBuffer::GetLineNumber(TokenIndex token) const -> int {
   return GetLineNumber(GetLine(token));
 }
 
-auto TokenizedBuffer::GetColumnNumber(Token token) const -> int {
+auto TokenizedBuffer::GetColumnNumber(TokenIndex token) const -> int {
   return GetTokenInfo(token).column + 1;
 }
 
-auto TokenizedBuffer::GetTokenText(Token token) const -> llvm::StringRef {
+auto TokenizedBuffer::GetTokenText(TokenIndex token) const -> llvm::StringRef {
   const auto& token_info = GetTokenInfo(token);
   llvm::StringRef fixed_spelling = token_info.kind.fixed_spelling();
   if (!fixed_spelling.empty()) {
@@ -90,82 +90,83 @@ auto TokenizedBuffer::GetTokenText(Token token) const -> llvm::StringRef {
   return value_stores_->identifiers().Get(token_info.ident_id);
 }
 
-auto TokenizedBuffer::GetIdentifier(Token token) const -> IdentifierId {
+auto TokenizedBuffer::GetIdentifier(TokenIndex token) const -> IdentifierId {
   const auto& token_info = GetTokenInfo(token);
   CARBON_CHECK(token_info.kind == TokenKind::Identifier) << token_info.kind;
   return token_info.ident_id;
 }
 
-auto TokenizedBuffer::GetIntegerLiteral(Token token) const -> IntegerId {
+auto TokenizedBuffer::GetIntegerLiteral(TokenIndex token) const -> IntegerId {
   const auto& token_info = GetTokenInfo(token);
   CARBON_CHECK(token_info.kind == TokenKind::IntegerLiteral) << token_info.kind;
   return token_info.integer_id;
 }
 
-auto TokenizedBuffer::GetRealLiteral(Token token) const -> RealId {
+auto TokenizedBuffer::GetRealLiteral(TokenIndex token) const -> RealId {
   const auto& token_info = GetTokenInfo(token);
   CARBON_CHECK(token_info.kind == TokenKind::RealLiteral) << token_info.kind;
   return token_info.real_id;
 }
 
-auto TokenizedBuffer::GetStringLiteral(Token token) const -> StringLiteralId {
+auto TokenizedBuffer::GetStringLiteral(TokenIndex token) const
+    -> StringLiteralId {
   const auto& token_info = GetTokenInfo(token);
   CARBON_CHECK(token_info.kind == TokenKind::StringLiteral) << token_info.kind;
   return token_info.string_literal_id;
 }
 
-auto TokenizedBuffer::GetTypeLiteralSize(Token token) const
+auto TokenizedBuffer::GetTypeLiteralSize(TokenIndex token) const
     -> const llvm::APInt& {
   const auto& token_info = GetTokenInfo(token);
   CARBON_CHECK(token_info.kind.is_sized_type_literal()) << token_info.kind;
   return value_stores_->integers().Get(token_info.integer_id);
 }
 
-auto TokenizedBuffer::GetMatchedClosingToken(Token opening_token) const
-    -> Token {
+auto TokenizedBuffer::GetMatchedClosingToken(TokenIndex opening_token) const
+    -> TokenIndex {
   const auto& opening_token_info = GetTokenInfo(opening_token);
   CARBON_CHECK(opening_token_info.kind.is_opening_symbol())
       << opening_token_info.kind;
   return opening_token_info.closing_token;
 }
 
-auto TokenizedBuffer::GetMatchedOpeningToken(Token closing_token) const
-    -> Token {
+auto TokenizedBuffer::GetMatchedOpeningToken(TokenIndex closing_token) const
+    -> TokenIndex {
   const auto& closing_token_info = GetTokenInfo(closing_token);
   CARBON_CHECK(closing_token_info.kind.is_closing_symbol())
       << closing_token_info.kind;
   return closing_token_info.opening_token;
 }
 
-auto TokenizedBuffer::HasLeadingWhitespace(Token token) const -> bool {
+auto TokenizedBuffer::HasLeadingWhitespace(TokenIndex token) const -> bool {
   auto it = TokenIterator(token);
   return it == tokens().begin() || GetTokenInfo(*(it - 1)).has_trailing_space;
 }
 
-auto TokenizedBuffer::HasTrailingWhitespace(Token token) const -> bool {
+auto TokenizedBuffer::HasTrailingWhitespace(TokenIndex token) const -> bool {
   return GetTokenInfo(token).has_trailing_space;
 }
 
-auto TokenizedBuffer::IsRecoveryToken(Token token) const -> bool {
+auto TokenizedBuffer::IsRecoveryToken(TokenIndex token) const -> bool {
   return GetTokenInfo(token).is_recovery;
 }
 
-auto TokenizedBuffer::GetLineNumber(Line line) const -> int {
+auto TokenizedBuffer::GetLineNumber(LineIndex line) const -> int {
   return line.index + 1;
 }
 
-auto TokenizedBuffer::GetNextLine(Line line) const -> Line {
-  Line next(line.index + 1);
+auto TokenizedBuffer::GetNextLine(LineIndex line) const -> LineIndex {
+  LineIndex next(line.index + 1);
   CARBON_DCHECK(static_cast<size_t>(next.index) < line_infos_.size());
   return next;
 }
 
-auto TokenizedBuffer::GetPrevLine(Line line) const -> Line {
+auto TokenizedBuffer::GetPrevLine(LineIndex line) const -> LineIndex {
   CARBON_CHECK(line.index > 0);
-  return Line(line.index - 1);
+  return LineIndex(line.index - 1);
 }
 
-auto TokenizedBuffer::GetIndentColumnNumber(Line line) const -> int {
+auto TokenizedBuffer::GetIndentColumnNumber(LineIndex line) const -> int {
   return GetLineInfo(line).indent + 1;
 }
 
@@ -191,7 +192,8 @@ static auto ComputeDecimalPrintedWidth(int number) -> int {
   return static_cast<int>(std::log10(number)) + 1;
 }
 
-auto TokenizedBuffer::GetTokenPrintWidths(Token token) const -> PrintWidths {
+auto TokenizedBuffer::GetTokenPrintWidths(TokenIndex token) const
+    -> PrintWidths {
   PrintWidths widths = {};
   widths.index = ComputeDecimalPrintedWidth(token_infos_.size());
   widths.kind = GetKind(token).name().size();
@@ -212,11 +214,11 @@ auto TokenizedBuffer::Print(llvm::raw_ostream& output_stream) const -> void {
 
   PrintWidths widths = {};
   widths.index = ComputeDecimalPrintedWidth((token_infos_.size()));
-  for (Token token : tokens()) {
+  for (TokenIndex token : tokens()) {
     widths.Widen(GetTokenPrintWidths(token));
   }
 
-  for (Token token : tokens()) {
+  for (TokenIndex token : tokens()) {
     PrintToken(output_stream, token, widths);
     output_stream << "\n";
   }
@@ -224,12 +226,13 @@ auto TokenizedBuffer::Print(llvm::raw_ostream& output_stream) const -> void {
 }
 
 auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream,
-                                 Token token) const -> void {
+                                 TokenIndex token) const -> void {
   PrintToken(output_stream, token, {});
 }
 
-auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream, Token token,
-                                 PrintWidths widths) const -> void {
+auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream,
+                                 TokenIndex token, PrintWidths widths) const
+    -> void {
   widths.Widen(GetTokenPrintWidths(token));
   int token_index = token.index;
   const auto& token_info = GetTokenInfo(token);
@@ -292,31 +295,31 @@ auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream, Token token,
   output_stream << " },";
 }
 
-auto TokenizedBuffer::GetLineInfo(Line line) -> LineInfo& {
+auto TokenizedBuffer::GetLineInfo(LineIndex line) -> LineInfo& {
   return line_infos_[line.index];
 }
 
-auto TokenizedBuffer::GetLineInfo(Line line) const -> const LineInfo& {
+auto TokenizedBuffer::GetLineInfo(LineIndex line) const -> const LineInfo& {
   return line_infos_[line.index];
 }
 
-auto TokenizedBuffer::AddLine(LineInfo info) -> Line {
+auto TokenizedBuffer::AddLine(LineInfo info) -> LineIndex {
   line_infos_.push_back(info);
-  return Line(static_cast<int>(line_infos_.size()) - 1);
+  return LineIndex(static_cast<int>(line_infos_.size()) - 1);
 }
 
-auto TokenizedBuffer::GetTokenInfo(Token token) -> TokenInfo& {
+auto TokenizedBuffer::GetTokenInfo(TokenIndex token) -> TokenInfo& {
   return token_infos_[token.index];
 }
 
-auto TokenizedBuffer::GetTokenInfo(Token token) const -> const TokenInfo& {
+auto TokenizedBuffer::GetTokenInfo(TokenIndex token) const -> const TokenInfo& {
   return token_infos_[token.index];
 }
 
-auto TokenizedBuffer::AddToken(TokenInfo info) -> Token {
+auto TokenizedBuffer::AddToken(TokenInfo info) -> TokenIndex {
   token_infos_.push_back(info);
   expected_parse_tree_size_ += info.kind.expected_parse_tree_size();
-  return Token(static_cast<int>(token_infos_.size()) - 1);
+  return TokenIndex(static_cast<int>(token_infos_.size()) - 1);
 }
 
 auto TokenIterator::Print(llvm::raw_ostream& output) const -> void {
@@ -368,7 +371,8 @@ auto TokenizedBuffer::SourceBufferLocationTranslator::GetLocation(
           .column_number = column_number + 1};
 }
 
-auto TokenLocationTranslator::GetLocation(Token token) -> DiagnosticLocation {
+auto TokenLocationTranslator::GetLocation(TokenIndex token)
+    -> DiagnosticLocation {
   // Map the token location into a position within the source buffer.
   const auto& token_info = buffer_->GetTokenInfo(token);
   const auto& line_info = buffer_->GetLineInfo(token_info.token_line);

+ 62 - 59
toolchain/lex/tokenized_buffer.h

@@ -28,54 +28,55 @@ class TokenizedBuffer;
 
 // A lightweight handle to a lexed token in a `TokenizedBuffer`.
 //
-// `Token` objects are designed to be passed by value, not reference or
+// `TokenIndex` objects are designed to be passed by value, not reference or
 // pointer. They are also designed to be small and efficient to store in data
 // structures.
 //
-// `Token` objects from the same `TokenizedBuffer` can be compared with each
-// other, both for being the same token within the buffer, and to establish
+// `TokenIndex` objects from the same `TokenizedBuffer` can be compared with
+// each other, both for being the same token within the buffer, and to establish
 // relative position within the token stream that has been lexed out of the
-// buffer. `Token` objects from different `TokenizedBuffer`s cannot be
+// buffer. `TokenIndex` objects from different `TokenizedBuffer`s cannot be
 // meaningfully compared.
 //
-// All other APIs to query a `Token` are on the `TokenizedBuffer`.
-struct Token : public ComparableIndexBase {
-  static const Token Invalid;
+// All other APIs to query a `TokenIndex` are on the `TokenizedBuffer`.
+struct TokenIndex : public ComparableIndexBase {
+  static const TokenIndex Invalid;
   // Comments aren't tokenized, so this is the first token after FileStart.
-  static const Token FirstNonCommentToken;
+  static const TokenIndex FirstNonCommentToken;
   using ComparableIndexBase::ComparableIndexBase;
 };
 
-constexpr Token Token::Invalid(Token::InvalidIndex);
-constexpr Token Token::FirstNonCommentToken(1);
+constexpr TokenIndex TokenIndex::Invalid(TokenIndex::InvalidIndex);
+constexpr TokenIndex TokenIndex::FirstNonCommentToken(1);
 
 // A lightweight handle to a lexed line in a `TokenizedBuffer`.
 //
-// `Line` objects are designed to be passed by value, not reference or
+// `LineIndex` objects are designed to be passed by value, not reference or
 // pointer. They are also designed to be small and efficient to store in data
 // structures.
 //
-// Each `Line` object refers to a specific line in the source code that was
+// Each `LineIndex` object refers to a specific line in the source code that was
 // lexed. They can be compared directly to establish that they refer to the
 // same line or the relative position of different lines within the source.
 //
-// All other APIs to query a `Line` are on the `TokenizedBuffer`.
-struct Line : public ComparableIndexBase {
-  static const Line Invalid;
+// All other APIs to query a `LineIndex` are on the `TokenizedBuffer`.
+struct LineIndex : public ComparableIndexBase {
+  static const LineIndex Invalid;
   using ComparableIndexBase::ComparableIndexBase;
 };
 
-constexpr Line Line::Invalid(Line::InvalidIndex);
+constexpr LineIndex LineIndex::Invalid(LineIndex::InvalidIndex);
 
 // Random-access iterator over tokens within the buffer.
 class TokenIterator
-    : public llvm::iterator_facade_base<
-          TokenIterator, std::random_access_iterator_tag, const Token, int>,
+    : public llvm::iterator_facade_base<TokenIterator,
+                                        std::random_access_iterator_tag,
+                                        const TokenIndex, int>,
       public Printable<TokenIterator> {
  public:
   TokenIterator() = delete;
 
-  explicit TokenIterator(Token token) : token_(token) {}
+  explicit TokenIterator(TokenIndex token) : token_(token) {}
 
   auto operator==(const TokenIterator& rhs) const -> bool {
     return token_ == rhs.token_;
@@ -84,7 +85,7 @@ class TokenIterator
     return token_ < rhs.token_;
   }
 
-  auto operator*() const -> const Token& { return token_; }
+  auto operator*() const -> const TokenIndex& { return token_; }
 
   using iterator_facade_base::operator-;
   auto operator-(const TokenIterator& rhs) const -> int {
@@ -106,18 +107,19 @@ class TokenIterator
  private:
   friend class TokenizedBuffer;
 
-  Token token_;
+  TokenIndex token_;
 };
 
 // A diagnostic location translator that maps token locations into source
 // buffer locations.
-class TokenLocationTranslator : public DiagnosticLocationTranslator<Token> {
+class TokenLocationTranslator
+    : public DiagnosticLocationTranslator<TokenIndex> {
  public:
   explicit TokenLocationTranslator(const TokenizedBuffer* buffer)
       : buffer_(buffer) {}
 
   // Map the given token into a diagnostic location.
-  auto GetLocation(Token token) -> DiagnosticLocation override;
+  auto GetLocation(TokenIndex token) -> DiagnosticLocation override;
 
  private:
   const TokenizedBuffer* buffer_;
@@ -133,65 +135,65 @@ class TokenLocationTranslator : public DiagnosticLocationTranslator<Token> {
 // `HasError` returning true.
 class TokenizedBuffer : public Printable<TokenizedBuffer> {
  public:
-  auto GetKind(Token token) const -> TokenKind;
-  auto GetLine(Token token) const -> Line;
+  auto GetKind(TokenIndex token) const -> TokenKind;
+  auto GetLine(TokenIndex token) const -> LineIndex;
 
   // Returns the 1-based line number.
-  auto GetLineNumber(Token token) const -> int;
+  auto GetLineNumber(TokenIndex token) const -> int;
 
   // Returns the 1-based column number.
-  auto GetColumnNumber(Token token) const -> int;
+  auto GetColumnNumber(TokenIndex token) const -> int;
 
   // Returns the source text lexed into this token.
-  auto GetTokenText(Token token) const -> llvm::StringRef;
+  auto GetTokenText(TokenIndex token) const -> llvm::StringRef;
 
   // Returns the identifier associated with this token. The token kind must be
   // an `Identifier`.
-  auto GetIdentifier(Token token) const -> IdentifierId;
+  auto GetIdentifier(TokenIndex token) const -> IdentifierId;
 
   // Returns the value of an `IntegerLiteral()` token.
-  auto GetIntegerLiteral(Token token) const -> IntegerId;
+  auto GetIntegerLiteral(TokenIndex token) const -> IntegerId;
 
   // Returns the value of an `RealLiteral()` token.
-  auto GetRealLiteral(Token token) const -> RealId;
+  auto GetRealLiteral(TokenIndex token) const -> RealId;
 
   // Returns the value of a `StringLiteral()` token.
-  auto GetStringLiteral(Token token) const -> StringLiteralId;
+  auto GetStringLiteral(TokenIndex token) const -> StringLiteralId;
 
   // Returns the size specified in a `*TypeLiteral()` token.
-  auto GetTypeLiteralSize(Token token) const -> const llvm::APInt&;
+  auto GetTypeLiteralSize(TokenIndex token) const -> const llvm::APInt&;
 
   // Returns the closing token matched with the given opening token.
   //
   // The given token must be an opening token kind.
-  auto GetMatchedClosingToken(Token opening_token) const -> Token;
+  auto GetMatchedClosingToken(TokenIndex opening_token) const -> TokenIndex;
 
   // Returns the opening token matched with the given closing token.
   //
   // The given token must be a closing token kind.
-  auto GetMatchedOpeningToken(Token closing_token) const -> Token;
+  auto GetMatchedOpeningToken(TokenIndex closing_token) const -> TokenIndex;
 
   // Returns whether the given token has leading whitespace.
-  auto HasLeadingWhitespace(Token token) const -> bool;
+  auto HasLeadingWhitespace(TokenIndex token) const -> bool;
   // Returns whether the given token has trailing whitespace.
-  auto HasTrailingWhitespace(Token token) const -> bool;
+  auto HasTrailingWhitespace(TokenIndex token) const -> bool;
 
   // Returns whether the token was created as part of an error recovery effort.
   //
   // For example, a closing paren inserted to match an unmatched paren.
-  auto IsRecoveryToken(Token token) const -> bool;
+  auto IsRecoveryToken(TokenIndex token) const -> bool;
 
   // Returns the 1-based line number.
-  auto GetLineNumber(Line line) const -> int;
+  auto GetLineNumber(LineIndex line) const -> int;
 
   // Returns the 1-based indentation column number.
-  auto GetIndentColumnNumber(Line line) const -> int;
+  auto GetIndentColumnNumber(LineIndex line) const -> int;
 
   // Returns the next line handle.
-  auto GetNextLine(Line line) const -> Line;
+  auto GetNextLine(LineIndex line) const -> LineIndex;
 
   // Returns the previous line handle.
-  auto GetPrevLine(Line line) const -> Line;
+  auto GetPrevLine(LineIndex line) const -> LineIndex;
 
   // Prints a description of the tokenized stream to the provided `raw_ostream`.
   //
@@ -216,14 +218,15 @@ class TokenizedBuffer : public Printable<TokenizedBuffer> {
 
   // Prints a description of a single token.  See `Print` for details on the
   // format.
-  auto PrintToken(llvm::raw_ostream& output_stream, Token token) const -> void;
+  auto PrintToken(llvm::raw_ostream& output_stream, TokenIndex token) const
+      -> void;
 
   // Returns true if the buffer has errors that were detected at lexing time.
   auto has_errors() const -> bool { return has_errors_; }
 
   auto tokens() const -> llvm::iterator_range<TokenIterator> {
-    return llvm::make_range(TokenIterator(Token(0)),
-                            TokenIterator(Token(token_infos_.size())));
+    return llvm::make_range(TokenIterator(TokenIndex(0)),
+                            TokenIterator(TokenIndex(token_infos_.size())));
   }
 
   auto size() const -> int { return token_infos_.size(); }
@@ -277,8 +280,8 @@ class TokenizedBuffer : public Printable<TokenizedBuffer> {
     // Whether the token was injected artificially during error recovery.
     bool is_recovery = false;
 
-    // Line on which the Token starts.
-    Line token_line;
+    // LineIndex on which the TokenIndex starts.
+    LineIndex token_line;
 
     // Zero-based byte offset of the token within its line.
     int32_t column;
@@ -286,15 +289,15 @@ class TokenizedBuffer : public Printable<TokenizedBuffer> {
     // We may have up to 32 bits of payload, based on the kind of token.
     union {
       static_assert(
-          sizeof(Token) <= sizeof(int32_t),
+          sizeof(TokenIndex) <= sizeof(int32_t),
           "Unable to pack token and identifier index into the same space!");
 
       IdentifierId ident_id = IdentifierId::Invalid;
       StringLiteralId string_literal_id;
       IntegerId integer_id;
       RealId real_id;
-      Token closing_token;
-      Token opening_token;
+      TokenIndex closing_token;
+      TokenIndex opening_token;
       int32_t error_length;
     };
   };
@@ -330,14 +333,14 @@ class TokenizedBuffer : public Printable<TokenizedBuffer> {
                            SourceBuffer& source)
       : value_stores_(&value_stores), source_(&source) {}
 
-  auto GetLineInfo(Line line) -> LineInfo&;
-  auto GetLineInfo(Line line) const -> const LineInfo&;
-  auto AddLine(LineInfo info) -> Line;
-  auto GetTokenInfo(Token token) -> TokenInfo&;
-  auto GetTokenInfo(Token token) const -> const TokenInfo&;
-  auto AddToken(TokenInfo info) -> Token;
-  auto GetTokenPrintWidths(Token token) const -> PrintWidths;
-  auto PrintToken(llvm::raw_ostream& output_stream, Token token,
+  auto GetLineInfo(LineIndex line) -> LineInfo&;
+  auto GetLineInfo(LineIndex line) const -> const LineInfo&;
+  auto AddLine(LineInfo info) -> LineIndex;
+  auto GetTokenInfo(TokenIndex token) -> TokenInfo&;
+  auto GetTokenInfo(TokenIndex token) const -> const TokenInfo&;
+  auto AddToken(TokenInfo info) -> TokenIndex;
+  auto GetTokenPrintWidths(TokenIndex token) const -> PrintWidths;
+  auto PrintToken(llvm::raw_ostream& output_stream, TokenIndex token,
                   PrintWidths widths) const -> void;
 
   // Used to allocate computed string literals.
@@ -366,7 +369,7 @@ class TokenizedBuffer : public Printable<TokenizedBuffer> {
 using LexerDiagnosticEmitter = DiagnosticEmitter<const char*>;
 
 // A diagnostic emitter that uses tokens as its source of location information.
-using TokenDiagnosticEmitter = DiagnosticEmitter<Token>;
+using TokenDiagnosticEmitter = DiagnosticEmitter<TokenIndex>;
 
 }  // namespace Carbon::Lex
 

+ 1 - 1
toolchain/lex/tokenized_buffer_fuzzer.cpp

@@ -44,7 +44,7 @@ extern "C" int LLVMFuzzerTestOneInput(const unsigned char* data,
   //
   // TODO: We should enhance this to do more sanity checks on the resulting
   // token stream.
-  for (Lex::Token token : buffer.tokens()) {
+  for (Lex::TokenIndex token : buffer.tokens()) {
     int line_number = buffer.GetLineNumber(token);
     CARBON_CHECK(line_number > 0) << "Invalid line number!";
     CARBON_CHECK(line_number < INT_MAX) << "Invalid line number!";

+ 2 - 2
toolchain/lex/tokenized_buffer_test.cpp

@@ -588,7 +588,7 @@ TEST_F(LexerTest, Whitespace) {
                   // EOF
                   false};
   int pos = 0;
-  for (Token token : buffer.tokens()) {
+  for (TokenIndex token : buffer.tokens()) {
     SCOPED_TRACE(
         llvm::formatv("Token #{0}: '{1}'", token, buffer.GetTokenText(token)));
 
@@ -856,7 +856,7 @@ TEST_F(LexerTest, InvalidStringLiterals) {
 
     // We should have formed at least one error token.
     bool found_error = false;
-    for (Token token : buffer.tokens()) {
+    for (TokenIndex token : buffer.tokens()) {
       if (buffer.GetKind(token) == TokenKind::Error) {
         found_error = true;
         break;

+ 16 - 16
toolchain/parse/context.cpp

@@ -56,7 +56,7 @@ Context::Context(Tree& tree, Lex::TokenizedBuffer& tokens,
       << tokens_->GetKind(*end_);
 }
 
-auto Context::AddLeafNode(NodeKind kind, Lex::Token token, bool has_error)
+auto Context::AddLeafNode(NodeKind kind, Lex::TokenIndex token, bool has_error)
     -> void {
   CheckNodeMatchesLexerToken(kind, tokens_->GetKind(token), has_error);
   tree_->node_impls_.push_back(
@@ -66,7 +66,7 @@ auto Context::AddLeafNode(NodeKind kind, Lex::Token token, bool has_error)
   }
 }
 
-auto Context::AddNode(NodeKind kind, Lex::Token token, int subtree_start,
+auto Context::AddNode(NodeKind kind, Lex::TokenIndex token, int subtree_start,
                       bool has_error) -> void {
   CheckNodeMatchesLexerToken(kind, tokens_->GetKind(token), has_error);
   int subtree_size = tree_->size() - subtree_start + 1;
@@ -77,9 +77,9 @@ auto Context::AddNode(NodeKind kind, Lex::Token token, int subtree_start,
   }
 }
 
-auto Context::ConsumeAndAddOpenParen(Lex::Token default_token,
+auto Context::ConsumeAndAddOpenParen(Lex::TokenIndex default_token,
                                      NodeKind start_kind)
-    -> std::optional<Lex::Token> {
+    -> std::optional<Lex::TokenIndex> {
   if (auto open_paren = ConsumeIf(Lex::TokenKind::OpenParen)) {
     AddLeafNode(start_kind, *open_paren, /*has_error=*/false);
     return open_paren;
@@ -93,7 +93,7 @@ auto Context::ConsumeAndAddOpenParen(Lex::Token default_token,
   }
 }
 
-auto Context::ConsumeAndAddCloseSymbol(Lex::Token expected_open,
+auto Context::ConsumeAndAddCloseSymbol(Lex::TokenIndex expected_open,
                                        StateStackEntry state,
                                        NodeKind close_kind) -> void {
   Lex::TokenKind open_token_kind = tokens().GetKind(expected_open);
@@ -126,13 +126,13 @@ auto Context::ConsumeAndAddLeafNodeIf(Lex::TokenKind token_kind,
   return true;
 }
 
-auto Context::ConsumeChecked(Lex::TokenKind kind) -> Lex::Token {
+auto Context::ConsumeChecked(Lex::TokenKind kind) -> Lex::TokenIndex {
   CARBON_CHECK(PositionIs(kind))
       << "Required " << kind << ", found " << PositionKind();
   return Consume();
 }
 
-auto Context::ConsumeIf(Lex::TokenKind kind) -> std::optional<Lex::Token> {
+auto Context::ConsumeIf(Lex::TokenKind kind) -> std::optional<Lex::TokenIndex> {
   if (!PositionIs(kind)) {
     return std::nullopt;
   }
@@ -150,10 +150,10 @@ auto Context::ConsumeIfPatternKeyword(Lex::TokenKind keyword_token,
 }
 
 auto Context::FindNextOf(std::initializer_list<Lex::TokenKind> desired_kinds)
-    -> std::optional<Lex::Token> {
+    -> std::optional<Lex::TokenIndex> {
   auto new_position = position_;
   while (true) {
-    Lex::Token token = *new_position;
+    Lex::TokenIndex token = *new_position;
     Lex::TokenKind kind = tokens().GetKind(token);
     if (kind.IsOneOf(desired_kinds)) {
       return token;
@@ -183,19 +183,19 @@ auto Context::SkipMatchingGroup() -> bool {
   return true;
 }
 
-auto Context::SkipPastLikelyEnd(Lex::Token skip_root)
-    -> std::optional<Lex::Token> {
+auto Context::SkipPastLikelyEnd(Lex::TokenIndex skip_root)
+    -> std::optional<Lex::TokenIndex> {
   if (position_ == end_) {
     return std::nullopt;
   }
 
-  Lex::Line root_line = tokens().GetLine(skip_root);
+  Lex::LineIndex root_line = tokens().GetLine(skip_root);
   int root_line_indent = tokens().GetIndentColumnNumber(root_line);
 
   // We will keep scanning through tokens on the same line as the root or
   // lines with greater indentation than root's line.
-  auto is_same_line_or_indent_greater_than_root = [&](Lex::Token t) {
-    Lex::Line l = tokens().GetLine(t);
+  auto is_same_line_or_indent_greater_than_root = [&](Lex::TokenIndex t) {
+    Lex::LineIndex l = tokens().GetLine(t);
     if (l == root_line) {
       return true;
     }
@@ -229,7 +229,7 @@ auto Context::SkipPastLikelyEnd(Lex::Token skip_root)
   return std::nullopt;
 }
 
-auto Context::SkipTo(Lex::Token t) -> void {
+auto Context::SkipTo(Lex::TokenIndex t) -> void {
   CARBON_CHECK(t >= *position_) << "Tried to skip backwards from " << position_
                                 << " to " << Lex::TokenIterator(t);
   position_ = Lex::TokenIterator(t);
@@ -454,7 +454,7 @@ auto Context::PrintForStackDump(llvm::raw_ostream& output) const -> void {
 }
 
 auto Context::PrintTokenForStackDump(llvm::raw_ostream& output,
-                                     Lex::Token token) const -> void {
+                                     Lex::TokenIndex token) const -> void {
   output << " @ " << tokens_->GetLineNumber(tokens_->GetLine(token)) << ":"
          << tokens_->GetColumnNumber(token) << ": token " << token << " : "
          << tokens_->GetKind(token) << "\n";

+ 24 - 21
toolchain/parse/context.h

@@ -62,8 +62,8 @@ class Context {
   // Used to track state on state_stack_.
   struct StateStackEntry : public Printable<StateStackEntry> {
     explicit StateStackEntry(State state, PrecedenceGroup ambient_precedence,
-                             PrecedenceGroup lhs_precedence, Lex::Token token,
-                             int32_t subtree_start)
+                             PrecedenceGroup lhs_precedence,
+                             Lex::TokenIndex token, int32_t subtree_start)
         : state(state),
           ambient_precedence(ambient_precedence),
           lhs_precedence(lhs_precedence),
@@ -92,7 +92,7 @@ class Context {
     // A token providing context based on the subtree. This will typically be
     // the first token in the subtree, but may sometimes be a token within. It
     // will typically be used for the subtree's root node.
-    Lex::Token token;
+    Lex::TokenIndex token;
     // The offset within the Tree of the subtree start.
     int32_t subtree_start;
   };
@@ -114,15 +114,15 @@ class Context {
                    llvm::raw_ostream* vlog_stream);
 
   // Adds a node to the parse tree that has no children (a leaf).
-  auto AddLeafNode(NodeKind kind, Lex::Token token, bool has_error = false)
+  auto AddLeafNode(NodeKind kind, Lex::TokenIndex token, bool has_error = false)
       -> void;
 
   // Adds a node to the parse tree that has children.
-  auto AddNode(NodeKind kind, Lex::Token token, int subtree_start,
+  auto AddNode(NodeKind kind, Lex::TokenIndex token, int subtree_start,
                bool has_error) -> void;
 
   // Returns the current position and moves past it.
-  auto Consume() -> Lex::Token { return *(position_++); }
+  auto Consume() -> Lex::TokenIndex { return *(position_++); }
 
   // Consumes the current token. Does not return it.
   auto ConsumeAndDiscard() -> void { ++position_; }
@@ -130,16 +130,18 @@ class Context {
   // Parses an open paren token, possibly diagnosing if necessary. Creates a
   // leaf parse node of the specified start kind. The default_token is used when
   // there's no open paren. Returns the open paren token if it was found.
-  auto ConsumeAndAddOpenParen(Lex::Token default_token, NodeKind start_kind)
-      -> std::optional<Lex::Token>;
+  auto ConsumeAndAddOpenParen(Lex::TokenIndex default_token,
+                              NodeKind start_kind)
+      -> std::optional<Lex::TokenIndex>;
 
   // Parses a closing symbol corresponding to the opening symbol
   // `expected_open`, possibly skipping forward and diagnosing if necessary.
   // Creates a parse node of the specified close kind. If `expected_open` is not
   // an opening symbol, the parse node will be associated with `state.token`,
   // no input will be consumed, and no diagnostic will be emitted.
-  auto ConsumeAndAddCloseSymbol(Lex::Token expected_open, StateStackEntry state,
-                                NodeKind close_kind) -> void;
+  auto ConsumeAndAddCloseSymbol(Lex::TokenIndex expected_open,
+                                StateStackEntry state, NodeKind close_kind)
+      -> void;
 
   // Composes `ConsumeIf` and `AddLeafNode`, returning false when ConsumeIf
   // fails.
@@ -148,16 +150,16 @@ class Context {
 
   // Returns the current position and moves past it. Requires the token is the
   // expected kind.
-  auto ConsumeChecked(Lex::TokenKind kind) -> Lex::Token;
+  auto ConsumeChecked(Lex::TokenKind kind) -> Lex::TokenIndex;
 
   // If the current position's token matches this `Kind`, returns it and
   // advances to the next position. Otherwise returns an empty optional.
-  auto ConsumeIf(Lex::TokenKind kind) -> std::optional<Lex::Token>;
+  auto ConsumeIf(Lex::TokenKind kind) -> std::optional<Lex::TokenIndex>;
 
   // Find the next token of any of the given kinds at the current bracketing
   // level.
   auto FindNextOf(std::initializer_list<Lex::TokenKind> desired_kinds)
-      -> std::optional<Lex::Token>;
+      -> std::optional<Lex::TokenIndex>;
 
   // If the token is an opening symbol for a matched group, skips to the matched
   // closing symbol and returns true. Otherwise, returns false.
@@ -179,10 +181,11 @@ class Context {
   //   declarations or statements across multiple lines should be indented.
   //
   // Returns a semicolon token if one is the likely end.
-  auto SkipPastLikelyEnd(Lex::Token skip_root) -> std::optional<Lex::Token>;
+  auto SkipPastLikelyEnd(Lex::TokenIndex skip_root)
+      -> std::optional<Lex::TokenIndex>;
 
   // Skip forward to the given token. Verifies that it is actually forward.
-  auto SkipTo(Lex::Token t) -> void;
+  auto SkipTo(Lex::TokenIndex t) -> void;
 
   // Returns true if the current token satisfies the lexical validity rules
   // for an infix operator.
@@ -243,7 +246,7 @@ class Context {
 
   // Pushes a new state with a specific token for context. Used when forming a
   // new subtree with a token that isn't the start of the subtree.
-  auto PushState(State state, Lex::Token token) -> void {
+  auto PushState(State state, Lex::TokenIndex token) -> void {
     PushState(StateStackEntry(state, PrecedenceGroup::ForTopLevelExpr(),
                               PrecedenceGroup::ForTopLevelExpr(), token,
                               tree_->size()));
@@ -337,18 +340,18 @@ class Context {
   auto set_packaging_state(PackagingState packaging_state) -> void {
     packaging_state_ = packaging_state;
   }
-  auto first_non_packaging_token() const -> Lex::Token {
+  auto first_non_packaging_token() const -> Lex::TokenIndex {
     return first_non_packaging_token_;
   }
-  auto set_first_non_packaging_token(Lex::Token token) -> void {
+  auto set_first_non_packaging_token(Lex::TokenIndex token) -> void {
     CARBON_CHECK(!first_non_packaging_token_.is_valid());
     first_non_packaging_token_ = token;
   }
 
  private:
   // Prints a single token for a stack dump. Used by PrintForStackDump.
-  auto PrintTokenForStackDump(llvm::raw_ostream& output, Lex::Token token) const
-      -> void;
+  auto PrintTokenForStackDump(llvm::raw_ostream& output,
+                              Lex::TokenIndex token) const -> void;
 
   Tree* tree_;
   Lex::TokenizedBuffer* tokens_;
@@ -368,7 +371,7 @@ class Context {
   PackagingState packaging_state_ = PackagingState::FileStart;
   // The first non-packaging token, starting as invalid. Used for packaging
   // state warnings.
-  Lex::Token first_non_packaging_token_ = Lex::Token::Invalid;
+  Lex::TokenIndex first_non_packaging_token_ = Lex::TokenIndex::Invalid;
 };
 
 // `clang-format` has a bug with spacing around `->` returns in macros. See

+ 1 - 1
toolchain/parse/handle_decl_name_and_params.cpp

@@ -27,7 +27,7 @@ static auto HandleDeclNameAndParams(Context& context, State after_name)
     CARBON_DIAGNOSTIC(ExpectedDeclName, Error,
                       "`{0}` introducer should be followed by a name.",
                       Lex::TokenKind);
-    Lex::Token location = *context.position();
+    Lex::TokenIndex location = *context.position();
     if (context.tokens().GetKind(location) == Lex::TokenKind::FileEnd) {
       // The end of file is often an especially unhelpful location. If that's
       // the best we can do here, back up the location to the introducer itself.

+ 2 - 2
toolchain/parse/handle_import_and_package.cpp

@@ -212,7 +212,7 @@ static auto HandlePackageAndLibraryDirectives(Context& context,
   auto intro_token = context.ConsumeChecked(intro_token_kind);
   context.AddLeafNode(intro, intro_token);
 
-  if (intro_token != Lex::Token::FirstNonCommentToken) {
+  if (intro_token != Lex::TokenIndex::FirstNonCommentToken) {
     CARBON_DIAGNOSTIC(PackageTooLate, Error,
                       "The `{0}` directive must be the first non-comment line.",
                       Lex::TokenKind);
@@ -220,7 +220,7 @@ static auto HandlePackageAndLibraryDirectives(Context& context,
                       "First non-comment line is here.");
     context.emitter()
         .Build(intro_token, PackageTooLate, intro_token_kind)
-        .Note(Lex::Token::FirstNonCommentToken, FirstNonCommentLine)
+        .Note(Lex::TokenIndex::FirstNonCommentToken, FirstNonCommentLine)
         .Emit();
     on_parse_error();
     return;

+ 1 - 1
toolchain/parse/handle_paren_condition.cpp

@@ -11,7 +11,7 @@ static auto HandleParenCondition(Context& context, NodeKind start_kind,
                                  State finish_state) -> void {
   auto state = context.PopState();
 
-  std::optional<Lex::Token> open_paren =
+  std::optional<Lex::TokenIndex> open_paren =
       context.ConsumeAndAddOpenParen(state.token, start_kind);
   if (open_paren) {
     state.token = *open_paren;

+ 1 - 1
toolchain/parse/handle_statement.cpp

@@ -91,7 +91,7 @@ auto HandleStatementContinueFinish(Context& context) -> void {
 auto HandleStatementForHeader(Context& context) -> void {
   auto state = context.PopState();
 
-  std::optional<Lex::Token> open_paren =
+  std::optional<Lex::TokenIndex> open_paren =
       context.ConsumeAndAddOpenParen(state.token, NodeKind::ForHeaderStart);
   if (open_paren) {
     state.token = *open_paren;

+ 2 - 1
toolchain/parse/handle_var.cpp

@@ -8,7 +8,8 @@ namespace Carbon::Parse {
 
 // Handles VarAs(Decl|For).
 static auto HandleVar(Context& context, State finish_state,
-                      Lex::Token returned_token = Lex::Token::Invalid) -> void {
+                      Lex::TokenIndex returned_token = Lex::TokenIndex::Invalid)
+    -> void {
   auto state = context.PopState();
 
   // The finished variable declaration will start at the `var` or `returned`.

+ 1 - 1
toolchain/parse/node_kind.def

@@ -17,7 +17,7 @@
 //   - CARBON_PARSE_NODE_KIND_CHILD_COUNT(Name, ChildCount, LexTokenKinds)
 //     Defines a parse node with a set number of children, often 0. This count
 //     must be correct even when the node contains errors.
-//   - CARBON_PARSE_NODE_KIND_TOKEN_LITERAL(Name, Token)
+//   - CARBON_PARSE_NODE_KIND_TOKEN_LITERAL(Name, TokenIndex)
 //     Defines a parse node that corresponds to a token that is a single-token
 //     literal. The token is wrapped for LexTokenKinds.
 //

+ 1 - 1
toolchain/parse/tree.cpp

@@ -93,7 +93,7 @@ auto Tree::node_kind(NodeId n) const -> NodeKind {
   return node_impls_[n.index].kind;
 }
 
-auto Tree::node_token(NodeId n) const -> Lex::Token {
+auto Tree::node_token(NodeId n) const -> Lex::TokenIndex {
   CARBON_CHECK(n.is_valid());
   return node_impls_[n.index].token;
 }

+ 3 - 3
toolchain/parse/tree.h

@@ -119,7 +119,7 @@ class Tree : public Printable<Tree> {
   auto node_kind(NodeId n) const -> NodeKind;
 
   // Returns the token the given parse tree node models.
-  auto node_token(NodeId n) const -> Lex::Token;
+  auto node_token(NodeId n) const -> Lex::TokenIndex;
 
   auto node_subtree_size(NodeId n) const -> int32_t;
 
@@ -185,7 +185,7 @@ class Tree : public Printable<Tree> {
   // The in-memory representation of data used for a particular node in the
   // tree.
   struct NodeImpl {
-    explicit NodeImpl(NodeKind kind, bool has_error, Lex::Token token,
+    explicit NodeImpl(NodeKind kind, bool has_error, Lex::TokenIndex token,
                       int subtree_size)
         : kind(kind),
           has_error(has_error),
@@ -214,7 +214,7 @@ class Tree : public Printable<Tree> {
     bool has_error = false;
 
     // The token root of this node.
-    Lex::Token token;
+    Lex::TokenIndex token;
 
     // The size of this node's subtree of the parse tree. This is the number of
     // nodes (and thus tokens) that are covered by this node (and its