Просмотр исходного кода

Style updates, mostly _ naming (#970)

There are some declaration order changes, and a few test classes switched from `struct` to `class`. However, this PR is mostly adopting `_` naming of private member variables due to the shift in naming style. None of what's here should have behavior impacts; it should just be style.

Note, there are a lot of things that *look* like they could be accessor-named, but I'm not doing that in this change. Happy to do it separately if you want me to do another PR focused on it.

Co-authored-by: Chandler Carruth <chandlerc@gmail.com>
Jon Meow 4 года назад
Родитель
Commit
652cd8c636

+ 2 - 1
toolchain/common/yaml_test_helpers.cpp

@@ -72,7 +72,6 @@ auto operator<<(std::ostream& os, const Value& v) -> std::ostream& {
   // Variant visitor that prints the value in the form of code to recreate the
   // value.
   struct Printer {
-    std::ostream& out;
     auto operator()(NullValue) -> void { out << "Yaml::NullValue()"; }
     auto operator()(AliasValue) -> void { out << "Yaml::AliasValue()"; }
     auto operator()(ErrorValue) -> void { out << "Yaml::ErrorValue()"; }
@@ -103,6 +102,8 @@ auto operator<<(std::ostream& os, const Value& v) -> std::ostream& {
       }
       out << "}";
     }
+
+    std::ostream& out;
   };
   std::visit(Printer{os}, v);
   return os;

+ 7 - 7
toolchain/diagnostics/diagnostic_emitter.h

@@ -154,22 +154,22 @@ struct SimpleDiagnostic {
 class ErrorTrackingDiagnosticConsumer : public DiagnosticConsumer {
  public:
   explicit ErrorTrackingDiagnosticConsumer(DiagnosticConsumer& next_consumer)
-      : next_consumer(&next_consumer) {}
+      : next_consumer_(&next_consumer) {}
 
   auto HandleDiagnostic(const Diagnostic& diagnostic) -> void override {
-    seen_error |= diagnostic.level == Diagnostic::Error;
-    next_consumer->HandleDiagnostic(diagnostic);
+    seen_error_ |= diagnostic.level == Diagnostic::Error;
+    next_consumer_->HandleDiagnostic(diagnostic);
   }
 
   // Returns whether we've seen an error since the last reset.
-  auto SeenError() const -> bool { return seen_error; }
+  auto SeenError() const -> bool { return seen_error_; }
 
   // Reset whether we've seen an error.
-  auto Reset() -> void { seen_error = false; }
+  auto Reset() -> void { seen_error_ = false; }
 
  private:
-  DiagnosticConsumer* next_consumer;
-  bool seen_error = false;
+  DiagnosticConsumer* next_consumer_;
+  bool seen_error_ = false;
 };
 
 }  // namespace Carbon

+ 2 - 2
toolchain/diagnostics/diagnostic_emitter_test.cpp

@@ -25,8 +25,6 @@ struct FakeDiagnostic {
   // selection of the message.
   static constexpr llvm::StringLiteral Message = "{0}";
 
-  std::string message;
-
   auto Format() -> std::string {
     // Work around a bug in Clang's unused const variable warning by marking it
     // used here with a no-op.
@@ -34,6 +32,8 @@ struct FakeDiagnostic {
 
     return llvm::formatv(Message.data(), message).str();
   }
+
+  std::string message;
 };
 
 struct FakeDiagnosticLocationTranslator : DiagnosticLocationTranslator<int> {

+ 22 - 22
toolchain/driver/driver.cpp

@@ -37,7 +37,7 @@ auto GetSubcommand(llvm::StringRef name) -> Subcommand {
 
 auto Driver::RunFullCommand(llvm::ArrayRef<llvm::StringRef> args) -> bool {
   if (args.empty()) {
-    error_stream << "ERROR: No subcommand specified.\n";
+    error_stream_ << "ERROR: No subcommand specified.\n";
     return false;
   }
 
@@ -46,8 +46,8 @@ auto Driver::RunFullCommand(llvm::ArrayRef<llvm::StringRef> args) -> bool {
       std::next(args.begin()), args.end());
   switch (GetSubcommand(subcommand_text)) {
     case Subcommand::Unknown:
-      error_stream << "ERROR: Unknown subcommand '" << subcommand_text
-                   << "'.\n";
+      error_stream_ << "ERROR: Unknown subcommand '" << subcommand_text
+                    << "'.\n";
       return false;
 
 #define CARBON_SUBCOMMAND(Name, ...) \
@@ -66,7 +66,7 @@ auto Driver::RunHelpSubcommand(llvm::ArrayRef<llvm::StringRef> args) -> bool {
     return false;
   }
 
-  output_stream << "List of subcommands:\n\n";
+  output_stream_ << "List of subcommands:\n\n";
 
   constexpr llvm::StringLiteral SubcommandsAndHelp[][2] = {
 #define CARBON_SUBCOMMAND(Name, Spelling, HelpText) {Spelling, HelpText},
@@ -84,19 +84,19 @@ auto Driver::RunHelpSubcommand(llvm::ArrayRef<llvm::StringRef> args) -> bool {
     // FIXME: We should wrap this to the number of columns left after the
     // subcommand on the terminal, and using a hanging indent.
     llvm::StringRef help_text = subcommand_and_help[1];
-    output_stream << "  "
-                  << llvm::left_justify(subcommand_text, max_subcommand_width)
-                  << " - " << help_text << "\n";
+    output_stream_ << "  "
+                   << llvm::left_justify(subcommand_text, max_subcommand_width)
+                   << " - " << help_text << "\n";
   }
 
-  output_stream << "\n";
+  output_stream_ << "\n";
   return true;
 }
 
 auto Driver::RunDumpTokensSubcommand(llvm::ArrayRef<llvm::StringRef> args)
     -> bool {
   if (args.empty()) {
-    error_stream << "ERROR: No input file specified.\n";
+    error_stream_ << "ERROR: No input file specified.\n";
     return false;
   }
 
@@ -109,24 +109,24 @@ auto Driver::RunDumpTokensSubcommand(llvm::ArrayRef<llvm::StringRef> args)
 
   auto source = SourceBuffer::CreateFromFile(input_file_name);
   if (!source) {
-    error_stream << "ERROR: Unable to open input source file: ";
+    error_stream_ << "ERROR: Unable to open input source file: ";
     llvm::handleAllErrors(source.takeError(),
                           [&](const llvm::ErrorInfoBase& ei) {
-                            ei.log(error_stream);
-                            error_stream << "\n";
+                            ei.log(error_stream_);
+                            error_stream_ << "\n";
                           });
     return false;
   }
   auto tokenized_source =
       TokenizedBuffer::Lex(*source, ConsoleDiagnosticConsumer());
-  tokenized_source.Print(output_stream);
+  tokenized_source.Print(output_stream_);
   return !tokenized_source.HasErrors();
 }
 
 auto Driver::RunDumpParseTreeSubcommand(llvm::ArrayRef<llvm::StringRef> args)
     -> bool {
   if (args.empty()) {
-    error_stream << "ERROR: No input file specified.\n";
+    error_stream_ << "ERROR: No input file specified.\n";
     return false;
   }
 
@@ -139,11 +139,11 @@ auto Driver::RunDumpParseTreeSubcommand(llvm::ArrayRef<llvm::StringRef> args)
 
   auto source = SourceBuffer::CreateFromFile(input_file_name);
   if (!source) {
-    error_stream << "ERROR: Unable to open input source file: ";
+    error_stream_ << "ERROR: Unable to open input source file: ";
     llvm::handleAllErrors(source.takeError(),
                           [&](const llvm::ErrorInfoBase& ei) {
-                            ei.log(error_stream);
-                            error_stream << "\n";
+                            ei.log(error_stream_);
+                            error_stream_ << "\n";
                           });
     return false;
   }
@@ -151,19 +151,19 @@ auto Driver::RunDumpParseTreeSubcommand(llvm::ArrayRef<llvm::StringRef> args)
       TokenizedBuffer::Lex(*source, ConsoleDiagnosticConsumer());
   auto parse_tree =
       ParseTree::Parse(tokenized_source, ConsoleDiagnosticConsumer());
-  parse_tree.Print(output_stream);
+  parse_tree.Print(output_stream_);
   return !tokenized_source.HasErrors() && !parse_tree.HasErrors();
 }
 
 auto Driver::ReportExtraArgs(llvm::StringRef subcommand_text,
                              llvm::ArrayRef<llvm::StringRef> args) -> void {
-  error_stream << "ERROR: Unexpected additional arguments to the '"
-               << subcommand_text << "' subcommand:";
+  error_stream_ << "ERROR: Unexpected additional arguments to the '"
+                << subcommand_text << "' subcommand:";
   for (auto arg : args) {
-    error_stream << " " << arg;
+    error_stream_ << " " << arg;
   }
 
-  error_stream << "\n";
+  error_stream_ << "\n";
 }
 
 }  // namespace Carbon

+ 4 - 4
toolchain/driver/driver.h

@@ -23,12 +23,12 @@ class Driver {
  public:
   // Default constructed driver uses stderr for all error and informational
   // output.
-  Driver() : output_stream(llvm::outs()), error_stream(llvm::errs()) {}
+  Driver() : output_stream_(llvm::outs()), error_stream_(llvm::errs()) {}
 
   // Constructs a driver with any error or informational output directed to a
   // specified stream.
   Driver(llvm::raw_ostream& output_stream, llvm::raw_ostream& error_stream)
-      : output_stream(output_stream), error_stream(error_stream) {}
+      : output_stream_(output_stream), error_stream_(error_stream) {}
 
   // Parses the given arguments into both a subcommand to select the operation
   // to perform and any arguments to that subcommand.
@@ -71,8 +71,8 @@ class Driver {
   auto ReportExtraArgs(llvm::StringRef subcommand_text,
                        llvm::ArrayRef<llvm::StringRef> args) -> void;
 
-  llvm::raw_ostream& output_stream;
-  llvm::raw_ostream& error_stream;
+  llvm::raw_ostream& output_stream_;
+  llvm::raw_ostream& error_stream_;
 };
 
 }  // namespace Carbon

+ 15 - 14
toolchain/driver/driver_test.cpp

@@ -22,21 +22,11 @@ namespace Yaml = Carbon::Testing::Yaml;
 
 /// A raw_ostream that makes it easy to repeatedly check streamed output.
 class RawTestOstream : public llvm::raw_ostream {
-  std::string buffer;
-
-  void write_impl(const char* ptr, size_t size) override {
-    buffer.append(ptr, ptr + size);
-  }
-
-  [[nodiscard]] auto current_pos() const -> uint64_t override {
-    return buffer.size();
-  }
-
  public:
   ~RawTestOstream() override {
     flush();
-    if (!buffer.empty()) {
-      ADD_FAILURE() << "Unchecked output:\n" << buffer;
+    if (!buffer_.empty()) {
+      ADD_FAILURE() << "Unchecked output:\n" << buffer_;
     }
   }
 
@@ -44,10 +34,21 @@ class RawTestOstream : public llvm::raw_ostream {
   /// back to empty.
   auto TakeStr() -> std::string {
     flush();
-    std::string result = std::move(buffer);
-    buffer.clear();
+    std::string result = std::move(buffer_);
+    buffer_.clear();
     return result;
   }
+
+ private:
+  void write_impl(const char* ptr, size_t size) override {
+    buffer_.append(ptr, ptr + size);
+  }
+
+  [[nodiscard]] auto current_pos() const -> uint64_t override {
+    return buffer_.size();
+  }
+
+  std::string buffer_;
 };
 
 TEST(DriverTest, FullCommandErrors) {

+ 67 - 66
toolchain/lexer/numeric_literal.cpp

@@ -22,9 +22,6 @@ struct EmptyDigitSequence : SimpleDiagnostic<EmptyDigitSequence> {
 struct InvalidDigit {
   static constexpr llvm::StringLiteral ShortName = "syntax-invalid-number";
 
-  char digit;
-  int radix;
-
   auto Format() -> std::string {
     return llvm::formatv(
                "Invalid digit '{0}' in {1} numeric literal.", digit,
@@ -32,6 +29,9 @@ struct InvalidDigit {
                            : (radix == 16 ? "hexadecimal" : "decimal")))
         .str();
   }
+
+  char digit;
+  int radix;
 };
 
 struct InvalidDigitSeparator : SimpleDiagnostic<InvalidDigitSeparator> {
@@ -44,8 +44,6 @@ struct IrregularDigitSeparators {
   static constexpr llvm::StringLiteral ShortName =
       "syntax-irregular-digit-separators";
 
-  int radix;
-
   auto Format() -> std::string {
     assert((radix == 10 || radix == 16) && "unexpected radix");
     return llvm::formatv(
@@ -55,6 +53,8 @@ struct IrregularDigitSeparators {
                (radix == 10 ? "3" : "4"))
         .str();
   }
+
+  int radix;
 };
 
 struct UnknownBaseSpecifier : SimpleDiagnostic<UnknownBaseSpecifier> {
@@ -72,12 +72,12 @@ struct BinaryRealLiteral : SimpleDiagnostic<BinaryRealLiteral> {
 struct WrongRealLiteralExponent {
   static constexpr llvm::StringLiteral ShortName = "syntax-invalid-number";
 
-  char expected;
-
   auto Format() -> std::string {
     return llvm::formatv("Expected '{0}' to introduce exponent.", expected)
         .str();
   }
+
+  char expected;
 };
 }  // namespace
 
@@ -103,7 +103,7 @@ auto LexedNumericLiteral::Lex(llvm::StringRef source_text)
     char c = source_text[i];
     if (IsAlnum(c) || c == '_') {
       if (IsLower(c) && seen_radix_point && !seen_plus_minus) {
-        result.exponent = i;
+        result.exponent_ = i;
         seen_potential_exponent = true;
       }
       continue;
@@ -113,7 +113,7 @@ auto LexedNumericLiteral::Lex(llvm::StringRef source_text)
     // an alphanumeric character.
     if (c == '.' && i + 1 != n && IsAlnum(source_text[i + 1]) &&
         !seen_radix_point) {
-      result.radix_point = i;
+      result.radix_point_ = i;
       seen_radix_point = true;
       continue;
     }
@@ -123,7 +123,8 @@ auto LexedNumericLiteral::Lex(llvm::StringRef source_text)
     // followed by an alphanumeric character. This '+' or '-' cannot be an
     // operator because a literal cannot end in a lowercase letter.
     if ((c == '+' || c == '-') && seen_potential_exponent &&
-        result.exponent == i - 1 && i + 1 != n && IsAlnum(source_text[i + 1])) {
+        result.exponent_ == i - 1 && i + 1 != n &&
+        IsAlnum(source_text[i + 1])) {
       // This is not possible because we don't update result.exponent after we
       // see a '+' or '-'.
       assert(!seen_plus_minus && "should only consume one + or -");
@@ -134,12 +135,12 @@ auto LexedNumericLiteral::Lex(llvm::StringRef source_text)
     break;
   }
 
-  result.text = source_text.substr(0, i);
+  result.text_ = source_text.substr(0, i);
   if (!seen_radix_point) {
-    result.radix_point = i;
+    result.radix_point_ = i;
   }
   if (!seen_potential_exponent) {
-    result.exponent = i;
+    result.exponent_ = i;
   }
 
   return result;
@@ -154,7 +155,7 @@ class LexedNumericLiteral::Parser {
   Parser(DiagnosticEmitter<const char*>& emitter, LexedNumericLiteral literal);
 
   auto IsInteger() -> bool {
-    return literal.radix_point == static_cast<int>(literal.text.size());
+    return literal_.radix_point_ == static_cast<int>(literal_.text_.size());
   }
 
   // Check that the numeric literal token is syntactically valid and
@@ -164,7 +165,7 @@ class LexedNumericLiteral::Parser {
   auto Check() -> bool;
 
   // Get the radix of this token. One of 2, 10, or 16.
-  auto GetRadix() -> int { return radix; }
+  auto GetRadix() -> int { return radix_; }
 
   // Get the mantissa of this token's value.
   auto GetMantissa() -> llvm::APInt;
@@ -189,46 +190,45 @@ class LexedNumericLiteral::Parser {
   auto CheckFractionalPart() -> bool;
   auto CheckExponentPart() -> bool;
 
- private:
-  DiagnosticEmitter<const char*>& emitter;
-  LexedNumericLiteral literal;
+  DiagnosticEmitter<const char*>& emitter_;
+  LexedNumericLiteral literal_;
 
   // The radix of the literal: 2, 10, or 16, for a prefix of '0b', no prefix,
   // or '0x', respectively.
-  int radix = 10;
+  int radix_ = 10;
 
   // The various components of a numeric literal:
   //
   //     [radix] int_part [. fract_part [[ep] [+-] exponent_part]]
-  llvm::StringRef int_part;
-  llvm::StringRef fract_part;
-  llvm::StringRef exponent_part;
+  llvm::StringRef int_part_;
+  llvm::StringRef fract_part_;
+  llvm::StringRef exponent_part_;
 
   // Do we need to remove any special characters (digit separator or radix
   // point) before interpreting the mantissa or exponent as an integer?
-  bool mantissa_needs_cleaning = false;
-  bool exponent_needs_cleaning = false;
+  bool mantissa_needs_cleaning_ = false;
+  bool exponent_needs_cleaning_ = false;
 
   // True if we found a `-` before `exponent_part`.
-  bool exponent_is_negative = false;
+  bool exponent_is_negative_ = false;
 };
 
 LexedNumericLiteral::Parser::Parser(DiagnosticEmitter<const char*>& emitter,
                                     LexedNumericLiteral literal)
-    : emitter(emitter), literal(literal) {
-  int_part = literal.text.substr(0, literal.radix_point);
-  if (int_part.consume_front("0x")) {
-    radix = 16;
-  } else if (int_part.consume_front("0b")) {
-    radix = 2;
+    : emitter_(emitter), literal_(literal) {
+  int_part_ = literal.text_.substr(0, literal.radix_point_);
+  if (int_part_.consume_front("0x")) {
+    radix_ = 16;
+  } else if (int_part_.consume_front("0b")) {
+    radix_ = 2;
   }
 
-  fract_part = literal.text.substr(literal.radix_point + 1,
-                                   literal.exponent - literal.radix_point - 1);
+  fract_part_ = literal.text_.substr(
+      literal.radix_point_ + 1, literal.exponent_ - literal.radix_point_ - 1);
 
-  exponent_part = literal.text.substr(literal.exponent + 1);
-  if (!exponent_part.consume_front("+")) {
-    exponent_is_negative = exponent_part.consume_front("-");
+  exponent_part_ = literal.text_.substr(literal.exponent_ + 1);
+  if (!exponent_part_.consume_front("+")) {
+    exponent_is_negative_ = exponent_part_.consume_front("-");
   }
 }
 
@@ -266,17 +266,17 @@ static auto ParseInteger(llvm::StringRef digits, int radix, bool needs_cleaning)
 }
 
 auto LexedNumericLiteral::Parser::GetMantissa() -> llvm::APInt {
-  const char* end = IsInteger() ? int_part.end() : fract_part.end();
-  llvm::StringRef digits(int_part.begin(), end - int_part.begin());
-  return ParseInteger(digits, radix, mantissa_needs_cleaning);
+  const char* end = IsInteger() ? int_part_.end() : fract_part_.end();
+  llvm::StringRef digits(int_part_.begin(), end - int_part_.begin());
+  return ParseInteger(digits, radix_, mantissa_needs_cleaning_);
 }
 
 auto LexedNumericLiteral::Parser::GetExponent() -> llvm::APInt {
   // Compute the effective exponent from the specified exponent, if any,
   // and the position of the radix point.
   llvm::APInt exponent(64, 0);
-  if (!exponent_part.empty()) {
-    exponent = ParseInteger(exponent_part, 10, exponent_needs_cleaning);
+  if (!exponent_part_.empty()) {
+    exponent = ParseInteger(exponent_part_, 10, exponent_needs_cleaning_);
 
     // The exponent is a signed integer, and the number we just parsed is
     // non-negative, so ensure we have a wide enough representation to
@@ -285,18 +285,18 @@ auto LexedNumericLiteral::Parser::GetExponent() -> llvm::APInt {
     if (exponent.isSignBitSet() || exponent.getBitWidth() < 64) {
       exponent = exponent.zext(std::max(64u, exponent.getBitWidth() + 1));
     }
-    if (exponent_is_negative) {
+    if (exponent_is_negative_) {
       exponent.negate();
     }
   }
 
   // Each character after the decimal point reduces the effective exponent.
-  int excess_exponent = fract_part.size();
-  if (radix == 16) {
+  int excess_exponent = fract_part_.size();
+  if (radix_ == 16) {
     excess_exponent *= 4;
   }
   exponent -= excess_exponent;
-  if (exponent_is_negative && !exponent.isNegative()) {
+  if (exponent_is_negative_ && !exponent.isNegative()) {
     // We overflowed. Note that we can only overflow by a little, and only
     // from negative to positive, because exponent is at least 64 bits wide
     // and excess_exponent is bounded above by four times the size of the
@@ -343,19 +343,19 @@ auto LexedNumericLiteral::Parser::CheckDigitSequence(
       // next to another digit separator, or at the end.
       if (!allow_digit_separators || i == 0 || text[i - 1] == '_' ||
           i + 1 == n) {
-        emitter.EmitError<InvalidDigitSeparator>(text.begin() + i);
+        emitter_.EmitError<InvalidDigitSeparator>(text.begin() + i);
       }
       ++num_digit_separators;
       continue;
     }
 
-    emitter.EmitError<InvalidDigit>(text.begin() + i,
-                                    {.digit = c, .radix = radix});
+    emitter_.EmitError<InvalidDigit>(text.begin() + i,
+                                     {.digit = c, .radix = radix});
     return {.ok = false};
   }
 
   if (num_digit_separators == static_cast<int>(text.size())) {
-    emitter.EmitError<EmptyDigitSequence>(text.begin());
+    emitter_.EmitError<EmptyDigitSequence>(text.begin());
     return {.ok = false};
   }
 
@@ -384,7 +384,8 @@ auto LexedNumericLiteral::Parser::CheckDigitSeparatorPlacement(
          "unexpected radix for digit separator checks");
 
   auto diagnose_irregular_digit_separators = [&]() {
-    emitter.EmitError<IrregularDigitSeparators>(text.begin(), {.radix = radix});
+    emitter_.EmitError<IrregularDigitSeparators>(text.begin(),
+                                                 {.radix = radix});
   };
 
   // For decimal and hexadecimal digit sequences, digit separators must form
@@ -410,8 +411,8 @@ auto LexedNumericLiteral::Parser::CheckDigitSeparatorPlacement(
 
 // Check that we don't have a '0' prefix on a non-zero decimal integer.
 auto LexedNumericLiteral::Parser::CheckLeadingZero() -> bool {
-  if (radix == 10 && int_part.startswith("0") && int_part != "0") {
-    emitter.EmitError<UnknownBaseSpecifier>(int_part.begin());
+  if (radix_ == 10 && int_part_.startswith("0") && int_part_ != "0") {
+    emitter_.EmitError<UnknownBaseSpecifier>(int_part_.begin());
     return false;
   }
   return true;
@@ -419,8 +420,8 @@ auto LexedNumericLiteral::Parser::CheckLeadingZero() -> bool {
 
 // Check the integer part (before the '.', if any) is valid.
 auto LexedNumericLiteral::Parser::CheckIntPart() -> bool {
-  auto int_result = CheckDigitSequence(int_part, radix);
-  mantissa_needs_cleaning |= int_result.has_digit_separators;
+  auto int_result = CheckDigitSequence(int_part_, radix_);
+  mantissa_needs_cleaning_ |= int_result.has_digit_separators;
   return int_result.ok;
 }
 
@@ -431,36 +432,36 @@ auto LexedNumericLiteral::Parser::CheckFractionalPart() -> bool {
     return true;
   }
 
-  if (radix == 2) {
-    emitter.EmitError<BinaryRealLiteral>(literal.text.begin() +
-                                         literal.radix_point);
+  if (radix_ == 2) {
+    emitter_.EmitError<BinaryRealLiteral>(literal_.text_.begin() +
+                                          literal_.radix_point_);
     // Carry on and parse the binary real literal anyway.
   }
 
   // We need to remove a '.' from the mantissa.
-  mantissa_needs_cleaning = true;
+  mantissa_needs_cleaning_ = true;
 
-  return CheckDigitSequence(fract_part, radix,
+  return CheckDigitSequence(fract_part_, radix_,
                             /*allow_digit_separators=*/false)
       .ok;
 }
 
 // Check the exponent part (if any) is valid.
 auto LexedNumericLiteral::Parser::CheckExponentPart() -> bool {
-  if (literal.exponent == static_cast<int>(literal.text.size())) {
+  if (literal_.exponent_ == static_cast<int>(literal_.text_.size())) {
     return true;
   }
 
-  char expected_exponent_kind = (radix == 10 ? 'e' : 'p');
-  if (literal.text[literal.exponent] != expected_exponent_kind) {
-    emitter.EmitError<WrongRealLiteralExponent>(
-        literal.text.begin() + literal.exponent,
+  char expected_exponent_kind = (radix_ == 10 ? 'e' : 'p');
+  if (literal_.text_[literal_.exponent_] != expected_exponent_kind) {
+    emitter_.EmitError<WrongRealLiteralExponent>(
+        literal_.text_.begin() + literal_.exponent_,
         {.expected = expected_exponent_kind});
     return false;
   }
 
-  auto exponent_result = CheckDigitSequence(exponent_part, 10);
-  exponent_needs_cleaning = exponent_result.has_digit_separators;
+  auto exponent_result = CheckDigitSequence(exponent_part_, 10);
+  exponent_needs_cleaning_ = exponent_result.has_digit_separators;
   return exponent_result.ok;
 }
 

+ 14 - 14
toolchain/lexer/numeric_literal.h

@@ -18,15 +18,6 @@ namespace Carbon {
 // A numeric literal token that has been extracted from a source buffer.
 class LexedNumericLiteral {
  public:
-  // Get the text corresponding to this literal.
-  [[nodiscard]] auto Text() const -> llvm::StringRef { return text; }
-
-  // Extract a numeric literal from the given text, if it has a suitable form.
-  //
-  // The supplied `source_text` must outlive the return value.
-  static auto Lex(llvm::StringRef source_text)
-      -> llvm::Optional<LexedNumericLiteral>;
-
   // Value of an integer literal.
   struct IntegerValue {
     // An unsigned literal value.
@@ -47,27 +38,36 @@ class LexedNumericLiteral {
 
   using Value = std::variant<IntegerValue, RealValue, UnrecoverableError>;
 
+  // Extract a numeric literal from the given text, if it has a suitable form.
+  //
+  // The supplied `source_text` must outlive the return value.
+  static auto Lex(llvm::StringRef source_text)
+      -> llvm::Optional<LexedNumericLiteral>;
+
+  // Get the text corresponding to this literal.
+  [[nodiscard]] auto Text() const -> llvm::StringRef { return text_; }
+
   // Compute the value of the token, if possible. Emit diagnostics to the given
   // emitter if the token is not valid.
   auto ComputeValue(DiagnosticEmitter<const char*>& emitter) const -> Value;
 
  private:
-  LexedNumericLiteral() = default;
-
   class Parser;
 
+  LexedNumericLiteral() = default;
+
   // The text of the token.
-  llvm::StringRef text;
+  llvm::StringRef text_;
 
   // The offset of the '.'. Set to text.size() if none is present.
-  int radix_point;
+  int radix_point_;
 
   // The offset of the alphabetical character introducing the exponent. In a
   // valid literal, this will be an 'e' or a 'p', and may be followed by a '+'
   // or a '-', but for error recovery, this may simply be the last lowercase
   // letter in the invalid token. Always greater than or equal to radix_point.
   // Set to text.size() if none is present.
-  int exponent;
+  int exponent_;
 };
 
 }  // namespace Carbon

+ 4 - 3
toolchain/lexer/numeric_literal_test.cpp

@@ -24,11 +24,10 @@ using ::testing::Matcher;
 using ::testing::Property;
 using ::testing::Truly;
 
-struct NumericLiteralTest : ::testing::Test {
+class NumericLiteralTest : public ::testing::Test {
+ protected:
   NumericLiteralTest() : error_tracker(ConsoleDiagnosticConsumer()) {}
 
-  ErrorTrackingDiagnosticConsumer error_tracker;
-
   auto Lex(llvm::StringRef text) -> LexedNumericLiteral {
     llvm::Optional<LexedNumericLiteral> result = LexedNumericLiteral::Lex(text);
     assert(result);
@@ -41,6 +40,8 @@ struct NumericLiteralTest : ::testing::Test {
     DiagnosticEmitter<const char*> emitter(translator, error_tracker);
     return Lex(text).ComputeValue(emitter);
   }
+
+  ErrorTrackingDiagnosticConsumer error_tracker;
 };
 
 // TODO: Use gmock's VariantWith once it exists.

+ 2 - 2
toolchain/lexer/string_literal.cpp

@@ -389,8 +389,8 @@ static auto ExpandEscapeSequencesAndRemoveIndent(
 auto LexedStringLiteral::ComputeValue(LexerDiagnosticEmitter& emitter) const
     -> std::string {
   llvm::StringRef indent =
-      multi_line ? CheckIndent(emitter, text, content) : llvm::StringRef();
-  return ExpandEscapeSequencesAndRemoveIndent(emitter, content, hash_level,
+      multi_line_ ? CheckIndent(emitter, text_, content_) : llvm::StringRef();
+  return ExpandEscapeSequencesAndRemoveIndent(emitter, content_, hash_level_,
                                               indent);
 }
 

+ 14 - 14
toolchain/lexer/string_literal.h

@@ -12,17 +12,17 @@ namespace Carbon {
 
 class LexedStringLiteral {
  public:
-  // Get the text corresponding to this literal.
-  [[nodiscard]] auto Text() const -> llvm::StringRef { return text; }
-
-  // Determine whether this is a multi-line string literal.
-  [[nodiscard]] auto IsMultiLine() const -> bool { return multi_line; }
-
   // Extract a string literal token from the given text, if it has a suitable
   // form.
   static auto Lex(llvm::StringRef source_text)
       -> llvm::Optional<LexedStringLiteral>;
 
+  // Get the text corresponding to this literal.
+  [[nodiscard]] auto Text() const -> llvm::StringRef { return text_; }
+
+  // Determine whether this is a multi-line string literal.
+  [[nodiscard]] auto IsMultiLine() const -> bool { return multi_line_; }
+
   // Expand any escape sequences in the given string literal and compute the
   // resulting value. This handles error recovery internally and cannot fail.
   auto ComputeValue(DiagnosticEmitter<const char*>& emitter) const
@@ -31,22 +31,22 @@ class LexedStringLiteral {
  private:
   LexedStringLiteral(llvm::StringRef text, llvm::StringRef content,
                      int hash_level, bool multi_line)
-      : text(text),
-        content(content),
-        hash_level(hash_level),
-        multi_line(multi_line) {}
+      : text_(text),
+        content_(content),
+        hash_level_(hash_level),
+        multi_line_(multi_line) {}
 
   // The complete text of the string literal.
-  llvm::StringRef text;
+  llvm::StringRef text_;
   // The content of the literal. For a multi-line literal, this begins
   // immediately after the newline following the file type indicator, and ends
   // at the start of the closing `"""`. Leading whitespace is not removed from
   // either end.
-  llvm::StringRef content;
+  llvm::StringRef content_;
   // The number of `#`s preceding the opening `"` or `"""`.
-  int hash_level;
+  int hash_level_;
   // Whether this was a multi-line string literal.
-  bool multi_line;
+  bool multi_line_;
 };
 
 }  // namespace Carbon

+ 4 - 3
toolchain/lexer/string_literal_test.cpp

@@ -14,11 +14,10 @@
 namespace Carbon {
 namespace {
 
-struct StringLiteralTest : ::testing::Test {
+class StringLiteralTest : public ::testing::Test {
+ protected:
   StringLiteralTest() : error_tracker(ConsoleDiagnosticConsumer()) {}
 
-  ErrorTrackingDiagnosticConsumer error_tracker;
-
   auto Lex(llvm::StringRef text) -> LexedStringLiteral {
     llvm::Optional<LexedStringLiteral> result = LexedStringLiteral::Lex(text);
     assert(result);
@@ -32,6 +31,8 @@ struct StringLiteralTest : ::testing::Test {
     DiagnosticEmitter<const char*> emitter(translator, error_tracker);
     return token.ComputeValue(emitter);
   }
+
+  ErrorTrackingDiagnosticConsumer error_tracker;
 };
 
 TEST_F(StringLiteralTest, StringLiteralBounds) {

+ 6 - 6
toolchain/lexer/test_helpers.h

@@ -23,18 +23,18 @@ class SingleTokenDiagnosticTranslator
   // Form a translator for a given token. The string provided here must refer
   // to the same character array that we are going to lex.
   explicit SingleTokenDiagnosticTranslator(llvm::StringRef token)
-      : token(token) {}
+      : token_(token) {}
 
   auto GetLocation(const char* pos) -> Diagnostic::Location override {
-    assert(llvm::is_sorted(std::array{token.begin(), pos, token.end()}) &&
+    assert(llvm::is_sorted(std::array{token_.begin(), pos, token_.end()}) &&
            "invalid diagnostic location");
-    llvm::StringRef prefix = token.take_front(pos - token.begin());
+    llvm::StringRef prefix = token_.take_front(pos - token_.begin());
     auto [before_last_newline, this_line] = prefix.rsplit('\n');
     if (before_last_newline.size() == prefix.size()) {
       // On first line.
       return {.file_name = SynthesizeFilename(),
               .line_number = 1,
-              .column_number = static_cast<int32_t>(pos - token.begin() + 1)};
+              .column_number = static_cast<int32_t>(pos - token_.begin() + 1)};
     } else {
       // On second or subsequent lines. Note that the line number here is 2
       // more than the number of newlines because `rsplit` removed one newline
@@ -48,10 +48,10 @@ class SingleTokenDiagnosticTranslator
 
  private:
   [[nodiscard]] auto SynthesizeFilename() const -> std::string {
-    return llvm::formatv("`{0}`", token);
+    return llvm::formatv("`{0}`", token_);
   }
 
-  llvm::StringRef token;
+  llvm::StringRef token_;
 };
 
 }  // namespace Carbon::Testing

+ 9 - 9
toolchain/lexer/token_kind.cpp

@@ -13,7 +13,7 @@ auto TokenKind::Name() const -> llvm::StringRef {
 #define CARBON_TOKEN(TokenName) #TokenName,
 #include "toolchain/lexer/token_registry.def"
   };
-  return Names[static_cast<int>(kind_value)];
+  return Names[static_cast<int>(kind_value_)];
 }
 
 auto TokenKind::IsSymbol() const -> bool {
@@ -22,7 +22,7 @@ auto TokenKind::IsSymbol() const -> bool {
 #define CARBON_SYMBOL_TOKEN(TokenName, Spelling) true,
 #include "toolchain/lexer/token_registry.def"
   };
-  return Table[static_cast<int>(kind_value)];
+  return Table[static_cast<int>(kind_value_)];
 }
 
 auto TokenKind::IsGroupingSymbol() const -> bool {
@@ -34,7 +34,7 @@ auto TokenKind::IsGroupingSymbol() const -> bool {
   true,
 #include "toolchain/lexer/token_registry.def"
   };
-  return Table[static_cast<int>(kind_value)];
+  return Table[static_cast<int>(kind_value_)];
 }
 
 auto TokenKind::IsOpeningSymbol() const -> bool {
@@ -44,7 +44,7 @@ auto TokenKind::IsOpeningSymbol() const -> bool {
   true,
 #include "toolchain/lexer/token_registry.def"
   };
-  return Table[static_cast<int>(kind_value)];
+  return Table[static_cast<int>(kind_value_)];
 }
 
 auto TokenKind::GetClosingSymbol() const -> TokenKind {
@@ -54,7 +54,7 @@ auto TokenKind::GetClosingSymbol() const -> TokenKind {
   ClosingName(),
 #include "toolchain/lexer/token_registry.def"
   };
-  auto result = Table[static_cast<int>(kind_value)];
+  auto result = Table[static_cast<int>(kind_value_)];
   assert(result != Error() && "Only opening symbols are valid!");
   return result;
 }
@@ -66,7 +66,7 @@ auto TokenKind::IsClosingSymbol() const -> bool {
   true,
 #include "toolchain/lexer/token_registry.def"
   };
-  return Table[static_cast<int>(kind_value)];
+  return Table[static_cast<int>(kind_value_)];
 }
 
 auto TokenKind::GetOpeningSymbol() const -> TokenKind {
@@ -76,7 +76,7 @@ auto TokenKind::GetOpeningSymbol() const -> TokenKind {
   OpeningName(),
 #include "toolchain/lexer/token_registry.def"
   };
-  auto result = Table[static_cast<int>(kind_value)];
+  auto result = Table[static_cast<int>(kind_value_)];
   assert(result != Error() && "Only closing symbols are valid!");
   return result;
 }
@@ -87,7 +87,7 @@ auto TokenKind::IsKeyword() const -> bool {
 #define CARBON_KEYWORD_TOKEN(TokenName, Spelling) true,
 #include "toolchain/lexer/token_registry.def"
   };
-  return Table[static_cast<int>(kind_value)];
+  return Table[static_cast<int>(kind_value_)];
 }
 
 auto TokenKind::IsSizedTypeLiteral() const -> bool {
@@ -103,7 +103,7 @@ auto TokenKind::GetFixedSpelling() const -> llvm::StringRef {
 #define CARBON_KEYWORD_TOKEN(TokenName, Spelling) Spelling,
 #include "toolchain/lexer/token_registry.def"
   };
-  return Table[static_cast<int>(kind_value)];
+  return Table[static_cast<int>(kind_value_)];
 }
 
 }  // namespace Carbon

+ 5 - 5
toolchain/lexer/token_kind.h

@@ -35,10 +35,10 @@ class TokenKind {
   TokenKind() = delete;
 
   friend auto operator==(TokenKind lhs, TokenKind rhs) -> bool {
-    return lhs.kind_value == rhs.kind_value;
+    return lhs.kind_value_ == rhs.kind_value_;
   }
   friend auto operator!=(TokenKind lhs, TokenKind rhs) -> bool {
-    return lhs.kind_value != rhs.kind_value;
+    return lhs.kind_value_ != rhs.kind_value_;
   }
 
   // Get a friendly name for the token for logging or debugging.
@@ -97,12 +97,12 @@ class TokenKind {
   // to enable usage in `switch` and `case`. The enum remains private and
   // nothing else should be using this.
   // NOLINTNEXTLINE(google-explicit-constructor)
-  constexpr operator KindEnum() const { return kind_value; }
+  constexpr operator KindEnum() const { return kind_value_; }
 
  private:
-  constexpr explicit TokenKind(KindEnum kind_value) : kind_value(kind_value) {}
+  constexpr explicit TokenKind(KindEnum kind_value) : kind_value_(kind_value) {}
 
-  KindEnum kind_value;
+  KindEnum kind_value_;
 };
 
 }  // namespace Carbon

+ 52 - 50
toolchain/lexer/tokenized_buffer.cpp

@@ -148,8 +148,8 @@ class TokenizedBuffer::Lexer {
   }
 
   auto NoteWhitespace() -> void {
-    if (!buffer.token_infos.empty()) {
-      buffer.token_infos.back().has_trailing_space = true;
+    if (!buffer.token_infos_.empty()) {
+      buffer.token_infos_.back().has_trailing_space = true;
     }
   }
 
@@ -244,8 +244,8 @@ class TokenizedBuffer::Lexer {
                                         .token_line = current_line,
                                         .column = int_column});
           buffer.GetTokenInfo(token).literal_index =
-              buffer.literal_int_storage.size();
-          buffer.literal_int_storage.push_back(std::move(value.value));
+              buffer.literal_int_storage_.size();
+          buffer.literal_int_storage_.push_back(std::move(value.value));
           return token;
         },
         [&](LexedNumericLiteral::RealValue&& value) {
@@ -253,9 +253,9 @@ class TokenizedBuffer::Lexer {
                                         .token_line = current_line,
                                         .column = int_column});
           buffer.GetTokenInfo(token).literal_index =
-              buffer.literal_int_storage.size();
-          buffer.literal_int_storage.push_back(std::move(value.mantissa));
-          buffer.literal_int_storage.push_back(std::move(value.exponent));
+              buffer.literal_int_storage_.size();
+          buffer.literal_int_storage_.push_back(std::move(value.mantissa));
+          buffer.literal_int_storage_.push_back(std::move(value.exponent));
           assert(buffer.GetRealLiteral(token).IsDecimal() ==
                  (value.radix == 10));
           return token;
@@ -309,8 +309,8 @@ class TokenizedBuffer::Lexer {
                                   .token_line = string_line,
                                   .column = string_column});
     buffer.GetTokenInfo(token).literal_index =
-        buffer.literal_string_storage.size();
-    buffer.literal_string_storage.push_back(literal->ComputeValue(emitter));
+        buffer.literal_string_storage_.size();
+    buffer.literal_string_storage_.push_back(literal->ComputeValue(emitter));
     return token;
   }
 
@@ -406,8 +406,8 @@ class TokenizedBuffer::Lexer {
     auto token = buffer.AddToken(
         {.kind = *kind, .token_line = current_line, .column = column});
     buffer.GetTokenInfo(token).literal_index =
-        buffer.literal_int_storage.size();
-    buffer.literal_int_storage.push_back(std::move(suffix_value));
+        buffer.literal_int_storage_.size();
+    buffer.literal_int_storage_.push_back(std::move(suffix_value));
     return token;
   }
 
@@ -447,10 +447,10 @@ class TokenizedBuffer::Lexer {
   }
 
   auto GetOrCreateIdentifier(llvm::StringRef text) -> Identifier {
-    auto insert_result = buffer.identifier_map.insert(
-        {text, Identifier(buffer.identifier_infos.size())});
+    auto insert_result = buffer.identifier_map_.insert(
+        {text, Identifier(buffer.identifier_infos_.size())});
     if (insert_result.second) {
-      buffer.identifier_infos.push_back({text});
+      buffer.identifier_infos_.push_back({text});
     }
     return insert_result.first->second;
   }
@@ -573,7 +573,7 @@ auto TokenizedBuffer::Lex(SourceBuffer& source, DiagnosticConsumer& consumer)
   lexer.AddEndOfFileToken();
 
   if (error_tracking_consumer.SeenError()) {
-    buffer.has_errors = true;
+    buffer.has_errors_ = true;
   }
 
   return buffer;
@@ -605,7 +605,7 @@ auto TokenizedBuffer::GetTokenText(Token token) const -> llvm::StringRef {
   if (token_info.kind == TokenKind::Error()) {
     auto& line_info = GetLineInfo(token_info.token_line);
     int64_t token_start = line_info.start + token_info.column;
-    return source->Text().substr(token_start, token_info.error_length);
+    return source_->Text().substr(token_start, token_info.error_length);
   }
 
   // Refer back to the source text to preserve oddities like radix or digit
@@ -615,7 +615,7 @@ auto TokenizedBuffer::GetTokenText(Token token) const -> llvm::StringRef {
     auto& line_info = GetLineInfo(token_info.token_line);
     int64_t token_start = line_info.start + token_info.column;
     llvm::Optional<LexedNumericLiteral> relexed_token =
-        LexedNumericLiteral::Lex(source->Text().substr(token_start));
+        LexedNumericLiteral::Lex(source_->Text().substr(token_start));
     assert(relexed_token && "Could not reform numeric literal token.");
     return relexed_token->Text();
   }
@@ -626,7 +626,7 @@ auto TokenizedBuffer::GetTokenText(Token token) const -> llvm::StringRef {
     auto& line_info = GetLineInfo(token_info.token_line);
     int64_t token_start = line_info.start + token_info.column;
     llvm::Optional<LexedStringLiteral> relexed_token =
-        LexedStringLiteral::Lex(source->Text().substr(token_start));
+        LexedStringLiteral::Lex(source_->Text().substr(token_start));
     assert(relexed_token && "Could not reform string literal token.");
     return relexed_token->Text();
   }
@@ -637,7 +637,7 @@ auto TokenizedBuffer::GetTokenText(Token token) const -> llvm::StringRef {
     auto& line_info = GetLineInfo(token_info.token_line);
     int64_t token_start = line_info.start + token_info.column;
     llvm::StringRef suffix =
-        source->Text().substr(token_start + 1).take_while(IsDecimalDigit);
+        source_->Text().substr(token_start + 1).take_while(IsDecimalDigit);
     return llvm::StringRef(suffix.data() - 1, suffix.size() + 1);
   }
 
@@ -662,7 +662,7 @@ auto TokenizedBuffer::GetIntegerLiteral(Token token) const
   auto& token_info = GetTokenInfo(token);
   assert(token_info.kind == TokenKind::IntegerLiteral() &&
          "The token must be an integer literal!");
-  return literal_int_storage[token_info.literal_index];
+  return literal_int_storage_[token_info.literal_index];
 }
 
 auto TokenizedBuffer::GetRealLiteral(Token token) const -> RealLiteralValue {
@@ -675,7 +675,7 @@ auto TokenizedBuffer::GetRealLiteral(Token token) const -> RealLiteralValue {
   // or hexadecimal literal.
   auto& line_info = GetLineInfo(token_info.token_line);
   int64_t token_start = line_info.start + token_info.column;
-  char second_char = source->Text()[token_start + 1];
+  char second_char = source_->Text()[token_start + 1];
   bool is_decimal = second_char != 'x' && second_char != 'b';
 
   return RealLiteralValue(this, token_info.literal_index, is_decimal);
@@ -685,7 +685,7 @@ auto TokenizedBuffer::GetStringLiteral(Token token) const -> llvm::StringRef {
   auto& token_info = GetTokenInfo(token);
   assert(token_info.kind == TokenKind::StringLiteral() &&
          "The token must be a string literal!");
-  return literal_string_storage[token_info.literal_index];
+  return literal_string_storage_[token_info.literal_index];
 }
 
 auto TokenizedBuffer::GetTypeLiteralSize(Token token) const
@@ -693,7 +693,7 @@ auto TokenizedBuffer::GetTypeLiteralSize(Token token) const
   auto& token_info = GetTokenInfo(token);
   assert(token_info.kind.IsSizedTypeLiteral() &&
          "The token must be a sized type literal!");
-  return literal_int_storage[token_info.literal_index];
+  return literal_int_storage_[token_info.literal_index];
 }
 
 auto TokenizedBuffer::GetMatchedClosingToken(Token opening_token) const
@@ -726,7 +726,7 @@ auto TokenizedBuffer::IsRecoveryToken(Token token) const -> bool {
 }
 
 auto TokenizedBuffer::GetLineNumber(Line line) const -> int {
-  return line.index + 1;
+  return line.index_ + 1;
 }
 
 auto TokenizedBuffer::GetIndentColumnNumber(Line line) const -> int {
@@ -735,7 +735,7 @@ auto TokenizedBuffer::GetIndentColumnNumber(Line line) const -> int {
 
 auto TokenizedBuffer::GetIdentifierText(Identifier identifier) const
     -> llvm::StringRef {
-  return identifier_infos[identifier.index].text;
+  return identifier_infos_[identifier.index_].text;
 }
 
 auto TokenizedBuffer::PrintWidths::Widen(const PrintWidths& widths) -> void {
@@ -762,7 +762,7 @@ static auto ComputeDecimalPrintedWidth(int number) -> int {
 
 auto TokenizedBuffer::GetTokenPrintWidths(Token token) const -> PrintWidths {
   PrintWidths widths = {};
-  widths.index = ComputeDecimalPrintedWidth(token_infos.size());
+  widths.index = ComputeDecimalPrintedWidth(token_infos_.size());
   widths.kind = GetKind(token).Name().size();
   widths.line = ComputeDecimalPrintedWidth(GetLineNumber(token));
   widths.column = ComputeDecimalPrintedWidth(GetColumnNumber(token));
@@ -777,7 +777,7 @@ auto TokenizedBuffer::Print(llvm::raw_ostream& output_stream) const -> void {
   }
 
   PrintWidths widths = {};
-  widths.index = ComputeDecimalPrintedWidth((token_infos.size()));
+  widths.index = ComputeDecimalPrintedWidth((token_infos_.size()));
   for (Token token : Tokens()) {
     widths.Widen(GetTokenPrintWidths(token));
   }
@@ -796,7 +796,7 @@ auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream,
 auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream, Token token,
                                  PrintWidths widths) const -> void {
   widths.Widen(GetTokenPrintWidths(token));
-  int token_index = token.index;
+  int token_index = token.index_;
   auto& token_info = GetTokenInfo(token);
   llvm::StringRef token_text = GetTokenText(token);
 
@@ -817,11 +817,13 @@ auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream, Token token,
       token_text);
 
   if (token_info.kind == TokenKind::Identifier()) {
-    output_stream << ", identifier: " << GetIdentifier(token).index;
+    output_stream << ", identifier: " << GetIdentifier(token).index_;
   } else if (token_info.kind.IsOpeningSymbol()) {
-    output_stream << ", closing_token: " << GetMatchedClosingToken(token).index;
+    output_stream << ", closing_token: "
+                  << GetMatchedClosingToken(token).index_;
   } else if (token_info.kind.IsClosingSymbol()) {
-    output_stream << ", opening_token: " << GetMatchedOpeningToken(token).index;
+    output_stream << ", opening_token: "
+                  << GetMatchedOpeningToken(token).index_;
   } else if (token_info.kind == TokenKind::StringLiteral()) {
     output_stream << ", value: `" << GetStringLiteral(token) << "`";
   }
@@ -838,56 +840,56 @@ auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream, Token token,
 }
 
 auto TokenizedBuffer::GetLineInfo(Line line) -> LineInfo& {
-  return line_infos[line.index];
+  return line_infos_[line.index_];
 }
 
 auto TokenizedBuffer::GetLineInfo(Line line) const -> const LineInfo& {
-  return line_infos[line.index];
+  return line_infos_[line.index_];
 }
 
 auto TokenizedBuffer::AddLine(LineInfo info) -> Line {
-  line_infos.push_back(info);
-  return Line(static_cast<int>(line_infos.size()) - 1);
+  line_infos_.push_back(info);
+  return Line(static_cast<int>(line_infos_.size()) - 1);
 }
 
 auto TokenizedBuffer::GetTokenInfo(Token token) -> TokenInfo& {
-  return token_infos[token.index];
+  return token_infos_[token.index_];
 }
 
 auto TokenizedBuffer::GetTokenInfo(Token token) const -> const TokenInfo& {
-  return token_infos[token.index];
+  return token_infos_[token.index_];
 }
 
 auto TokenizedBuffer::AddToken(TokenInfo info) -> Token {
-  token_infos.push_back(info);
-  return Token(static_cast<int>(token_infos.size()) - 1);
+  token_infos_.push_back(info);
+  return Token(static_cast<int>(token_infos_.size()) - 1);
 }
 
 auto TokenizedBuffer::TokenIterator::Print(llvm::raw_ostream& output) const
     -> void {
-  output << token.index;
+  output << token_.index_;
 }
 
 auto TokenizedBuffer::SourceBufferLocationTranslator::GetLocation(
     const char* loc) -> Diagnostic::Location {
-  assert(llvm::is_sorted(std::array{buffer_->source->Text().begin(), loc,
-                                    buffer_->source->Text().end()}) &&
+  assert(llvm::is_sorted(std::array{buffer_->source_->Text().begin(), loc,
+                                    buffer_->source_->Text().end()}) &&
          "location not within buffer");
-  int64_t offset = loc - buffer_->source->Text().begin();
+  int64_t offset = loc - buffer_->source_->Text().begin();
 
   // Find the first line starting after the given location. Note that we can't
   // inspect `line.length` here because it is not necessarily correct for the
   // final line.
   auto line_it = std::partition_point(
-      buffer_->line_infos.begin(), buffer_->line_infos.end(),
+      buffer_->line_infos_.begin(), buffer_->line_infos_.end(),
       [offset](const LineInfo& line) { return line.start <= offset; });
-  bool incomplete_line_info = line_it == buffer_->line_infos.end();
+  bool incomplete_line_info = line_it == buffer_->line_infos_.end();
 
   // Step back one line to find the line containing the given position.
-  assert(line_it != buffer_->line_infos.begin() &&
+  assert(line_it != buffer_->line_infos_.begin() &&
          "location precedes the start of the first line");
   --line_it;
-  int line_number = line_it - buffer_->line_infos.begin();
+  int line_number = line_it - buffer_->line_infos_.begin();
   int column_number = offset - line_it->start;
 
   // We might still be lexing the last line. If so, check to see if there are
@@ -896,7 +898,7 @@ auto TokenizedBuffer::SourceBufferLocationTranslator::GetLocation(
   if (incomplete_line_info) {
     column_number = 0;
     for (int64_t i = line_it->start; i != offset; ++i) {
-      if (buffer_->source->Text()[i] == '\n') {
+      if (buffer_->source_->Text()[i] == '\n') {
         ++line_number;
         column_number = 0;
       } else {
@@ -905,7 +907,7 @@ auto TokenizedBuffer::SourceBufferLocationTranslator::GetLocation(
     }
   }
 
-  return {.file_name = buffer_->source->Filename().str(),
+  return {.file_name = buffer_->source_->Filename().str(),
           .line_number = line_number + 1,
           .column_number = column_number + 1};
 }
@@ -916,7 +918,7 @@ auto TokenizedBuffer::TokenLocationTranslator::GetLocation(Token token)
   auto& token_info = buffer_->GetTokenInfo(token);
   auto& line_info = buffer_->GetLineInfo(token_info.token_line);
   const char* token_start =
-      buffer_->source->Text().begin() + line_info.start + token_info.column;
+      buffer_->source_->Text().begin() + line_info.start + token_info.column;
 
   // Find the corresponding file location.
   // TODO: Should we somehow indicate in the diagnostic location if this token

+ 50 - 50
toolchain/lexer/tokenized_buffer.h

@@ -50,30 +50,30 @@ class TokenizedBufferToken {
   TokenizedBufferToken() = default;
 
   friend auto operator==(Token lhs, Token rhs) -> bool {
-    return lhs.index == rhs.index;
+    return lhs.index_ == rhs.index_;
   }
   friend auto operator!=(Token lhs, Token rhs) -> bool {
-    return lhs.index != rhs.index;
+    return lhs.index_ != rhs.index_;
   }
   friend auto operator<(Token lhs, Token rhs) -> bool {
-    return lhs.index < rhs.index;
+    return lhs.index_ < rhs.index_;
   }
   friend auto operator<=(Token lhs, Token rhs) -> bool {
-    return lhs.index <= rhs.index;
+    return lhs.index_ <= rhs.index_;
   }
   friend auto operator>(Token lhs, Token rhs) -> bool {
-    return lhs.index > rhs.index;
+    return lhs.index_ > rhs.index_;
   }
   friend auto operator>=(Token lhs, Token rhs) -> bool {
-    return lhs.index >= rhs.index;
+    return lhs.index_ >= rhs.index_;
   }
 
  private:
   friend TokenizedBuffer;
 
-  explicit TokenizedBufferToken(int index) : index(index) {}
+  explicit TokenizedBufferToken(int index) : index_(index) {}
 
-  int32_t index;
+  int32_t index_;
 };
 
 }  // namespace Internal
@@ -107,30 +107,30 @@ class TokenizedBuffer {
     Line() = default;
 
     friend auto operator==(Line lhs, Line rhs) -> bool {
-      return lhs.index == rhs.index;
+      return lhs.index_ == rhs.index_;
     }
     friend auto operator!=(Line lhs, Line rhs) -> bool {
-      return lhs.index != rhs.index;
+      return lhs.index_ != rhs.index_;
     }
     friend auto operator<(Line lhs, Line rhs) -> bool {
-      return lhs.index < rhs.index;
+      return lhs.index_ < rhs.index_;
     }
     friend auto operator<=(Line lhs, Line rhs) -> bool {
-      return lhs.index <= rhs.index;
+      return lhs.index_ <= rhs.index_;
     }
     friend auto operator>(Line lhs, Line rhs) -> bool {
-      return lhs.index > rhs.index;
+      return lhs.index_ > rhs.index_;
     }
     friend auto operator>=(Line lhs, Line rhs) -> bool {
-      return lhs.index >= rhs.index;
+      return lhs.index_ >= rhs.index_;
     }
 
    private:
     friend class TokenizedBuffer;
 
-    explicit Line(int index) : index(index) {}
+    explicit Line(int index) : index_(index) {}
 
-    int32_t index;
+    int32_t index_;
   };
 
   // A lightweight handle to a lexed identifier in a `TokenizedBuffer`.
@@ -151,18 +151,18 @@ class TokenizedBuffer {
     // Most normal APIs are provided by the `TokenizedBuffer`, we just support
     // basic comparison operations.
     friend auto operator==(Identifier lhs, Identifier rhs) -> bool {
-      return lhs.index == rhs.index;
+      return lhs.index_ == rhs.index_;
     }
     friend auto operator!=(Identifier lhs, Identifier rhs) -> bool {
-      return lhs.index != rhs.index;
+      return lhs.index_ != rhs.index_;
     }
 
    private:
     friend class TokenizedBuffer;
 
-    explicit Identifier(int index) : index(index) {}
+    explicit Identifier(int index) : index_(index) {}
 
-    int32_t index;
+    int32_t index_;
   };
 
   // Random-access iterator over tokens within the buffer.
@@ -172,28 +172,28 @@ class TokenizedBuffer {
    public:
     TokenIterator() = default;
 
-    explicit TokenIterator(Token token) : token(token) {}
+    explicit TokenIterator(Token token) : token_(token) {}
 
     auto operator==(const TokenIterator& rhs) const -> bool {
-      return token == rhs.token;
+      return token_ == rhs.token_;
     }
     auto operator<(const TokenIterator& rhs) const -> bool {
-      return token < rhs.token;
+      return token_ < rhs.token_;
     }
 
-    auto operator*() const -> const Token& { return token; }
+    auto operator*() const -> const Token& { return token_; }
 
     using iterator_facade_base::operator-;
     auto operator-(const TokenIterator& rhs) const -> int {
-      return token.index - rhs.token.index;
+      return token_.index_ - rhs.token_.index_;
     }
 
     auto operator+=(int n) -> TokenIterator& {
-      token.index += n;
+      token_.index_ += n;
       return *this;
     }
     auto operator-=(int n) -> TokenIterator& {
-      token.index -= n;
+      token_.index_ -= n;
       return *this;
     }
 
@@ -203,7 +203,7 @@ class TokenizedBuffer {
    private:
     friend class TokenizedBuffer;
 
-    Token token;
+    Token token_;
   };
 
   // The value of a real literal.
@@ -214,31 +214,31 @@ class TokenizedBuffer {
   // The `TokenizedBuffer` must outlive any `RealLiteralValue`s referring to
   // its tokens.
   class RealLiteralValue {
-    const TokenizedBuffer* buffer;
-    int32_t literal_index;
-    bool is_decimal;
-
    public:
     // The mantissa, represented as an unsigned integer.
     [[nodiscard]] auto Mantissa() const -> const llvm::APInt& {
-      return buffer->literal_int_storage[literal_index];
+      return buffer_->literal_int_storage_[literal_index_];
     }
     // The exponent, represented as a signed integer.
     [[nodiscard]] auto Exponent() const -> const llvm::APInt& {
-      return buffer->literal_int_storage[literal_index + 1];
+      return buffer_->literal_int_storage_[literal_index_ + 1];
     }
     // If false, the value is mantissa * 2^exponent.
     // If true, the value is mantissa * 10^exponent.
-    [[nodiscard]] auto IsDecimal() const -> bool { return is_decimal; }
+    [[nodiscard]] auto IsDecimal() const -> bool { return is_decimal_; }
 
    private:
     friend class TokenizedBuffer;
 
     RealLiteralValue(const TokenizedBuffer* buffer, int32_t literal_index,
                      bool is_decimal)
-        : buffer(buffer),
-          literal_index(literal_index),
-          is_decimal(is_decimal) {}
+        : buffer_(buffer),
+          literal_index_(literal_index),
+          is_decimal_(is_decimal) {}
+
+    const TokenizedBuffer* buffer_;
+    int32_t literal_index_;
+    bool is_decimal_;
   };
 
   // A diagnostic location translator that maps token locations into source
@@ -264,14 +264,14 @@ class TokenizedBuffer {
       -> TokenizedBuffer;
 
   // Returns true if the buffer has errors that are detectable at lexing time.
-  [[nodiscard]] auto HasErrors() const -> bool { return has_errors; }
+  [[nodiscard]] auto HasErrors() const -> bool { return has_errors_; }
 
   [[nodiscard]] auto Tokens() const -> llvm::iterator_range<TokenIterator> {
     return llvm::make_range(TokenIterator(Token(0)),
-                            TokenIterator(Token(token_infos.size())));
+                            TokenIterator(Token(token_infos_.size())));
   }
 
-  [[nodiscard]] auto Size() const -> int { return token_infos.size(); }
+  [[nodiscard]] auto Size() const -> int { return token_infos_.size(); }
 
   [[nodiscard]] auto GetKind(Token token) const -> TokenKind;
   [[nodiscard]] auto GetLine(Token token) const -> Line;
@@ -442,7 +442,7 @@ class TokenizedBuffer {
   // members. A working object of this type is built with the `lex` function
   // above so that its return can indicate if an error was encountered while
   // lexing.
-  explicit TokenizedBuffer(SourceBuffer& source) : source(&source) {}
+  explicit TokenizedBuffer(SourceBuffer& source) : source_(&source) {}
 
   auto GetLineInfo(Line line) -> LineInfo&;
   [[nodiscard]] auto GetLineInfo(Line line) const -> const LineInfo&;
@@ -454,23 +454,23 @@ class TokenizedBuffer {
   auto PrintToken(llvm::raw_ostream& output_stream, Token token,
                   PrintWidths widths) const -> void;
 
-  SourceBuffer* source;
+  SourceBuffer* source_;
 
-  llvm::SmallVector<TokenInfo, 16> token_infos;
+  llvm::SmallVector<TokenInfo, 16> token_infos_;
 
-  llvm::SmallVector<LineInfo, 16> line_infos;
+  llvm::SmallVector<LineInfo, 16> line_infos_;
 
-  llvm::SmallVector<IdentifierInfo, 16> identifier_infos;
+  llvm::SmallVector<IdentifierInfo, 16> identifier_infos_;
 
   // Storage for integers that form part of the value of a numeric or type
   // literal.
-  llvm::SmallVector<llvm::APInt, 16> literal_int_storage;
+  llvm::SmallVector<llvm::APInt, 16> literal_int_storage_;
 
-  llvm::SmallVector<std::string, 16> literal_string_storage;
+  llvm::SmallVector<std::string, 16> literal_string_storage_;
 
-  llvm::DenseMap<llvm::StringRef, Identifier> identifier_map;
+  llvm::DenseMap<llvm::StringRef, Identifier> identifier_map_;
 
-  bool has_errors = false;
+  bool has_errors_ = false;
 };
 
 // A diagnostic emitter that uses positions within a source buffer's text as

+ 4 - 3
toolchain/lexer/tokenized_buffer_test.cpp

@@ -34,9 +34,8 @@ using ::testing::HasSubstr;
 using ::testing::StrEq;
 namespace Yaml = Carbon::Testing::Yaml;
 
-struct LexerTest : ::testing::Test {
-  llvm::SmallVector<SourceBuffer, 16> source_storage;
-
+class LexerTest : public ::testing::Test {
+ protected:
   auto GetSourceBuffer(llvm::Twine text) -> SourceBuffer& {
     source_storage.push_back(SourceBuffer::CreateFromText(text.str()));
     return source_storage.back();
@@ -47,6 +46,8 @@ struct LexerTest : ::testing::Test {
       -> TokenizedBuffer {
     return TokenizedBuffer::Lex(GetSourceBuffer(text), consumer);
   }
+
+  llvm::SmallVector<SourceBuffer, 16> source_storage;
 };
 
 TEST_F(LexerTest, HandlesEmptyBuffer) {

+ 1 - 1
toolchain/parser/parse_node_kind.cpp

@@ -13,7 +13,7 @@ auto ParseNodeKind::GetName() const -> llvm::StringRef {
 #define CARBON_PARSE_NODE_KIND(Name) #Name,
 #include "toolchain/parser/parse_node_kind.def"
   };
-  return Names[static_cast<int>(kind)];
+  return Names[static_cast<int>(kind_)];
 }
 
 }  // namespace Carbon

+ 5 - 5
toolchain/parser/parse_node_kind.h

@@ -45,10 +45,10 @@ class ParseNodeKind {
   ParseNodeKind() = delete;
 
   friend auto operator==(ParseNodeKind lhs, ParseNodeKind rhs) -> bool {
-    return lhs.kind == rhs.kind;
+    return lhs.kind_ == rhs.kind_;
   }
   friend auto operator!=(ParseNodeKind lhs, ParseNodeKind rhs) -> bool {
-    return lhs.kind != rhs.kind;
+    return lhs.kind_ != rhs.kind_;
   }
 
   // Gets a friendly name for the token for logging or debugging.
@@ -58,12 +58,12 @@ class ParseNodeKind {
   // to enable usage in `switch` and `case`. The enum remains private and
   // nothing else should be using this.
   // NOLINTNEXTLINE(google-explicit-constructor)
-  constexpr operator KindEnum() const { return kind; }
+  constexpr operator KindEnum() const { return kind_; }
 
  private:
-  constexpr explicit ParseNodeKind(KindEnum k) : kind(k) {}
+  constexpr explicit ParseNodeKind(KindEnum k) : kind_(k) {}
 
-  KindEnum kind;
+  KindEnum kind_;
 };
 
 // We expect the parse node kind to fit compactly into 8 bits.

+ 4 - 4
toolchain/parser/parse_test_helpers.h

@@ -54,7 +54,7 @@ class ExpectedNodesMatcher
  public:
   explicit ExpectedNodesMatcher(
       llvm::SmallVector<ExpectedNode, 0> expected_nodess)
-      : expected_nodes(std::move(expected_nodess)) {}
+      : expected_nodes_(std::move(expected_nodess)) {}
 
   auto MatchAndExplain(const ParseTree& tree,
                        ::testing::MatchResultListener* output_ptr) const
@@ -66,7 +66,7 @@ class ExpectedNodesMatcher
                          int postorder_index, const ExpectedNode& expected_node,
                          ::testing::MatchResultListener& output) const -> bool;
 
-  llvm::SmallVector<ExpectedNode, 0> expected_nodes;
+  llvm::SmallVector<ExpectedNode, 0> expected_nodes_;
 };
 
 // Implementation of the Google Mock interface for matching (and explaining any
@@ -81,7 +81,7 @@ inline auto ExpectedNodesMatcher::MatchAndExplain(
   const auto nodes_end = rpo.end();
   auto nodes_it = nodes_begin;
   llvm::SmallVector<const ExpectedNode*, 16> expected_node_stack;
-  for (const ExpectedNode& en : expected_nodes) {
+  for (const ExpectedNode& en : expected_nodes_) {
     expected_node_stack.push_back(&en);
   }
   while (!expected_node_stack.empty()) {
@@ -169,7 +169,7 @@ inline auto ExpectedNodesMatcher::DescribeTo(std::ostream* output_ptr) const
   // of the actual parse tree.
   llvm::SmallVector<std::pair<const ExpectedNode*, int>, 16>
       expected_node_stack;
-  for (const ExpectedNode& expected_node : llvm::reverse(expected_nodes)) {
+  for (const ExpectedNode& expected_node : llvm::reverse(expected_nodes_)) {
     expected_node_stack.push_back({&expected_node, 0});
   }
 

+ 22 - 20
toolchain/parser/parse_tree.cpp

@@ -30,45 +30,46 @@ auto ParseTree::Parse(TokenizedBuffer& tokens, DiagnosticConsumer& consumer)
 
 auto ParseTree::Postorder() const -> llvm::iterator_range<PostorderIterator> {
   return {PostorderIterator(Node(0)),
-          PostorderIterator(Node(node_impls.size()))};
+          PostorderIterator(Node(node_impls_.size()))};
 }
 
 auto ParseTree::Postorder(Node n) const
     -> llvm::iterator_range<PostorderIterator> {
   // The postorder ends after this node, the root, and begins at the start of
   // its subtree.
-  int end_index = n.index + 1;
-  int start_index = end_index - node_impls[n.index].subtree_size;
+  int end_index = n.index_ + 1;
+  int start_index = end_index - node_impls_[n.index_].subtree_size;
   return {PostorderIterator(Node(start_index)),
           PostorderIterator(Node(end_index))};
 }
 
 auto ParseTree::Children(Node n) const
     -> llvm::iterator_range<SiblingIterator> {
-  int end_index = n.index - node_impls[n.index].subtree_size;
-  return {SiblingIterator(*this, Node(n.index - 1)),
+  int end_index = n.index_ - node_impls_[n.index_].subtree_size;
+  return {SiblingIterator(*this, Node(n.index_ - 1)),
           SiblingIterator(*this, Node(end_index))};
 }
 
 auto ParseTree::Roots() const -> llvm::iterator_range<SiblingIterator> {
-  return {SiblingIterator(*this, Node(static_cast<int>(node_impls.size()) - 1)),
-          SiblingIterator(*this, Node(-1))};
+  return {
+      SiblingIterator(*this, Node(static_cast<int>(node_impls_.size()) - 1)),
+      SiblingIterator(*this, Node(-1))};
 }
 
 auto ParseTree::HasErrorInNode(Node n) const -> bool {
-  return node_impls[n.index].has_error;
+  return node_impls_[n.index_].has_error;
 }
 
 auto ParseTree::GetNodeKind(Node n) const -> ParseNodeKind {
-  return node_impls[n.index].kind;
+  return node_impls_[n.index_].kind;
 }
 
 auto ParseTree::GetNodeToken(Node n) const -> TokenizedBuffer::Token {
-  return node_impls[n.index].token;
+  return node_impls_[n.index_].token;
 }
 
 auto ParseTree::GetNodeText(Node n) const -> llvm::StringRef {
-  return tokens->GetTokenText(node_impls[n.index].token);
+  return tokens_->GetTokenText(node_impls_[n.index_].token);
 }
 
 auto ParseTree::Print(llvm::raw_ostream& output) const -> void {
@@ -90,15 +91,16 @@ auto ParseTree::Print(llvm::raw_ostream& output) const -> void {
     Node n;
     int depth;
     std::tie(n, depth) = node_stack.pop_back_val();
-    auto& n_impl = node_impls[n.GetIndex()];
+    auto& n_impl = node_impls_[n.GetIndex()];
 
     for (int unused_indent : llvm::seq(0, depth)) {
       (void)unused_indent;
       output << "  ";
     }
 
-    output << "{node_index: " << n.index << ", kind: '" << n_impl.kind.GetName()
-           << "', text: '" << tokens->GetTokenText(n_impl.token) << "'";
+    output << "{node_index: " << n.index_ << ", kind: '"
+           << n_impl.kind.GetName() << "', text: '"
+           << tokens_->GetTokenText(n_impl.token) << "'";
 
     if (n_impl.has_error) {
       output << ", has_error: yes";
@@ -139,9 +141,9 @@ auto ParseTree::Verify() const -> bool {
   // Verify basic tree structure invariants.
   llvm::SmallVector<ParseTree::Node, 16> ancestors;
   for (Node n : llvm::reverse(Postorder())) {
-    auto& n_impl = node_impls[n.GetIndex()];
+    auto& n_impl = node_impls_[n.GetIndex()];
 
-    if (n_impl.has_error && !has_errors) {
+    if (n_impl.has_error && !has_errors_) {
       llvm::errs()
           << "Node #" << n.GetIndex()
           << " has errors, but the tree is not marked as having any.\n";
@@ -151,7 +153,7 @@ auto ParseTree::Verify() const -> bool {
     if (n_impl.subtree_size > 1) {
       if (!ancestors.empty()) {
         auto parent_n = ancestors.back();
-        auto& parent_n_impl = node_impls[parent_n.GetIndex()];
+        auto& parent_n_impl = node_impls_[parent_n.GetIndex()];
         int end_index = n.GetIndex() - n_impl.subtree_size;
         int parent_end_index = parent_n.GetIndex() - parent_n_impl.subtree_size;
         if (parent_end_index > end_index) {
@@ -181,7 +183,7 @@ auto ParseTree::Verify() const -> bool {
     while (!ancestors.empty()) {
       ParseTree::Node parent_n = ancestors.back();
       if ((parent_n.GetIndex() -
-           node_impls[parent_n.GetIndex()].subtree_size) != next_index) {
+           node_impls_[parent_n.GetIndex()].subtree_size) != next_index) {
         break;
       }
       ancestors.pop_back();
@@ -205,12 +207,12 @@ auto ParseTree::Node::Print(llvm::raw_ostream& output) const -> void {
 
 auto ParseTree::PostorderIterator::Print(llvm::raw_ostream& output) const
     -> void {
-  output << node.GetIndex();
+  output << node_.GetIndex();
 }
 
 auto ParseTree::SiblingIterator::Print(llvm::raw_ostream& output) const
     -> void {
-  output << node.GetIndex();
+  output << node_.GetIndex();
 }
 
 }  // namespace Carbon

+ 30 - 30
toolchain/parser/parse_tree.h

@@ -53,10 +53,10 @@ class ParseTree {
       -> ParseTree;
 
   // Tests whether there are any errors in the parse tree.
-  [[nodiscard]] auto HasErrors() const -> bool { return has_errors; }
+  [[nodiscard]] auto HasErrors() const -> bool { return has_errors_; }
 
   // Returns the number of nodes in this parse tree.
-  [[nodiscard]] auto Size() const -> int { return node_impls.size(); }
+  [[nodiscard]] auto Size() const -> int { return node_impls_.size(); }
 
   // Returns an iterable range over the parse tree nodes in depth-first
   // postorder.
@@ -192,12 +192,12 @@ class ParseTree {
 
   // Wires up the reference to the tokenized buffer. The global `parse` routine
   // should be used to actually parse the tokens into a tree.
-  explicit ParseTree(TokenizedBuffer& tokens_arg) : tokens(&tokens_arg) {}
+  explicit ParseTree(TokenizedBuffer& tokens_arg) : tokens_(&tokens_arg) {}
 
   // Depth-first postorder sequence of node implementation data.
-  llvm::SmallVector<NodeImpl, 0> node_impls;
+  llvm::SmallVector<NodeImpl, 0> node_impls_;
 
-  TokenizedBuffer* tokens;
+  TokenizedBuffer* tokens_;
 
   // Indicates if any errors were encountered while parsing.
   //
@@ -207,7 +207,7 @@ class ParseTree {
   // some errors were encountered somewhere. A key implication is that when this
   // is true we do *not* have the expected 1:1 mapping between tokens and parsed
   // nodes as some tokens may have been skipped.
-  bool has_errors = false;
+  bool has_errors_ = false;
 };
 
 // A lightweight handle representing a node in the tree.
@@ -227,29 +227,29 @@ class ParseTree::Node {
   Node() = default;
 
   friend auto operator==(Node lhs, Node rhs) -> bool {
-    return lhs.index == rhs.index;
+    return lhs.index_ == rhs.index_;
   }
   friend auto operator!=(Node lhs, Node rhs) -> bool {
-    return lhs.index != rhs.index;
+    return lhs.index_ != rhs.index_;
   }
   friend auto operator<(Node lhs, Node rhs) -> bool {
-    return lhs.index < rhs.index;
+    return lhs.index_ < rhs.index_;
   }
   friend auto operator<=(Node lhs, Node rhs) -> bool {
-    return lhs.index <= rhs.index;
+    return lhs.index_ <= rhs.index_;
   }
   friend auto operator>(Node lhs, Node rhs) -> bool {
-    return lhs.index > rhs.index;
+    return lhs.index_ > rhs.index_;
   }
   friend auto operator>=(Node lhs, Node rhs) -> bool {
-    return lhs.index >= rhs.index;
+    return lhs.index_ >= rhs.index_;
   }
 
   // Returns an opaque integer identifier of the node in the tree. Clients
   // should not expect any particular semantics from this value.
   //
   // FIXME: Maybe we can switch to stream operator overloads?
-  [[nodiscard]] auto GetIndex() const -> int { return index; }
+  [[nodiscard]] auto GetIndex() const -> int { return index_; }
 
   // Prints the node index.
   auto Print(llvm::raw_ostream& output) const -> void;
@@ -262,10 +262,10 @@ class ParseTree::Node {
 
   // Constructs a node with a specific index into the parse tree's postorder
   // sequence of node implementations.
-  explicit Node(int index_arg) : index(index_arg) {}
+  explicit Node(int index) : index_(index) {}
 
   // The index of this node's implementation in the postorder sequence.
-  int32_t index;
+  int32_t index_;
 };
 
 // A random-access iterator to the depth-first postorder sequence of parse nodes
@@ -282,24 +282,24 @@ class ParseTree::PostorderIterator
   PostorderIterator() = default;
 
   auto operator==(const PostorderIterator& rhs) const -> bool {
-    return node == rhs.node;
+    return node_ == rhs.node_;
   }
   auto operator<(const PostorderIterator& rhs) const -> bool {
-    return node < rhs.node;
+    return node_ < rhs.node_;
   }
 
-  auto operator*() const -> Node { return node; }
+  auto operator*() const -> Node { return node_; }
 
   auto operator-(const PostorderIterator& rhs) const -> int {
-    return node.index - rhs.node.index;
+    return node_.index_ - rhs.node_.index_;
   }
 
   auto operator+=(int offset) -> PostorderIterator& {
-    node.index += offset;
+    node_.index_ += offset;
     return *this;
   }
   auto operator-=(int offset) -> PostorderIterator& {
-    node.index -= offset;
+    node_.index_ -= offset;
     return *this;
   }
 
@@ -309,9 +309,9 @@ class ParseTree::PostorderIterator
  private:
   friend class ParseTree;
 
-  explicit PostorderIterator(Node n) : node(n) {}
+  explicit PostorderIterator(Node n) : node_(n) {}
 
-  Node node;
+  Node node_;
 };
 
 // A forward iterator across the siblings at a particular level in the parse
@@ -332,19 +332,19 @@ class ParseTree::SiblingIterator
   SiblingIterator() = default;
 
   auto operator==(const SiblingIterator& rhs) const -> bool {
-    return node == rhs.node;
+    return node_ == rhs.node_;
   }
   auto operator<(const SiblingIterator& rhs) const -> bool {
     // Note that child iterators walk in reverse compared to the postorder
     // index.
-    return node > rhs.node;
+    return node_ > rhs.node_;
   }
 
-  auto operator*() const -> Node { return node; }
+  auto operator*() const -> Node { return node_; }
 
   using iterator_facade_base::operator++;
   auto operator++() -> SiblingIterator& {
-    node.index -= std::abs(tree->node_impls[node.index].subtree_size);
+    node_.index_ -= std::abs(tree_->node_impls_[node_.index_].subtree_size);
     return *this;
   }
 
@@ -355,11 +355,11 @@ class ParseTree::SiblingIterator
   friend class ParseTree;
 
   explicit SiblingIterator(const ParseTree& tree_arg, Node n)
-      : tree(&tree_arg), node(n) {}
+      : tree_(&tree_arg), node_(n) {}
 
-  const ParseTree* tree;
+  const ParseTree* tree_;
 
-  Node node;
+  Node node_;
 };
 
 }  // namespace Carbon

+ 6 - 5
toolchain/parser/parse_tree_test.cpp

@@ -32,11 +32,8 @@ using ::testing::Ne;
 using ::testing::StrEq;
 namespace Yaml = Carbon::Testing::Yaml;
 
-struct ParseTreeTest : ::testing::Test {
-  std::forward_list<SourceBuffer> source_storage;
-  std::forward_list<TokenizedBuffer> token_storage;
-  DiagnosticConsumer& consumer = ConsoleDiagnosticConsumer();
-
+class ParseTreeTest : public ::testing::Test {
+ protected:
   auto GetSourceBuffer(llvm::Twine t) -> SourceBuffer& {
     source_storage.push_front(SourceBuffer::CreateFromText(t.str()));
     return source_storage.front();
@@ -47,6 +44,10 @@ struct ParseTreeTest : ::testing::Test {
         TokenizedBuffer::Lex(GetSourceBuffer(t), consumer));
     return token_storage.front();
   }
+
+  std::forward_list<SourceBuffer> source_storage;
+  std::forward_list<TokenizedBuffer> token_storage;
+  DiagnosticConsumer& consumer = ConsoleDiagnosticConsumer();
 };
 
 TEST_F(ParseTreeTest, Empty) {

+ 109 - 109
toolchain/parser/parser_impl.cpp

@@ -58,9 +58,6 @@ struct ExpectedStructLiteralField
     : SimpleDiagnostic<ExpectedStructLiteralField> {
   static constexpr llvm::StringLiteral ShortName = "syntax-error";
 
-  bool can_be_type;
-  bool can_be_value;
-
   auto Format() -> std::string {
     std::string result = "Expected ";
     if (can_be_type) {
@@ -75,6 +72,9 @@ struct ExpectedStructLiteralField
     result += ".";
     return result;
   }
+
+  bool can_be_type;
+  bool can_be_value;
 };
 
 struct UnrecognizedDeclaration : SimpleDiagnostic<UnrecognizedDeclaration> {
@@ -97,11 +97,11 @@ struct ExpectedParenAfter : SimpleDiagnostic<ExpectedParenAfter> {
   static constexpr llvm::StringLiteral ShortName = "syntax-error";
   static constexpr const char* Message = "Expected `(` after `{0}`.";
 
-  TokenKind introducer;
-
   auto Format() -> std::string {
     return llvm::formatv(Message, introducer.GetFixedSpelling()).str();
   }
+
+  TokenKind introducer;
 };
 
 struct ExpectedCloseParen : SimpleDiagnostic<ExpectedCloseParen> {
@@ -124,11 +124,11 @@ struct ExpectedSemiAfter : SimpleDiagnostic<ExpectedSemiAfter> {
   static constexpr llvm::StringLiteral ShortName = "syntax-error";
   static constexpr const char* Message = "Expected `;` after `{0}`.";
 
-  TokenKind preceding;
-
   auto Format() -> std::string {
     return llvm::formatv(Message, preceding.GetFixedSpelling()).str();
   }
+
+  TokenKind preceding;
 };
 
 struct ExpectedIdentifierAfterDot
@@ -143,11 +143,11 @@ struct UnexpectedTokenAfterListElement
   static constexpr llvm::StringLiteral ShortName = "syntax-error";
   static constexpr const char* Message = "Expected `,` or `{0}`.";
 
-  TokenKind close;
-
   auto Format() -> std::string {
     return llvm::formatv(Message, close.GetFixedSpelling()).str();
   }
+
+  TokenKind close;
 };
 
 struct BinaryOperatorRequiresWhitespace
@@ -156,20 +156,18 @@ struct BinaryOperatorRequiresWhitespace
   static constexpr const char* Message =
       "Whitespace missing {0} binary operator.";
 
-  bool has_leading_space;
-  bool has_trailing_space;
-
   auto Format() -> std::string {
-    const char* where = "around";
-    // clang-format off
+    const char* position = "around";
     if (has_leading_space) {
-      where = "after";
+      position = "after";
     } else if (has_trailing_space) {
-      where = "before";
+      position = "before";
     }
-    // clang-format on
-    return llvm::formatv(Message, where);
+    return llvm::formatv(Message, position);
   }
+
+  bool has_leading_space;
+  bool has_trailing_space;
 };
 
 struct UnaryOperatorHasWhitespace
@@ -178,11 +176,11 @@ struct UnaryOperatorHasWhitespace
   static constexpr const char* Message =
       "Whitespace is not allowed {0} this unary operator.";
 
-  bool prefix;
-
   auto Format() -> std::string {
     return llvm::formatv(Message, prefix ? "after" : "before");
   }
+
+  bool prefix;
 };
 
 struct UnaryOperatorRequiresWhitespace
@@ -191,11 +189,11 @@ struct UnaryOperatorRequiresWhitespace
   static constexpr const char* Message =
       "Whitespace is required {0} this unary operator.";
 
-  bool prefix;
-
   auto Format() -> std::string {
     return llvm::formatv(Message, prefix ? "before" : "after");
   }
+
+  bool prefix;
 };
 
 struct OperatorRequiresParentheses
@@ -207,15 +205,15 @@ struct OperatorRequiresParentheses
 
 ParseTree::Parser::Parser(ParseTree& tree_arg, TokenizedBuffer& tokens_arg,
                           TokenDiagnosticEmitter& emitter)
-    : tree(tree_arg),
-      tokens(tokens_arg),
-      emitter(emitter),
-      position(tokens.Tokens().begin()),
-      end(tokens.Tokens().end()) {
-  assert(std::find_if(position, end,
+    : tree_(tree_arg),
+      tokens_(tokens_arg),
+      emitter_(emitter),
+      position_(tokens_.Tokens().begin()),
+      end_(tokens_.Tokens().end()) {
+  assert(std::find_if(position_, end_,
                       [&](TokenizedBuffer::Token t) {
-                        return tokens.GetKind(t) == TokenKind::EndOfFile();
-                      }) != end &&
+                        return tokens_.GetKind(t) == TokenKind::EndOfFile();
+                      }) != end_ &&
          "No EndOfFileToken in token buffer.");
 }
 
@@ -226,18 +224,18 @@ auto ParseTree::Parser::Parse(TokenizedBuffer& tokens,
   // We expect to have a 1:1 correspondence between tokens and tree nodes, so
   // reserve the space we expect to need here to avoid allocation and copying
   // overhead.
-  tree.node_impls.reserve(tokens.Size());
+  tree.node_impls_.reserve(tokens.Size());
 
   Parser parser(tree, tokens, emitter);
   while (!parser.AtEndOfFile()) {
     if (!parser.ParseDeclaration()) {
       // We don't have an enclosing parse tree node to mark as erroneous, so
       // just mark the tree as a whole.
-      tree.has_errors = true;
+      tree.has_errors_ = true;
     }
   }
 
-  parser.AddLeafNode(ParseNodeKind::FileEnd(), *parser.position);
+  parser.AddLeafNode(ParseNodeKind::FileEnd(), *parser.position_);
 
   assert(tree.Verify() && "Parse tree built but does not verify!");
   return tree;
@@ -246,9 +244,10 @@ auto ParseTree::Parser::Parse(TokenizedBuffer& tokens,
 auto ParseTree::Parser::Consume(TokenKind kind) -> TokenizedBuffer::Token {
   assert(kind != TokenKind::EndOfFile() && "Cannot consume the EOF token!");
   assert(NextTokenIs(kind) && "The current token is the wrong kind!");
-  TokenizedBuffer::Token t = *position;
-  ++position;
-  assert(position != end && "Reached end of tokens without finding EOF token.");
+  TokenizedBuffer::Token t = *position_;
+  ++position_;
+  assert(position_ != end_ &&
+         "Reached end of tokens without finding EOF token.");
   return t;
 }
 
@@ -262,8 +261,8 @@ auto ParseTree::Parser::ConsumeIf(TokenKind kind)
 
 auto ParseTree::Parser::AddLeafNode(ParseNodeKind kind,
                                     TokenizedBuffer::Token token) -> Node {
-  Node n(tree.node_impls.size());
-  tree.node_impls.push_back(NodeImpl(kind, token, /*subtree_size_arg=*/1));
+  Node n(tree_.node_impls_.size());
+  tree_.node_impls_.push_back(NodeImpl(kind, token, /*subtree_size_arg=*/1));
   return n;
 }
 
@@ -279,8 +278,8 @@ auto ParseTree::Parser::ConsumeAndAddLeafNodeIf(TokenKind t_kind,
 }
 
 auto ParseTree::Parser::MarkNodeError(Node n) -> void {
-  tree.node_impls[n.index].has_error = true;
-  tree.has_errors = true;
+  tree_.node_impls_[n.index_].has_error = true;
+  tree_.has_errors_ = true;
 }
 
 // A marker for the start of a node's subtree.
@@ -292,18 +291,18 @@ struct ParseTree::Parser::SubtreeStart {
 };
 
 auto ParseTree::Parser::GetSubtreeStartPosition() -> SubtreeStart {
-  return {static_cast<int>(tree.node_impls.size())};
+  return {static_cast<int>(tree_.node_impls_.size())};
 }
 
 auto ParseTree::Parser::AddNode(ParseNodeKind n_kind, TokenizedBuffer::Token t,
                                 SubtreeStart start, bool has_error) -> Node {
   // The size of the subtree is the change in size from when we started this
   // subtree to now, but including the node we're about to add.
-  int tree_stop_size = static_cast<int>(tree.node_impls.size()) + 1;
+  int tree_stop_size = static_cast<int>(tree_.node_impls_.size()) + 1;
   int subtree_size = tree_stop_size - start.tree_size;
 
-  Node n(tree.node_impls.size());
-  tree.node_impls.push_back(NodeImpl(n_kind, t, subtree_size));
+  Node n(tree_.node_impls_.size());
+  tree_.node_impls_.push_back(NodeImpl(n_kind, t, subtree_size));
   if (has_error) {
     MarkNodeError(n);
   }
@@ -312,30 +311,30 @@ auto ParseTree::Parser::AddNode(ParseNodeKind n_kind, TokenizedBuffer::Token t,
 }
 
 auto ParseTree::Parser::SkipMatchingGroup() -> bool {
-  TokenizedBuffer::Token t = *position;
-  TokenKind t_kind = tokens.GetKind(t);
+  TokenizedBuffer::Token t = *position_;
+  TokenKind t_kind = tokens_.GetKind(t);
   if (!t_kind.IsOpeningSymbol()) {
     return false;
   }
 
-  SkipTo(tokens.GetMatchedClosingToken(t));
+  SkipTo(tokens_.GetMatchedClosingToken(t));
   Consume(t_kind.GetClosingSymbol());
   return true;
 }
 
 auto ParseTree::Parser::SkipTo(TokenizedBuffer::Token t) -> void {
-  assert(t >= *position && "Tried to skip backwards.");
-  position = TokenizedBuffer::TokenIterator(t);
-  assert(position != end && "Skipped past EOF.");
+  assert(t >= *position_ && "Tried to skip backwards.");
+  position_ = TokenizedBuffer::TokenIterator(t);
+  assert(position_ != end_ && "Skipped past EOF.");
 }
 
 auto ParseTree::Parser::FindNextOf(
     std::initializer_list<TokenKind> desired_kinds)
     -> llvm::Optional<TokenizedBuffer::Token> {
-  auto new_position = position;
+  auto new_position = position_;
   while (true) {
     TokenizedBuffer::Token token = *new_position;
-    TokenKind kind = tokens.GetKind(token);
+    TokenKind kind = tokens_.GetKind(token);
     if (kind.IsOneOf(desired_kinds)) {
       return token;
     }
@@ -346,7 +345,7 @@ auto ParseTree::Parser::FindNextOf(
       return llvm::None;
     } else if (kind.IsOpeningSymbol()) {
       new_position =
-          TokenizedBuffer::TokenIterator(tokens.GetMatchedClosingToken(token));
+          TokenizedBuffer::TokenIterator(tokens_.GetMatchedClosingToken(token));
     } else {
       ++new_position;
     }
@@ -360,19 +359,19 @@ auto ParseTree::Parser::SkipPastLikelyEnd(TokenizedBuffer::Token skip_root,
     return llvm::None;
   }
 
-  TokenizedBuffer::Line root_line = tokens.GetLine(skip_root);
-  int root_line_indent = tokens.GetIndentColumnNumber(root_line);
+  TokenizedBuffer::Line root_line = tokens_.GetLine(skip_root);
+  int root_line_indent = tokens_.GetIndentColumnNumber(root_line);
 
   // We will keep scanning through tokens on the same line as the root or
   // lines with greater indentation than root's line.
   auto is_same_line_or_indent_greater_than_root =
       [&](TokenizedBuffer::Token t) {
-        TokenizedBuffer::Line l = tokens.GetLine(t);
+        TokenizedBuffer::Line l = tokens_.GetLine(t);
         if (l == root_line) {
           return true;
         }
 
-        return tokens.GetIndentColumnNumber(l) > root_line_indent;
+        return tokens_.GetIndentColumnNumber(l) > root_line_indent;
       };
 
   do {
@@ -388,7 +387,7 @@ auto ParseTree::Parser::SkipPastLikelyEnd(TokenizedBuffer::Token skip_root,
       return on_semi(*semi);
     }
 
-    // Skip over any matching group of tokens.
+    // Skip over any matching group of tokens.
     if (SkipMatchingGroup()) {
       continue;
     }
@@ -396,7 +395,7 @@ auto ParseTree::Parser::SkipPastLikelyEnd(TokenizedBuffer::Token skip_root,
     // Otherwise just step forward one token.
     Consume(NextTokenKind());
   } while (!AtEndOfFile() &&
-           is_same_line_or_indent_greater_than_root(*position));
+           is_same_line_or_indent_greater_than_root(*position_));
 
   return llvm::None;
 }
@@ -409,8 +408,9 @@ auto ParseTree::Parser::ParseCloseParen(TokenizedBuffer::Token open_paren,
     return close_paren;
   }
 
-  emitter.EmitError<ExpectedCloseParen>(*position, {.open_paren = open_paren});
-  SkipTo(tokens.GetMatchedClosingToken(open_paren));
+  emitter_.EmitError<ExpectedCloseParen>(*position_,
+                                         {.open_paren = open_paren});
+  SkipTo(tokens_.GetMatchedClosingToken(open_paren));
   AddLeafNode(kind, Consume(TokenKind::CloseParen()));
   return llvm::None;
 }
@@ -441,8 +441,8 @@ auto ParseTree::Parser::ParseList(TokenKind open, TokenKind close,
 
       if (!NextTokenIsOneOf({close, TokenKind::Comma()})) {
         if (!element_error) {
-          emitter.EmitError<UnexpectedTokenAfterListElement>(*position,
-                                                             {.close = close});
+          emitter_.EmitError<UnexpectedTokenAfterListElement>(*position_,
+                                                              {.close = close});
         }
         has_errors = true;
 
@@ -471,7 +471,7 @@ auto ParseTree::Parser::ParseList(TokenKind open, TokenKind close,
 
 auto ParseTree::Parser::ParsePattern(PatternKind kind) -> llvm::Optional<Node> {
   if (NextTokenIs(TokenKind::Identifier()) &&
-      tokens.GetKind(*(position + 1)) == TokenKind::Colon()) {
+      tokens_.GetKind(*(position_ + 1)) == TokenKind::Colon()) {
     // identifier `:` type
     auto start = GetSubtreeStartPosition();
     AddLeafNode(ParseNodeKind::DeclaredName(),
@@ -484,11 +484,11 @@ auto ParseTree::Parser::ParsePattern(PatternKind kind) -> llvm::Optional<Node> {
 
   switch (kind) {
     case PatternKind::Parameter:
-      emitter.EmitError<ExpectedParameterName>(*position);
+      emitter_.EmitError<ExpectedParameterName>(*position_);
       break;
 
     case PatternKind::Variable:
-      emitter.EmitError<ExpectedVariableName>(*position);
+      emitter_.EmitError<ExpectedVariableName>(*position_);
       break;
   }
 
@@ -530,7 +530,7 @@ auto ParseTree::Parser::ParseCodeBlock() -> llvm::Optional<Node> {
       ConsumeIf(TokenKind::OpenCurlyBrace());
   if (!maybe_open_curly) {
     // Recover by parsing a single statement.
-    emitter.EmitError<ExpectedCodeBlock>(*position);
+    emitter_.EmitError<ExpectedCodeBlock>(*position_);
     return ParseStatement();
   }
   TokenizedBuffer::Token open_curly = *maybe_open_curly;
@@ -546,7 +546,7 @@ auto ParseTree::Parser::ParseCodeBlock() -> llvm::Optional<Node> {
       // to the actual close curly brace from here.
       // FIXME: It would be better to skip to the next semicolon, or the next
       // token at the start of a line with the same indent as this one.
-      SkipTo(tokens.GetMatchedClosingToken(open_curly));
+      SkipTo(tokens_.GetMatchedClosingToken(open_curly));
       has_errors = true;
       break;
     }
@@ -576,7 +576,7 @@ auto ParseTree::Parser::ParseFunctionDeclaration() -> Node {
   auto name_n = ConsumeAndAddLeafNodeIf(TokenKind::Identifier(),
                                         ParseNodeKind::DeclaredName());
   if (!name_n) {
-    emitter.EmitError<ExpectedFunctionName>(*position);
+    emitter_.EmitError<ExpectedFunctionName>(*position_);
     // FIXME: We could change the lexer to allow us to synthesize certain
     // kinds of tokens and try to "recover" here, but unclear that this is
     // really useful.
@@ -584,14 +584,14 @@ auto ParseTree::Parser::ParseFunctionDeclaration() -> Node {
     return add_error_function_node();
   }
 
-  TokenizedBuffer::Token open_paren = *position;
-  if (tokens.GetKind(open_paren) != TokenKind::OpenParen()) {
-    emitter.EmitError<ExpectedFunctionParams>(open_paren);
+  TokenizedBuffer::Token open_paren = *position_;
+  if (tokens_.GetKind(open_paren) != TokenKind::OpenParen()) {
+    emitter_.EmitError<ExpectedFunctionParams>(open_paren);
     SkipPastLikelyEnd(function_intro_token, handle_semi_in_error_recovery);
     return add_error_function_node();
   }
   TokenizedBuffer::Token close_paren =
-      tokens.GetMatchedClosingToken(open_paren);
+      tokens_.GetMatchedClosingToken(open_paren);
 
   if (!ParseFunctionSignature()) {
     // Don't try to parse more of the function declaration, but consume a
@@ -607,8 +607,8 @@ auto ParseTree::Parser::ParseFunctionDeclaration() -> Node {
     }
   } else if (!ConsumeAndAddLeafNodeIf(TokenKind::Semi(),
                                       ParseNodeKind::DeclarationEnd())) {
-    emitter.EmitError<ExpectedFunctionBodyOrSemi>(*position);
-    if (tokens.GetLine(*position) == tokens.GetLine(close_paren)) {
+    emitter_.EmitError<ExpectedFunctionBodyOrSemi>(*position_);
+    if (tokens_.GetLine(*position_) == tokens_.GetLine(close_paren)) {
       // Only need to skip if we've not already found a new line.
       SkipPastLikelyEnd(function_intro_token, handle_semi_in_error_recovery);
     }
@@ -643,7 +643,7 @@ auto ParseTree::Parser::ParseVariableDeclaration() -> Node {
   auto semi = ConsumeAndAddLeafNodeIf(TokenKind::Semi(),
                                       ParseNodeKind::DeclarationEnd());
   if (!semi) {
-    emitter.EmitError<ExpectedSemiAfterExpression>(*position);
+    emitter_.EmitError<ExpectedSemiAfterExpression>(*position_);
     SkipPastLikelyEnd(var_token, [&](TokenizedBuffer::Token semi) {
       return AddLeafNode(ParseNodeKind::DeclarationEnd(), semi);
     });
@@ -674,12 +674,12 @@ auto ParseTree::Parser::ParseDeclaration() -> llvm::Optional<Node> {
   }
 
   // We didn't recognize an introducer for a valid declaration.
-  emitter.EmitError<UnrecognizedDeclaration>(*position);
+  emitter_.EmitError<UnrecognizedDeclaration>(*position_);
 
   // Skip forward past any end of a declaration we simply didn't understand so
   // that we can find the start of the next declaration or the end of a scope.
   if (auto found_semi_n =
-          SkipPastLikelyEnd(*position, [&](TokenizedBuffer::Token semi) {
+          SkipPastLikelyEnd(*position_, [&](TokenizedBuffer::Token semi) {
             return AddLeafNode(ParseNodeKind::EmptyDeclaration(), semi);
           })) {
     MarkNodeError(*found_semi_n);
@@ -733,8 +733,8 @@ auto ParseTree::Parser::ParseBraceExpression() -> llvm::Optional<Node> {
         auto start_elem = GetSubtreeStartPosition();
 
         auto diagnose_invalid_syntax = [&] {
-          emitter.EmitError<ExpectedStructLiteralField>(
-              *position,
+          emitter_.EmitError<ExpectedStructLiteralField>(
+              *position_,
               {.can_be_type = kind != Value, .can_be_value = kind != Type});
           return llvm::None;
         };
@@ -749,7 +749,7 @@ auto ParseTree::Parser::ParseBraceExpression() -> llvm::Optional<Node> {
           auto recovery_pos = FindNextOf(
               {TokenKind::Equal(), TokenKind::Colon(), TokenKind::Comma()});
           if (!recovery_pos ||
-              tokens.GetKind(*recovery_pos) == TokenKind::Comma()) {
+              tokens_.GetKind(*recovery_pos) == TokenKind::Comma()) {
             return llvm::None;
           }
           SkipTo(*recovery_pos);
@@ -809,7 +809,7 @@ auto ParseTree::Parser::ParsePrimaryExpression() -> llvm::Optional<Node> {
       return ParseBraceExpression();
 
     default:
-      emitter.EmitError<ExpectedExpression>(*position);
+      emitter_.EmitError<ExpectedExpression>(*position_);
       return llvm::None;
   }
 
@@ -826,7 +826,7 @@ auto ParseTree::Parser::ParseDesignatorExpression(SubtreeStart start,
   if (name) {
     AddLeafNode(ParseNodeKind::DesignatedName(), *name);
   } else {
-    emitter.EmitError<ExpectedIdentifierAfterDot>(*position);
+    emitter_.EmitError<ExpectedIdentifierAfterDot>(*position_);
     // If we see a keyword, assume it was intended to be the designated name.
     // TODO: Should keywords be valid in designators?
     if (NextTokenKind().IsKeyword()) {
@@ -909,8 +909,8 @@ static auto IsPossibleStartOfOperand(TokenKind kind) -> bool {
 auto ParseTree::Parser::IsLexicallyValidInfixOperator() -> bool {
   assert(!AtEndOfFile() && "Expected an operator token.");
 
-  bool leading_space = tokens.HasLeadingWhitespace(*position);
-  bool trailing_space = tokens.HasTrailingWhitespace(*position);
+  bool leading_space = tokens_.HasLeadingWhitespace(*position_);
+  bool trailing_space = tokens_.HasTrailingWhitespace(*position_);
 
   // If there's whitespace on both sides, it's an infix operator.
   if (leading_space && trailing_space) {
@@ -925,9 +925,9 @@ auto ParseTree::Parser::IsLexicallyValidInfixOperator() -> bool {
   // Otherwise, for an infix operator, the preceding token must be any close
   // bracket, identifier, or literal and the next token must be an open paren,
   // identifier, or literal.
-  if (position == tokens.Tokens().begin() ||
-      !IsAssumedEndOfOperand(tokens.GetKind(*(position - 1))) ||
-      !IsAssumedStartOfOperand(tokens.GetKind(*(position + 1)))) {
+  if (position_ == tokens_.Tokens().begin() ||
+      !IsAssumedEndOfOperand(tokens_.GetKind(*(position_ - 1))) ||
+      !IsAssumedStartOfOperand(tokens_.GetKind(*(position_ + 1)))) {
     return false;
   }
 
@@ -940,10 +940,10 @@ auto ParseTree::Parser::DiagnoseOperatorFixity(OperatorFixity fixity) -> void {
   if (fixity == OperatorFixity::Infix) {
     // Infix operators must satisfy the infix operator rules.
     if (!is_valid_as_infix) {
-      emitter.EmitError<BinaryOperatorRequiresWhitespace>(
-          *position,
-          {.has_leading_space = tokens.HasLeadingWhitespace(*position),
-           .has_trailing_space = tokens.HasTrailingWhitespace(*position)});
+      emitter_.EmitError<BinaryOperatorRequiresWhitespace>(
+          *position_,
+          {.has_leading_space = tokens_.HasLeadingWhitespace(*position_),
+           .has_trailing_space = tokens_.HasTrailingWhitespace(*position_)});
     }
   } else {
     bool prefix = fixity == OperatorFixity::Prefix;
@@ -951,15 +951,15 @@ auto ParseTree::Parser::DiagnoseOperatorFixity(OperatorFixity fixity) -> void {
     // Whitespace is not permitted between a symbolic pre/postfix operator and
     // its operand.
     if (NextTokenKind().IsSymbol() &&
-        (prefix ? tokens.HasTrailingWhitespace(*position)
-                : tokens.HasLeadingWhitespace(*position))) {
-      emitter.EmitError<UnaryOperatorHasWhitespace>(*position,
-                                                    {.prefix = prefix});
+        (prefix ? tokens_.HasTrailingWhitespace(*position_)
+                : tokens_.HasLeadingWhitespace(*position_))) {
+      emitter_.EmitError<UnaryOperatorHasWhitespace>(*position_,
+                                                     {.prefix = prefix});
     }
     // Pre/postfix operators must not satisfy the infix operator rules.
     if (is_valid_as_infix) {
-      emitter.EmitError<UnaryOperatorRequiresWhitespace>(*position,
-                                                         {.prefix = prefix});
+      emitter_.EmitError<UnaryOperatorRequiresWhitespace>(*position_,
+                                                          {.prefix = prefix});
     }
   }
 }
@@ -972,7 +972,7 @@ auto ParseTree::Parser::IsTrailingOperatorInfix() -> bool {
   // An operator that follows the infix operator rules is parsed as
   // infix, unless the next token means that it can't possibly be.
   if (IsLexicallyValidInfixOperator() &&
-      IsPossibleStartOfOperand(tokens.GetKind(*(position + 1)))) {
+      IsPossibleStartOfOperand(tokens_.GetKind(*(position_ + 1)))) {
     return true;
   }
 
@@ -980,8 +980,8 @@ auto ParseTree::Parser::IsTrailingOperatorInfix() -> bool {
   // not valid at all. If the next token looks like the start of an operand,
   // then parse as infix, otherwise as postfix. Either way we'll produce a
   // diagnostic later on.
-  if (tokens.HasLeadingWhitespace(*position) &&
-      IsAssumedStartOfOperand(tokens.GetKind(*(position + 1)))) {
+  if (tokens_.HasLeadingWhitespace(*position_) &&
+      IsAssumedStartOfOperand(tokens_.GetKind(*(position_ + 1)))) {
     return true;
   }
 
@@ -1005,7 +1005,7 @@ auto ParseTree::Parser::ParseOperatorExpression(
         OperatorPriority::RightFirst) {
       // The precedence rules don't permit this prefix operator in this
       // context. Diagnose this, but carry on and parse it anyway.
-      emitter.EmitError<OperatorRequiresParentheses>(*position);
+      emitter_.EmitError<OperatorRequiresParentheses>(*position_);
     } else {
       // Check that this operator follows the proper whitespace rules.
       DiagnoseOperatorFixity(OperatorFixity::Prefix);
@@ -1038,7 +1038,7 @@ auto ParseTree::Parser::ParseOperatorExpression(
       // Either the LHS operator and this operator are ambiguous, or the
      // LHS operator is a unary operator that can't be nested within
       // this operator. Either way, parentheses are required.
-      emitter.EmitError<OperatorRequiresParentheses>(*position);
+      emitter_.EmitError<OperatorRequiresParentheses>(*position_);
       lhs = llvm::None;
     } else {
       DiagnoseOperatorFixity(is_binary ? OperatorFixity::Infix
@@ -1070,7 +1070,7 @@ auto ParseTree::Parser::ParseType() -> llvm::Optional<Node> {
 }
 
 auto ParseTree::Parser::ParseExpressionStatement() -> llvm::Optional<Node> {
-  TokenizedBuffer::Token start_token = *position;
+  TokenizedBuffer::Token start_token = *position_;
   auto start = GetSubtreeStartPosition();
 
   bool has_errors = !ParseExpression();
@@ -1081,7 +1081,7 @@ auto ParseTree::Parser::ParseExpressionStatement() -> llvm::Optional<Node> {
   }
 
   if (!has_errors) {
-    emitter.EmitError<ExpectedSemiAfterExpression>(*position);
+    emitter_.EmitError<ExpectedSemiAfterExpression>(*position_);
   }
 
   if (auto recovery_node =
@@ -1102,8 +1102,8 @@ auto ParseTree::Parser::ParseParenCondition(TokenKind introducer)
   auto start = GetSubtreeStartPosition();
   auto open_paren = ConsumeIf(TokenKind::OpenParen());
   if (!open_paren) {
-    emitter.EmitError<ExpectedParenAfter>(*position,
-                                          {.introducer = introducer});
+    emitter_.EmitError<ExpectedParenAfter>(*position_,
+                                           {.introducer = introducer});
   }
 
   auto expr = ParseExpression();
@@ -1167,8 +1167,8 @@ auto ParseTree::Parser::ParseKeywordStatement(ParseNodeKind kind,
   auto semi =
       ConsumeAndAddLeafNodeIf(TokenKind::Semi(), ParseNodeKind::StatementEnd());
   if (!semi) {
-    emitter.EmitError<ExpectedSemiAfter>(*position,
-                                         {.preceding = keyword_kind});
+    emitter_.EmitError<ExpectedSemiAfter>(*position_,
+                                          {.preceding = keyword_kind});
     // FIXME: Try to skip to a semicolon to recover.
   }
   return AddNode(kind, keyword, start, /*has_error=*/!semi || arg_error);

+ 7 - 7
toolchain/parser/parser_impl.h

@@ -30,12 +30,12 @@ class ParseTree::Parser {
                   TokenDiagnosticEmitter& emitter);
 
   auto AtEndOfFile() -> bool {
-    return tokens.GetKind(*position) == TokenKind::EndOfFile();
+    return tokens_.GetKind(*position_) == TokenKind::EndOfFile();
   }
 
   // Gets the kind of the next token to be consumed.
   [[nodiscard]] auto NextTokenKind() const -> TokenKind {
-    return tokens.GetKind(*position);
+    return tokens_.GetKind(*position_);
   }
 
   // Tests whether the next token to be consumed is of the specified kind.
@@ -268,15 +268,15 @@ class ParseTree::Parser {
   // Parses a pattern.
   auto ParsePattern(PatternKind kind) -> llvm::Optional<Node>;
 
-  ParseTree& tree;
-  TokenizedBuffer& tokens;
-  TokenDiagnosticEmitter& emitter;
+  ParseTree& tree_;
+  TokenizedBuffer& tokens_;
+  TokenDiagnosticEmitter& emitter_;
 
   // The current position within the token buffer. Never equal to `end`.
-  TokenizedBuffer::TokenIterator position;
+  TokenizedBuffer::TokenIterator position_;
   // The end position of the token buffer. There will always be an `EndOfFile`
   // token between `position` (inclusive) and `end` (exclusive).
-  TokenizedBuffer::TokenIterator end;
+  TokenizedBuffer::TokenIterator end_;
 };
 
 }  // namespace Carbon

+ 1 - 1
toolchain/parser/precedence.cpp

@@ -318,7 +318,7 @@ auto PrecedenceGroup::ForTrailing(TokenKind kind, bool infix)
 auto PrecedenceGroup::GetPriority(PrecedenceGroup left, PrecedenceGroup right)
     -> OperatorPriority {
   static constexpr OperatorPriorityTable Lookup;
-  return Lookup.table[left.level][right.level];
+  return Lookup.table[left.level_][right.level_];
 }
 
 }  // namespace Carbon

+ 10 - 11
toolchain/parser/precedence.h

@@ -29,13 +29,9 @@ enum class Associativity : int8_t {
 
 // A precedence group associated with an operator or expression.
 class PrecedenceGroup {
- private:
-  // We rely on implicit conversions via `int8_t` for enumerators defined in the
-  // implementation.
-  // NOLINTNEXTLINE(google-explicit-constructor)
-  PrecedenceGroup(int8_t level) : level(level) {}
-
  public:
+  struct Trailing;
+
   // Objects of this type should only be constructed using the static factory
   // functions below.
   PrecedenceGroup() = delete;
@@ -56,8 +52,6 @@ class PrecedenceGroup {
   // return llvm::None if the given token is not a prefix operator.
   static auto ForLeading(TokenKind kind) -> llvm::Optional<PrecedenceGroup>;
 
-  struct Trailing;
-
   // Look up the operator information of the given infix or postfix operator
   // token, or return llvm::None if the given token is not an infix or postfix
   // operator. `infix` indicates whether this is a valid infix operator, but is
@@ -67,10 +61,10 @@ class PrecedenceGroup {
       -> llvm::Optional<Trailing>;
 
   friend auto operator==(PrecedenceGroup lhs, PrecedenceGroup rhs) -> bool {
-    return lhs.level == rhs.level;
+    return lhs.level_ == rhs.level_;
   }
   friend auto operator!=(PrecedenceGroup lhs, PrecedenceGroup rhs) -> bool {
-    return lhs.level != rhs.level;
+    return lhs.level_ != rhs.level_;
   }
 
   // Compare the precedence levels for two adjacent operators.
@@ -83,8 +77,13 @@ class PrecedenceGroup {
   }
 
  private:
+  // We rely on implicit conversions via `int8_t` for enumerators defined in the
+  // implementation.
+  // NOLINTNEXTLINE(google-explicit-constructor)
+  PrecedenceGroup(int8_t level) : level_(level) {}
+
   // The precedence level.
-  int8_t level;
+  int8_t level_;
 };
 
 // Precedence information for a trailing operator.

+ 10 - 10
toolchain/source/source_buffer.h

@@ -71,6 +71,16 @@ class SourceBuffer {
   [[nodiscard]] auto Text() const -> llvm::StringRef { return text_; }
 
  private:
+  SourceBuffer(llvm::StringRef fake_filename, std::string buffer_text)
+      : filename_(fake_filename.str()),
+        is_string_rep_(true),
+        string_storage_(std::move(buffer_text)) {
+    text_ = string_storage_;
+  }
+
+  explicit SourceBuffer(llvm::StringRef filename)
+      : filename_(filename.str()), text_(), is_string_rep_(false) {}
+
   std::string filename_;
 
   llvm::StringRef text_;
@@ -83,16 +93,6 @@ class SourceBuffer {
   union {
     std::string string_storage_;
   };
-
-  explicit SourceBuffer(llvm::StringRef fake_filename, std::string buffer_text)
-      : filename_(fake_filename.str()),
-        is_string_rep_(true),
-        string_storage_(std::move(buffer_text)) {
-    text_ = string_storage_;
-  }
-
-  explicit SourceBuffer(llvm::StringRef filename)
-      : filename_(filename.str()), text_(), is_string_rep_(false) {}
 };
 
 }  // namespace Carbon