Explorar el Código

Enable most relevant clang-tidy checks and fix uncovered issues. (#220)

Most of these were fixed automatically (including things like adding
`[[nodiscard]]` and such). A number of others required manual edits.
I think all of them were pretty nice improvements.

There were a few places where the issues really stem from external
constraints and I've disabled the checks: GoogleTest macros or the
specific LibFuzzer entry points.

The only other places I disabled are the implicit conversions to
a private `enum` in the classes wrapping those `enum`s. These implicit
conversions are necessarily implicit to serve their only purpose:
enabling their use in `switch` statements and `case` labels. When these
were highlighted, it showed that one of these was actually converting to
an *`int`*. I've switched that to use the private `enum` instead as
doing so is important to enable warnings on non-covering `switch`
statements over that `enum`. And indeed, there is a `switch` that was
implicitly relying on falling through in this way, so I've added the
explicit documentation of the intentional pattern to address that
warning.

Sorry this is so large, all of this somewhat fell out of enabling the
`clang-tidy` checks. If it is too difficult to review as a lump, I can
work on breaking it apart as needed. Just let me know.
Chandler Carruth hace 5 años
padre
commit
d4a2d435b8

+ 7 - 1
.clang-tidy

@@ -3,7 +3,13 @@
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 
 ---
-Checks: '-*,readability-braces-around-statements,readability-identifier-naming'
+Checks:
+  -*, bugprone-*, google-*, -google-readability-todo,
+  misc-definitions-in-headers, misc-misplaced-const, misc-redundant-expression,
+  misc-static-assert, misc-unconventional-assign-operator,
+  misc-uniqueptr-reset-release, misc-unused-*, modernize-*,
+  -modernize-avoid-c-arrays, performance-*,
+  readability-braces-around-statements, readability-identifier-naming
 WarningsAsErrors: true
 CheckOptions:
   - { key: readability-identifier-naming.ClassCase, value: CamelCase }

+ 1 - 1
diagnostics/diagnostic_emitter.h

@@ -34,7 +34,7 @@ class DiagnosticEmitter {
 
   explicit DiagnosticEmitter(Callback callback)
       : callback_(std::move(callback)) {}
-  ~DiagnosticEmitter() {}
+  ~DiagnosticEmitter() = default;
 
   // Emits an error unconditionally.  `F` is guaranteed to be called.
   template <typename DiagnosticT>

+ 2 - 1
diagnostics/diagnostic_emitter_test.cpp

@@ -12,7 +12,8 @@
 namespace Carbon {
 namespace {
 
-using namespace ::testing;
+using ::testing::ElementsAre;
+using ::testing::Eq;
 
 struct FakeDiagnostic {
   static constexpr llvm::StringLiteral ShortName = "fake-diagnostic";

+ 22 - 17
lexer/token_kind.h

@@ -5,8 +5,7 @@
 #ifndef LEXER_TOKEN_KIND_H_
 #define LEXER_TOKEN_KIND_H_
 
-#include <stdint.h>
-
+#include <cstdint>
 #include <iterator>
 
 #include "llvm/ADT/StringRef.h"
@@ -20,8 +19,12 @@ class TokenKind {
   };
 
  public:
-#define CARBON_TOKEN(TokenName) \
-  static constexpr auto TokenName()->TokenKind { return KindEnum::TokenName; }
+  // The formatting for this macro is weird due to a `clang-format` bug. See
+  // https://bugs.llvm.org/show_bug.cgi?id=48320 for details.
+#define CARBON_TOKEN(TokenName)                  \
+  static constexpr auto TokenName()->TokenKind { \
+    return TokenKind(KindEnum::TokenName);       \
+  }
 #include "lexer/token_registry.def"
 
   // The default constructor is deleted as objects of this type should always be
@@ -36,7 +39,7 @@ class TokenKind {
   }
 
   // Get a friendly name for the token for logging or debugging.
-  auto Name() const -> llvm::StringRef;
+  [[nodiscard]] auto Name() const -> llvm::StringRef;
 
   // Test whether this kind of token is a simple symbol sequence (punctuation,
   // not letters) that appears directly in the source text and can be
@@ -44,41 +47,43 @@ class TokenKind {
   // inside of other tokens, outside of the contents of other tokens they
   // don't require any specific characters before or after to distinguish them
   // in the source. Returns false otherwise.
-  auto IsSymbol() const -> bool;
+  [[nodiscard]] auto IsSymbol() const -> bool;
 
   // Test whether this kind of token is a grouping symbol (part of an opening
   // and closing pair that must always be matched in the token stream).
-  auto IsGroupingSymbol() const -> bool;
+  [[nodiscard]] auto IsGroupingSymbol() const -> bool;
 
   // Test whether this kind of token is an opening symbol for a group.
-  auto IsOpeningSymbol() const -> bool;
+  [[nodiscard]] auto IsOpeningSymbol() const -> bool;
 
   // Returns the associated closing symbol for an opening symbol.
   //
   // The token kind must be an opening symbol.
-  auto GetClosingSymbol() const -> TokenKind;
+  [[nodiscard]] auto GetClosingSymbol() const -> TokenKind;
 
   // Test whether this kind of token is an closing symbol for a group.
-  auto IsClosingSymbol() const -> bool;
+  [[nodiscard]] auto IsClosingSymbol() const -> bool;
 
   // Returns the associated opening symbol for a closing symbol.
   //
   // The token kind must be an closing symbol.
-  auto GetOpeningSymbol() const -> TokenKind;
+  [[nodiscard]] auto GetOpeningSymbol() const -> TokenKind;
 
   // Test whether this kind of token is a keyword.
-  auto IsKeyword() const -> bool;
+  [[nodiscard]] auto IsKeyword() const -> bool;
 
   // If this token kind has a fixed spelling when in source code, returns it.
   // Otherwise returns an empty string.
-  auto GetFixedSpelling() const -> llvm::StringRef;
+  [[nodiscard]] auto GetFixedSpelling() const -> llvm::StringRef;
 
-  // Enable implicit conversion to an int, including in a `constexpr` context,
-  // to enable usage in `switch` and `case`.
-  constexpr operator int() const { return static_cast<int>(kind_value); }
+  // Enable conversion to our private enum, including in a `constexpr` context,
+  // to enable usage in `switch` and `case`. The enum remains private and
+  // nothing else should be using this.
+  // NOLINTNEXTLINE(google-explicit-constructor)
+  constexpr operator KindEnum() const { return kind_value; }
 
  private:
-  constexpr TokenKind(KindEnum kind_value) : kind_value(kind_value) {}
+  constexpr explicit TokenKind(KindEnum kind_value) : kind_value(kind_value) {}
 
   KindEnum kind_value;
 };

+ 1 - 1
lexer/token_kind_test.cpp

@@ -4,7 +4,7 @@
 
 #include "lexer/token_kind.h"
 
-#include <string.h>
+#include <cstring>
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"

+ 22 - 12
lexer/tokenized_buffer.cpp

@@ -322,9 +322,7 @@ class TokenizedBuffer::Lexer {
       }
       switch (c) {
         case '_':
-          return false;
         case '\t':
-          return false;
         case '\n':
           return false;
       }
@@ -487,16 +485,28 @@ auto TokenizedBuffer::PrintWidths::Widen(const PrintWidths& widths) -> void {
   indent = std::max(widths.indent, indent);
 }
 
+// Compute the printed width of a number. When numbers are printed in decimal,
+// the number of digits needed is is one more than the log-base-10 of the value.
+// We handle a value of `zero` explicitly.
+//
+// This routine requires its argument to be *non-negative*.
+static auto ComputeDecimalPrintedWidth(int number) -> int {
+  assert(number >= 0 && "Negative numbers are not supported.");
+  if (number == 0) {
+    return 1;
+  }
+
+  return static_cast<int>(std::log10(number)) + 1;
+}
+
 auto TokenizedBuffer::GetTokenPrintWidths(Token token) const -> PrintWidths {
   PrintWidths widths = {};
-  // Compute the printed width of the various token information. When numbers
-  // here are printed in decimal, the number of digits needed is is one more
-  // than the log-base-10 of the value.
-  widths.index = std::log10(token_infos.size()) + 1;
+  widths.index = ComputeDecimalPrintedWidth(token_infos.size());
   widths.kind = GetKind(token).Name().size();
-  widths.line = std::log10(GetLineNumber(token)) + 1;
-  widths.column = std::log10(GetColumnNumber(token)) + 1;
-  widths.indent = std::log10(GetIndentColumnNumber(GetLine(token))) + 1;
+  widths.line = ComputeDecimalPrintedWidth(GetLineNumber(token));
+  widths.column = ComputeDecimalPrintedWidth(GetColumnNumber(token));
+  widths.indent =
+      ComputeDecimalPrintedWidth(GetIndentColumnNumber(GetLine(token)));
   return widths;
 }
 
@@ -506,7 +516,7 @@ auto TokenizedBuffer::Print(llvm::raw_ostream& output_stream) const -> void {
   }
 
   PrintWidths widths = {};
-  widths.index = std::log10(token_infos.size()) + 1;
+  widths.index = ComputeDecimalPrintedWidth((token_infos.size()));
   for (Token token : Tokens()) {
     widths.Widen(GetTokenPrintWidths(token));
   }
@@ -570,7 +580,7 @@ auto TokenizedBuffer::GetLineInfo(Line line) const -> const LineInfo& {
 
 auto TokenizedBuffer::AddLine(LineInfo info) -> Line {
   line_infos.push_back(info);
-  return Line(line_infos.size() - 1);
+  return Line(static_cast<int>(line_infos.size()) - 1);
 }
 
 auto TokenizedBuffer::GetTokenInfo(Token token) -> TokenInfo& {
@@ -583,7 +593,7 @@ auto TokenizedBuffer::GetTokenInfo(Token token) const -> const TokenInfo& {
 
 auto TokenizedBuffer::AddToken(TokenInfo info) -> Token {
   token_infos.push_back(info);
-  return Token(token_infos.size() - 1);
+  return Token(static_cast<int>(token_infos.size()) - 1);
 }
 
 }  // namespace Carbon

+ 64 - 42
lexer/tokenized_buffer.h

@@ -5,8 +5,7 @@
 #ifndef LEXER_TOKENIZED_BUFFER_H_
 #define LEXER_TOKENIZED_BUFFER_H_
 
-#include <stdint.h>
-
+#include <cstdint>
 #include <iterator>
 
 #include "diagnostics/diagnostic_emitter.h"
@@ -48,12 +47,20 @@ class TokenizedBuffer {
    public:
     Token() = default;
 
-    bool operator==(const Token& rhs) const { return index == rhs.index; }
-    bool operator!=(const Token& rhs) const { return index != rhs.index; }
-    bool operator<(const Token& rhs) const { return index < rhs.index; }
-    bool operator<=(const Token& rhs) const { return index <= rhs.index; }
-    bool operator>(const Token& rhs) const { return index > rhs.index; }
-    bool operator>=(const Token& rhs) const { return index >= rhs.index; }
+    auto operator==(const Token& rhs) const -> bool {
+      return index == rhs.index;
+    }
+    auto operator!=(const Token& rhs) const -> bool {
+      return index != rhs.index;
+    }
+    auto operator<(const Token& rhs) const -> bool { return index < rhs.index; }
+    auto operator<=(const Token& rhs) const -> bool {
+      return index <= rhs.index;
+    }
+    auto operator>(const Token& rhs) const -> bool { return index > rhs.index; }
+    auto operator>=(const Token& rhs) const -> bool {
+      return index >= rhs.index;
+    }
 
    private:
     friend class TokenizedBuffer;
@@ -78,12 +85,20 @@ class TokenizedBuffer {
    public:
     Line() = default;
 
-    bool operator==(const Line& rhs) const { return index == rhs.index; }
-    bool operator!=(const Line& rhs) const { return index != rhs.index; }
-    bool operator<(const Line& rhs) const { return index < rhs.index; }
-    bool operator<=(const Line& rhs) const { return index <= rhs.index; }
-    bool operator>(const Line& rhs) const { return index > rhs.index; }
-    bool operator>=(const Line& rhs) const { return index >= rhs.index; }
+    auto operator==(const Line& rhs) const -> bool {
+      return index == rhs.index;
+    }
+    auto operator!=(const Line& rhs) const -> bool {
+      return index != rhs.index;
+    }
+    auto operator<(const Line& rhs) const -> bool { return index < rhs.index; }
+    auto operator<=(const Line& rhs) const -> bool {
+      return index <= rhs.index;
+    }
+    auto operator>(const Line& rhs) const -> bool { return index > rhs.index; }
+    auto operator>=(const Line& rhs) const -> bool {
+      return index >= rhs.index;
+    }
 
    private:
     friend class TokenizedBuffer;
@@ -110,8 +125,12 @@ class TokenizedBuffer {
 
     // Most normal APIs are provided by the `TokenizedBuffer`, we just support
     // basic comparison operations.
-    bool operator==(const Identifier& rhs) const { return index == rhs.index; }
-    bool operator!=(const Identifier& rhs) const { return index != rhs.index; }
+    auto operator==(const Identifier& rhs) const -> bool {
+      return index == rhs.index;
+    }
+    auto operator!=(const Identifier& rhs) const -> bool {
+      return index != rhs.index;
+    }
 
    private:
     friend class TokenizedBuffer;
@@ -130,23 +149,25 @@ class TokenizedBuffer {
 
     explicit TokenIterator(Token token) : token(token) {}
 
-    bool operator==(const TokenIterator& rhs) const {
+    auto operator==(const TokenIterator& rhs) const -> bool {
       return token == rhs.token;
     }
-    bool operator<(const TokenIterator& rhs) const { return token < rhs.token; }
+    auto operator<(const TokenIterator& rhs) const -> bool {
+      return token < rhs.token;
+    }
 
-    const Token& operator*() const { return token; }
-    Token& operator*() { return token; }
+    auto operator*() const -> const Token& { return token; }
+    auto operator*() -> Token& { return token; }
 
-    int operator-(const TokenIterator& rhs) const {
+    auto operator-(const TokenIterator& rhs) const -> int {
       return token.index - rhs.token.index;
     }
 
-    TokenIterator& operator+=(int n) {
+    auto operator+=(int n) -> TokenIterator& {
       token.index += n;
       return *this;
     }
-    TokenIterator& operator-=(int n) {
+    auto operator-=(int n) -> TokenIterator& {
       token.index -= n;
       return *this;
     }
@@ -164,33 +185,34 @@ class TokenizedBuffer {
   //
   // FIXME: Need to pass in some diagnostic machinery to report the details of
   // the error! Right now it prints to stderr.
-  static TokenizedBuffer Lex(SourceBuffer& source, DiagnosticEmitter& emitter);
+  static auto Lex(SourceBuffer& source, DiagnosticEmitter& emitter)
+      -> TokenizedBuffer;
 
   // Returns true if the buffer has errors that are detectable at lexing time.
-  auto HasErrors() const -> bool { return has_errors; }
+  [[nodiscard]] auto HasErrors() const -> bool { return has_errors; }
 
-  llvm::iterator_range<TokenIterator> Tokens() const {
+  [[nodiscard]] auto Tokens() const -> llvm::iterator_range<TokenIterator> {
     return llvm::make_range(TokenIterator(Token(0)),
                             TokenIterator(Token(token_infos.size())));
   }
 
-  auto Size() const -> int { return token_infos.size(); }
+  [[nodiscard]] auto Size() const -> int { return token_infos.size(); }
 
-  auto GetKind(Token token) const -> TokenKind;
-  auto GetLine(Token token) const -> Line;
+  [[nodiscard]] auto GetKind(Token token) const -> TokenKind;
+  [[nodiscard]] auto GetLine(Token token) const -> Line;
 
   // Returns the 1-based line number.
-  auto GetLineNumber(Token token) const -> int;
+  [[nodiscard]] auto GetLineNumber(Token token) const -> int;
 
   // Returns the 1-based column number.
-  auto GetColumnNumber(Token token) const -> int;
+  [[nodiscard]] auto GetColumnNumber(Token token) const -> int;
 
   // Returns the source text lexed into this token.
-  auto GetTokenText(Token token) const -> llvm::StringRef;
+  [[nodiscard]] auto GetTokenText(Token token) const -> llvm::StringRef;
 
   // Returns the identifier associated with this token. The token kind must be
   // an `Identifier`.
-  auto GetIdentifier(Token token) const -> Identifier;
+  [[nodiscard]] auto GetIdentifier(Token token) const -> Identifier;
 
   // Returns the value of an `IntegerLiteral()` token.
   auto GetIntegerLiteral(Token token) const -> llvm::APInt;
@@ -198,26 +220,26 @@ class TokenizedBuffer {
   // Returns the closing token matched with the given opening token.
   //
   // The given token must be an opening token kind.
-  auto GetMatchedClosingToken(Token opening_token) const -> Token;
+  [[nodiscard]] auto GetMatchedClosingToken(Token opening_token) const -> Token;
 
   // Returns the opening token matched with the given closing token.
   //
   // The given token must be a closing token kind.
-  auto GetMatchedOpeningToken(Token closing_token) const -> Token;
+  [[nodiscard]] auto GetMatchedOpeningToken(Token closing_token) const -> Token;
 
   // Returns whether the token was created as part of an error recovery effort.
   //
   // For example, a closing paren inserted to match an unmatched paren.
-  auto IsRecoveryToken(Token token) const -> bool;
+  [[nodiscard]] auto IsRecoveryToken(Token token) const -> bool;
 
   // Returns the 1-based line number.
-  auto GetLineNumber(Line line) const -> int;
+  [[nodiscard]] auto GetLineNumber(Line line) const -> int;
 
   // Returns the 1-based indentation column number.
-  auto GetIndentColumnNumber(Line line) const -> int;
+  [[nodiscard]] auto GetIndentColumnNumber(Line line) const -> int;
 
   // Returns the text for an identifier.
-  auto GetIdentifierText(Identifier id) const -> llvm::StringRef;
+  [[nodiscard]] auto GetIdentifierText(Identifier id) const -> llvm::StringRef;
 
   // Prints a description of the tokenized stream to the provided `raw_ostream`.
   //
@@ -314,12 +336,12 @@ class TokenizedBuffer {
   explicit TokenizedBuffer(SourceBuffer& source) : source(&source) {}
 
   auto GetLineInfo(Line line) -> LineInfo&;
-  auto GetLineInfo(Line line) const -> const LineInfo&;
+  [[nodiscard]] auto GetLineInfo(Line line) const -> const LineInfo&;
   auto AddLine(LineInfo info) -> Line;
   auto GetTokenInfo(Token token) -> TokenInfo&;
-  auto GetTokenInfo(Token token) const -> const TokenInfo&;
+  [[nodiscard]] auto GetTokenInfo(Token token) const -> const TokenInfo&;
   auto AddToken(TokenInfo info) -> Token;
-  auto GetTokenPrintWidths(Token token) const -> PrintWidths;
+  [[nodiscard]] auto GetTokenPrintWidths(Token token) const -> PrintWidths;
   auto PrintToken(llvm::raw_ostream& output_stream, Token token,
                   PrintWidths widths) const -> void;
 

+ 5 - 2
lexer/tokenized_buffer_fuzzer.cpp

@@ -2,6 +2,7 @@
 // Exceptions. See /LICENSE for license information.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 
+#include <cstdint>
 #include <cstring>
 
 #include "diagnostics/diagnostic_emitter.h"
@@ -10,12 +11,14 @@
 
 namespace Carbon {
 
-extern "C" int LLVMFuzzerTestOneInput(const unsigned char* data, size_t size) {
+// NOLINTNEXTLINE: Match the documented fuzzer entry point declaration style.
+extern "C" int LLVMFuzzerTestOneInput(const unsigned char* data,
+                                      std::size_t size) {
   // We need two bytes of data to compute a file name length.
   if (size < 2) {
     return 0;
   }
-  unsigned short raw_filename_length;
+  uint16_t raw_filename_length;
   std::memcpy(&raw_filename_length, data, 2);
   data += 2;
   size -= 2;

+ 4 - 2
lexer/tokenized_buffer_test_helpers.h

@@ -31,8 +31,8 @@ struct ExpectedToken {
   bool recovery = false;
   llvm::StringRef text = "";
 
-  friend std::ostream& operator<<(std::ostream& output,
-                                  const ExpectedToken& expected) {
+  friend auto operator<<(std::ostream& output, const ExpectedToken& expected)
+      -> std::ostream& {
     output << "\ntoken: { kind: '" << expected.kind.Name().str();
     if (expected.line != -1) {
       output << "', line: " << expected.line;
@@ -57,6 +57,7 @@ struct ExpectedToken {
 // TODO: Consider rewriting this into a `TokenEq` matcher which is used inside
 // `ElementsAre`. If that isn't easily done, potentially worth checking for size
 // mismatches first.
+// NOLINTNEXTLINE: Expands from GoogleTest.
 MATCHER_P(HasTokens, raw_all_expected, "") {
   const TokenizedBuffer& buffer = arg;
   llvm::ArrayRef<ExpectedToken> all_expected = raw_all_expected;
@@ -133,6 +134,7 @@ MATCHER_P(HasTokens, raw_all_expected, "") {
   return matches;
 }
 
+// NOLINTNEXTLINE: Expands from GoogleTest.
 MATCHER_P2(IsKeyValueScalars, key, value, "") {
   auto* kv_node = llvm::dyn_cast<llvm::yaml::KeyValueNode>(arg);
   if (!kv_node) {

+ 7 - 4
parser/parse_node_kind.h

@@ -27,8 +27,10 @@ class ParseNodeKind {
  public:
   // The formatting for this macro is weird due to a `clang-format` bug. See
   // https://bugs.llvm.org/show_bug.cgi?id=48320 for details.
-#define CARBON_PARSE_NODE_KIND(Name) \
-  static constexpr auto Name()->ParseNodeKind { return KindEnum::Name; }
+#define CARBON_PARSE_NODE_KIND(Name)            \
+  static constexpr auto Name()->ParseNodeKind { \
+    return ParseNodeKind(KindEnum::Name);       \
+  }
 #include "parser/parse_node_kind.def"
 
   // The default constructor is deleted as objects of this type should always be
@@ -51,12 +53,13 @@ class ParseNodeKind {
 #include "parser/parse_node_kind.def"
   };
 
-  constexpr ParseNodeKind(KindEnum k) : kind(k) {}
+  constexpr explicit ParseNodeKind(KindEnum k) : kind(k) {}
 
   // Enable conversion to our private enum, including in a `constexpr` context,
   // to enable usage in `switch` and `case`. The enum remains private and
   // nothing else should be using this.
-  explicit constexpr operator KindEnum() const { return kind; }
+  // NOLINTNEXTLINE(google-explicit-constructor)
+  constexpr operator KindEnum() const { return kind; }
 
   KindEnum kind;
 };

+ 5 - 4
parser/parse_test_helpers.h

@@ -50,7 +50,8 @@ struct ExpectedNode {
 class ExpectedNodesMatcher
     : public ::testing::MatcherInterface<const ParseTree&> {
  public:
-  ExpectedNodesMatcher(llvm::SmallVector<ExpectedNode, 0> expected_nodess)
+  explicit ExpectedNodesMatcher(
+      llvm::SmallVector<ExpectedNode, 0> expected_nodess)
       : expected_nodes(std::move(expected_nodess)) {}
 
   auto MatchAndExplain(const ParseTree& tree,
@@ -60,7 +61,7 @@ class ExpectedNodesMatcher
 
  private:
   auto MatchExpectedNode(const ParseTree& tree, ParseTree::Node n,
-                         int postorder_index, ExpectedNode expected_node,
+                         int postorder_index, const ExpectedNode& expected_node,
                          ::testing::MatchResultListener& output) const -> bool;
 
   llvm::SmallVector<ExpectedNode, 0> expected_nodes;
@@ -221,8 +222,8 @@ inline auto ExpectedNodesMatcher::DescribeTo(std::ostream* output_ptr) const
 
 inline auto ExpectedNodesMatcher::MatchExpectedNode(
     const ParseTree& tree, ParseTree::Node n, int postorder_index,
-    ExpectedNode expected_node, ::testing::MatchResultListener& output) const
-    -> bool {
+    const ExpectedNode& expected_node,
+    ::testing::MatchResultListener& output) const -> bool {
   bool matches = true;
 
   ParseNodeKind kind = tree.GetNodeKind(n);

+ 2 - 1
parser/parse_tree_fuzzer.cpp

@@ -3,6 +3,7 @@
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 
 #include <cstddef>
+#include <cstdint>
 #include <cstring>
 
 #include "diagnostics/diagnostic_emitter.h"
@@ -19,7 +20,7 @@ extern "C" int LLVMFuzzerTestOneInput(const unsigned char* data,
   if (size < 2) {
     return 0;
   }
-  unsigned short raw_filename_length;
+  uint16_t raw_filename_length;
   std::memcpy(&raw_filename_length, data, 2);
   data += 2;
   size -= 2;

+ 5 - 2
parser/parser_impl.cpp

@@ -52,7 +52,7 @@ auto ParseTree::Parser::ConsumeIf(TokenKind kind)
 auto ParseTree::Parser::AddLeafNode(ParseNodeKind kind,
                                     TokenizedBuffer::Token token) -> Node {
   Node n(tree.node_impls.size());
-  tree.node_impls.push_back(NodeImpl(kind, token, /*SubtreeSize=*/1));
+  tree.node_impls.push_back(NodeImpl(kind, token, /*subtree_size_arg=*/1));
   return n;
 }
 
@@ -94,7 +94,7 @@ auto ParseTree::Parser::AddNode(ParseNodeKind n_kind, TokenizedBuffer::Token t,
                                 SubtreeStart& start, bool has_error) -> Node {
   // The size of the subtree is the change in size from when we started this
   // subtree to now, but including the node we're about to add.
-  int tree_stop_size = tree.node_impls.size() + 1;
+  int tree_stop_size = static_cast<int>(tree.node_impls.size()) + 1;
   int subtree_size = tree_stop_size - start.tree_size;
 
   Node n(tree.node_impls.size());
@@ -346,6 +346,9 @@ auto ParseTree::Parser::ParseDeclaration() -> llvm::Optional<Node> {
       return ParseFunctionDeclaration();
     case TokenKind::Semi():
       return ParseEmptyDeclaration();
+    default:
+      // Errors are handled outside the switch.
+      break;
   }
 
   // We didn't recognize an introducer for a valid declaration.

+ 4 - 3
source/source_buffer.cpp

@@ -4,13 +4,13 @@
 
 #include "source/source_buffer.h"
 
-#include <errno.h>
 #include <fcntl.h>
-#include <stdint.h>
 #include <sys/mman.h>
 #include <sys/stat.h>
 #include <unistd.h>
 
+#include <cerrno>
+#include <cstdint>
 #include <system_error>
 
 #include "llvm/ADT/ScopeExit.h"
@@ -84,7 +84,8 @@ SourceBuffer::~SourceBuffer() {
   if (!text_.empty()) {
     errno = 0;
     int result =
-        munmap(const_cast<void*>((const void*)text_.data()), text_.size());
+        munmap(const_cast<void*>(static_cast<const void*>(text_.data())),
+               text_.size());
     (void)result;
     assert(result != -1 && "Unmapping text failed!");
   }

+ 4 - 4
source/source_buffer.h

@@ -46,7 +46,7 @@ class SourceBuffer {
   // comment for details.
   SourceBuffer(const SourceBuffer& arg) = delete;
 
-  SourceBuffer(SourceBuffer&& arg)
+  SourceBuffer(SourceBuffer&& arg) noexcept
       : filename_(std::move(arg.filename_)),
         text_(arg.text_),
         is_string_rep_(arg.is_string_rep_) {
@@ -66,9 +66,9 @@ class SourceBuffer {
 
   ~SourceBuffer();
 
-  llvm::StringRef Filename() const { return filename_; }
+  [[nodiscard]] auto Filename() const -> llvm::StringRef { return filename_; }
 
-  llvm::StringRef Text() const { return text_; }
+  [[nodiscard]] auto Text() const -> llvm::StringRef { return text_; }
 
  private:
   std::string filename_;
@@ -87,7 +87,7 @@ class SourceBuffer {
   explicit SourceBuffer(llvm::StringRef fake_filename, std::string buffer_text)
       : filename_(fake_filename.str()),
         is_string_rep_(true),
-        string_storage_(buffer_text) {
+        string_storage_(std::move(buffer_text)) {
     text_ = string_storage_;
   }