Просмотр исходного кода

Require braces on conditions and loops. (#218)

The rationale and rule for this was added in #194 to the C++ style guide
we are using for Carbon.

I've applied the automated fixes from running `clang-tidy` over all the
code, and then run `clang-format` afterward.

There are a few places where `clang-format` fixed a formatting issue
that snuck through in prior commits. These were rare enough that it
didn't seem worth splitting them out into a separate change.
Chandler Carruth 5 лет назад
Родитель
Commit
27386db279

+ 1 - 1
.clang-tidy

@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 
 ---
-Checks: '-*,readability-identifier-naming'
+Checks: '-*,readability-braces-around-statements,readability-identifier-naming'
 WarningsAsErrors: true
 CheckOptions:
   - { key: readability-identifier-naming.ClassCase, value: CamelCase }

+ 42 - 23
lexer/tokenized_buffer.cpp

@@ -105,8 +105,9 @@ class TokenizedBuffer::Lexer {
           ++current_column;
           source_text = source_text.drop_front();
         }
-        if (source_text.empty())
+        if (source_text.empty()) {
           break;
+        }
       }
 
       switch (source_text.front()) {
@@ -121,8 +122,9 @@ class TokenizedBuffer::Lexer {
           // If this is the last character in the source, directly return here
           // to avoid creating an empty line.
           source_text = source_text.drop_front();
-          if (source_text.empty())
+          if (source_text.empty()) {
             return false;
+          }
 
           // Otherwise, add a line and set up to continue lexing.
           current_line = buffer.AddLine(
@@ -153,11 +155,13 @@ class TokenizedBuffer::Lexer {
 
   auto LexIntegerLiteral(llvm::StringRef& source_text) -> bool {
     llvm::StringRef int_text = TakeLeadingIntegerLiteral(source_text);
-    if (int_text.empty())
+    if (int_text.empty()) {
       return false;
+    }
     llvm::APInt int_value;
-    if (int_text.getAsInteger(/*Radix=*/0, int_value))
+    if (int_text.getAsInteger(/*Radix=*/0, int_value)) {
       return false;
+    }
 
     int int_column = current_column;
     current_column += int_text.size();
@@ -181,8 +185,9 @@ class TokenizedBuffer::Lexer {
   .StartsWith(Spelling, TokenKind::Name())
 #include "lexer/token_registry.def"
                          .Default(TokenKind::Error());
-    if (kind == TokenKind::Error())
+    if (kind == TokenKind::Error()) {
       return false;
+    }
 
     if (!set_indent) {
       current_line_info->indent = current_column;
@@ -203,8 +208,9 @@ class TokenizedBuffer::Lexer {
     }
 
     // Only closing symbols need further special handling.
-    if (!kind.IsClosingSymbol())
+    if (!kind.IsClosingSymbol()) {
       return true;
+    }
 
     TokenInfo& closing_token_info = buffer.GetTokenInfo(token);
 
@@ -232,14 +238,16 @@ class TokenizedBuffer::Lexer {
   // Closes all open groups that cannot remain open across the symbol `K`.
   // Users may pass `Error` to close all open groups.
   auto CloseInvalidOpenGroups(TokenKind kind) -> void {
-    if (!kind.IsClosingSymbol() && kind != TokenKind::Error())
+    if (!kind.IsClosingSymbol() && kind != TokenKind::Error()) {
       return;
+    }
 
     while (!open_groups.empty()) {
       Token opening_token = open_groups.back();
       TokenKind opening_kind = buffer.GetTokenInfo(opening_token).kind;
-      if (kind == opening_kind.GetClosingSymbol())
+      if (kind == opening_kind.GetClosingSymbol()) {
         return;
+      }
 
       open_groups.pop_back();
       buffer.has_errors = true;
@@ -263,14 +271,16 @@ class TokenizedBuffer::Lexer {
   auto GetOrCreateIdentifier(llvm::StringRef text) -> Identifier {
     auto insert_result = buffer.identifier_map.insert(
         {text, Identifier(buffer.identifier_infos.size())});
-    if (insert_result.second)
+    if (insert_result.second) {
       buffer.identifier_infos.push_back({text});
+    }
     return insert_result.first->second;
   }
 
   auto LexKeywordOrIdentifier(llvm::StringRef& source_text) -> bool {
-    if (!llvm::isAlpha(source_text.front()) && source_text.front() != '_')
+    if (!llvm::isAlpha(source_text.front()) && source_text.front() != '_') {
       return false;
+    }
 
     if (!set_indent) {
       current_line_info->indent = current_column;
@@ -307,8 +317,9 @@ class TokenizedBuffer::Lexer {
 
   auto LexError(llvm::StringRef& source_text) -> void {
     llvm::StringRef error_text = source_text.take_while([](char c) {
-      if (llvm::isAlnum(c))
+      if (llvm::isAlnum(c)) {
         return false;
+      }
       switch (c) {
         case '_':
           return false;
@@ -355,12 +366,15 @@ auto TokenizedBuffer::Lex(SourceBuffer& source, DiagnosticEmitter& emitter)
   while (lexer.SkipWhitespace(source_text)) {
     // Each time we find non-whitespace characters, try each kind of token we
     // support lexing, from simplest to most complex.
-    if (lexer.LexSymbolToken(source_text))
+    if (lexer.LexSymbolToken(source_text)) {
       continue;
-    if (lexer.LexKeywordOrIdentifier(source_text))
+    }
+    if (lexer.LexKeywordOrIdentifier(source_text)) {
       continue;
-    if (lexer.LexIntegerLiteral(source_text))
+    }
+    if (lexer.LexIntegerLiteral(source_text)) {
       continue;
+    }
     lexer.LexError(source_text);
   }
 
@@ -387,8 +401,9 @@ auto TokenizedBuffer::GetColumnNumber(Token token) const -> int {
 auto TokenizedBuffer::GetTokenText(Token token) const -> llvm::StringRef {
   auto& token_info = GetTokenInfo(token);
   llvm::StringRef fixed_spelling = token_info.kind.GetFixedSpelling();
-  if (!fixed_spelling.empty())
+  if (!fixed_spelling.empty()) {
     return fixed_spelling;
+  }
 
   if (token_info.kind == TokenKind::Error()) {
     auto& line_info = GetLineInfo(token_info.token_line);
@@ -475,8 +490,8 @@ auto TokenizedBuffer::PrintWidths::Widen(const PrintWidths& widths) -> void {
 auto TokenizedBuffer::GetTokenPrintWidths(Token token) const -> PrintWidths {
   PrintWidths widths = {};
   // Compute the printed width of the various token information. When numbers
-  // here are printed in decimal, the number of digits needed is is one more than
-  // the log-base-10 of the value.
+  // here are printed in decimal, the number of digits needed is is one more
+  // than the log-base-10 of the value.
   widths.index = std::log10(token_infos.size()) + 1;
   widths.kind = GetKind(token).Name().size();
   widths.line = std::log10(GetLineNumber(token)) + 1;
@@ -486,13 +501,15 @@ auto TokenizedBuffer::GetTokenPrintWidths(Token token) const -> PrintWidths {
 }
 
 auto TokenizedBuffer::Print(llvm::raw_ostream& output_stream) const -> void {
-  if (Tokens().begin() == Tokens().end())
+  if (Tokens().begin() == Tokens().end()) {
     return;
+  }
 
   PrintWidths widths = {};
   widths.index = std::log10(token_infos.size()) + 1;
-  for (Token token : Tokens())
+  for (Token token : Tokens()) {
     widths.Widen(GetTokenPrintWidths(token));
+  }
 
   for (Token token : Tokens()) {
     PrintToken(output_stream, token, widths);
@@ -528,15 +545,17 @@ auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream, Token token,
                            widths.indent),
       token_text);
 
-  if (token_info.kind == TokenKind::Identifier())
+  if (token_info.kind == TokenKind::Identifier()) {
     output_stream << ", identifier: " << GetIdentifier(token).index;
-  else if (token_info.kind.IsOpeningSymbol())
+  } else if (token_info.kind.IsOpeningSymbol()) {
     output_stream << ", closing_token: " << GetMatchedClosingToken(token).index;
-  else if (token_info.kind.IsClosingSymbol())
+  } else if (token_info.kind.IsClosingSymbol()) {
     output_stream << ", opening_token: " << GetMatchedOpeningToken(token).index;
+  }
 
-  if (token_info.is_recovery)
+  if (token_info.is_recovery) {
     output_stream << ", recovery: true";
+  }
 
   output_stream << " }";
 }

+ 6 - 3
lexer/tokenized_buffer_fuzzer.cpp

@@ -12,8 +12,9 @@ namespace Carbon {
 
 extern "C" int LLVMFuzzerTestOneInput(const unsigned char* data, size_t size) {
   // We need two bytes of data to compute a file name length.
-  if (size < 2)
+  if (size < 2) {
     return 0;
+  }
   unsigned short raw_filename_length;
   std::memcpy(&raw_filename_length, data, 2);
   data += 2;
@@ -21,8 +22,9 @@ extern "C" int LLVMFuzzerTestOneInput(const unsigned char* data, size_t size) {
   size_t filename_length = raw_filename_length;
 
   // We need enough data to populate this filename length.
-  if (size < filename_length)
+  if (size < filename_length) {
     return 0;
+  }
   llvm::StringRef filename(reinterpret_cast<const char*>(data),
                            filename_length);
   data += filename_length;
@@ -36,8 +38,9 @@ extern "C" int LLVMFuzzerTestOneInput(const unsigned char* data, size_t size) {
   DiagnosticEmitter emitter = NullDiagnosticEmitter();
 
   auto buffer = TokenizedBuffer::Lex(source, emitter);
-  if (buffer.HasErrors())
+  if (buffer.HasErrors()) {
     return 0;
+  }
 
   // Walk the lexed and tokenized buffer to ensure it isn't corrupt in some way.
   //

+ 3 - 2
lexer/tokenized_buffer_test.cpp

@@ -524,10 +524,11 @@ auto GetAndDropLine(llvm::StringRef& text) -> std::string {
   auto newline_offset = text.find_first_of('\n');
   llvm::StringRef line = text.slice(0, newline_offset);
 
-  if (newline_offset != llvm::StringRef::npos)
+  if (newline_offset != llvm::StringRef::npos) {
     text = text.substr(newline_offset + 1);
-  else
+  } else {
     text = "";
+  }
 
   return line.str();
 }

+ 11 - 6
lexer/tokenized_buffer_test_helpers.h

@@ -34,16 +34,21 @@ struct ExpectedToken {
   friend std::ostream& operator<<(std::ostream& output,
                                   const ExpectedToken& expected) {
     output << "\ntoken: { kind: '" << expected.kind.Name().str();
-    if (expected.line != -1)
+    if (expected.line != -1) {
       output << "', line: " << expected.line;
-    if (expected.column != -1)
+    }
+    if (expected.column != -1) {
       output << ", column " << expected.column;
-    if (expected.indent_column != -1)
+    }
+    if (expected.indent_column != -1) {
       output << ", indent: " << expected.indent_column;
-    if (!expected.text.empty())
+    }
+    if (!expected.text.empty()) {
       output << ", spelling: '" << expected.text.str() << "'";
-    if (expected.recovery)
+    }
+    if (expected.recovery) {
       output << ", recovery: true";
+    }
     output << " }";
     return output;
   }
@@ -166,7 +171,7 @@ MATCHER_P2(IsKeyValueScalars, key, value, "") {
   return true;
 }
 
-}  // namespace testing
+}  // namespace Testing
 }  // namespace Carbon
 
 #endif  // LEXER_TOKENIZED_BUFFER_TEST_HELPERS_H_

+ 22 - 11
parser/parse_test_helpers.h

@@ -78,20 +78,23 @@ inline auto ExpectedNodesMatcher::MatchAndExplain(
   const auto nodes_end = rpo.end();
   auto nodes_it = nodes_begin;
   llvm::SmallVector<const ExpectedNode*, 16> expected_node_stack;
-  for (const ExpectedNode& en : expected_nodes)
+  for (const ExpectedNode& en : expected_nodes) {
     expected_node_stack.push_back(&en);
+  }
   while (!expected_node_stack.empty()) {
-    if (nodes_it == nodes_end)
+    if (nodes_it == nodes_end) {
       // We'll check the size outside the loop.
       break;
+    }
 
     ParseTree::Node n = *nodes_it++;
     int postorder_index = n.GetIndex();
 
     const ExpectedNode& expected_node = *expected_node_stack.pop_back_val();
 
-    if (!MatchExpectedNode(tree, n, postorder_index, expected_node, output))
+    if (!MatchExpectedNode(tree, n, postorder_index, expected_node, output)) {
       matches = false;
+    }
 
     if (expected_node.skip_subtree) {
       assert(expected_node.children.empty() &&
@@ -122,8 +125,9 @@ inline auto ExpectedNodesMatcher::MatchAndExplain(
     // causes the siblings to be visited in reverse order from the expected
     // list. However, we use a stack which inherently does this reverse for us
     // so we simply append to the stack here.
-    for (const ExpectedNode& child_expected_node : expected_node.children)
+    for (const ExpectedNode& child_expected_node : expected_node.children) {
       expected_node_stack.push_back(&child_expected_node);
+    }
   }
 
   // We don't directly check the size because we allow expectations to skip
@@ -162,30 +166,36 @@ inline auto ExpectedNodesMatcher::DescribeTo(std::ostream* output_ptr) const
   // of the actual parse tree.
   llvm::SmallVector<std::pair<const ExpectedNode*, int>, 16>
       expected_node_stack;
-  for (const ExpectedNode& expected_node : llvm::reverse(expected_nodes))
+  for (const ExpectedNode& expected_node : llvm::reverse(expected_nodes)) {
     expected_node_stack.push_back({&expected_node, 0});
+  }
 
   while (!expected_node_stack.empty()) {
     const ExpectedNode& expected_node = *expected_node_stack.back().first;
     int depth = expected_node_stack.back().second;
     expected_node_stack.pop_back();
-    for (int indent_count = 0; indent_count < depth; ++indent_count)
+    for (int indent_count = 0; indent_count < depth; ++indent_count) {
       output << "  ";
+    }
     output << "{kind: '" << expected_node.kind.GetName().str() << "'";
-    if (!expected_node.text.empty())
+    if (!expected_node.text.empty()) {
       output << ", text: '" << expected_node.text << "'";
-    if (expected_node.has_error)
+    }
+    if (expected_node.has_error) {
       output << ", has_error: yes";
-    if (expected_node.skip_subtree)
+    }
+    if (expected_node.skip_subtree) {
       output << ", skip_subtree: yes";
+    }
 
     if (!expected_node.children.empty()) {
       assert(!expected_node.skip_subtree &&
              "Must not have children and skip a subtree!");
       output << ", children: [\n";
       for (const ExpectedNode& child_expected_node :
-           llvm::reverse(expected_node.children))
+           llvm::reverse(expected_node.children)) {
         expected_node_stack.push_back({&child_expected_node, depth + 1});
+      }
       // If we have children, we know we're not popping off.
       continue;
     }
@@ -199,9 +209,10 @@ inline auto ExpectedNodesMatcher::DescribeTo(std::ostream* output_ptr) const
              "Cannot have an increase in depth on a leaf node!");
       // The distance we need to pop is the difference in depth.
       int pop_depth = depth - expected_node_stack.back().second;
-      for (int pop_count = 0; pop_count < pop_depth; ++pop_count)
+      for (int pop_count = 0; pop_count < pop_depth; ++pop_count) {
         // Close both the children array and the node mapping.
         output << "]}";
+      }
     }
     output << "\n";
   }

+ 10 - 5
parser/parse_tree.cpp

@@ -79,8 +79,9 @@ auto ParseTree::Print(llvm::raw_ostream& output) const -> void {
   // The roots, like siblings, are in RPO (so reversed), but we add them in
   // order here because we'll pop off the stack effectively reversing then.
   llvm::SmallVector<std::pair<Node, int>, 16> node_stack;
-  for (Node n : Roots())
+  for (Node n : Roots()) {
     node_stack.push_back({n, 0});
+  }
 
   while (!node_stack.empty()) {
     Node n;
@@ -96,8 +97,9 @@ auto ParseTree::Print(llvm::raw_ostream& output) const -> void {
     output << "{node_index: " << n.index << ", kind: '" << n_impl.kind.GetName()
            << "', text: '" << tokens->GetTokenText(n_impl.token) << "'";
 
-    if (n_impl.has_error)
+    if (n_impl.has_error) {
       output << ", has_error: yes";
+    }
 
     if (n_impl.subtree_size > 1) {
       output << ", subtree_size: " << n_impl.subtree_size;
@@ -105,8 +107,9 @@ auto ParseTree::Print(llvm::raw_ostream& output) const -> void {
       output << ", children: [\n";
       // We append the children in order here as well because they will get
       // reversed when popped off the stack.
-      for (Node sibling_n : Children(n))
+      for (Node sibling_n : Children(n)) {
         node_stack.push_back({sibling_n, depth + 1});
+      }
       continue;
     }
 
@@ -175,16 +178,18 @@ auto ParseTree::Verify() const -> bool {
     while (!ancestors.empty()) {
       ParseTree::Node parent_n = ancestors.back();
       if ((parent_n.GetIndex() -
-           node_impls[parent_n.GetIndex()].subtree_size) != next_index)
+           node_impls[parent_n.GetIndex()].subtree_size) != next_index) {
         break;
+      }
       ancestors.pop_back();
     }
   }
   if (!ancestors.empty()) {
     llvm::errs()
         << "Finished walking the parse tree and there are still ancestors:\n";
-    for (Node ancestor_n : ancestors)
+    for (Node ancestor_n : ancestors) {
       llvm::errs() << "  Node #" << ancestor_n.GetIndex() << "\n";
+    }
     return false;
   }
 

+ 8 - 4
parser/parse_tree_fuzzer.cpp

@@ -16,8 +16,9 @@ namespace Carbon {
 extern "C" int LLVMFuzzerTestOneInput(const unsigned char* data,
                                       std::size_t size) {
   // We need two bytes of data to compute a file name length.
-  if (size < 2)
+  if (size < 2) {
     return 0;
+  }
   unsigned short raw_filename_length;
   std::memcpy(&raw_filename_length, data, 2);
   data += 2;
@@ -25,8 +26,9 @@ extern "C" int LLVMFuzzerTestOneInput(const unsigned char* data,
   std::size_t filename_length = raw_filename_length;
 
   // We need enough data to populate this filename length.
-  if (size < filename_length)
+  if (size < filename_length) {
     return 0;
+  }
   llvm::StringRef filename(reinterpret_cast<const char*>(data),
                            filename_length);
   data += filename_length;
@@ -41,14 +43,16 @@ extern "C" int LLVMFuzzerTestOneInput(const unsigned char* data,
 
   // Lex the input.
   auto tokens = TokenizedBuffer::Lex(source, emitter);
-  if (tokens.HasErrors())
+  if (tokens.HasErrors()) {
     return 0;
+  }
 
   // Now parse it into a tree. Note that parsing will (when asserts are enabled)
   // walk the entire tree to verify it so we don't have to do that here.
   ParseTree tree = ParseTree::Parse(tokens, emitter);
-  if (tree.HasErrors())
+  if (tree.HasErrors()) {
     return 0;
+  }
 
   // In the absence of parse errors, we should have exactly as many nodes as
   // tokens.

+ 3 - 2
parser/parse_tree_test.cpp

@@ -376,10 +376,11 @@ auto GetAndDropLine(llvm::StringRef& s) -> std::string {
   auto newline_offset = s.find_first_of('\n');
   llvm::StringRef line = s.slice(0, newline_offset);
 
-  if (newline_offset != llvm::StringRef::npos)
+  if (newline_offset != llvm::StringRef::npos) {
     s = s.substr(newline_offset + 1);
-  else
+  } else {
     s = "";
+  }
 
   return line.str();
 }

+ 20 - 10
parser/parser_impl.cpp

@@ -25,8 +25,9 @@ auto ParseTree::Parser::Parse(TokenizedBuffer& tokens,
   tree.node_impls.reserve(tokens.Size());
 
   Parser parser(tree, tokens);
-  while (parser.position != parser.end)
+  while (parser.position != parser.end) {
     parser.ParseDeclaration();
+  }
 
   assert(tree.Verify() && "Parse tree built but does not verify!");
   return tree;
@@ -41,8 +42,9 @@ auto ParseTree::Parser::Consume(TokenKind kind) -> TokenizedBuffer::Token {
 
 auto ParseTree::Parser::ConsumeIf(TokenKind kind)
     -> llvm::Optional<TokenizedBuffer::Token> {
-  if (tokens.GetKind(*position) != kind)
+  if (tokens.GetKind(*position) != kind) {
     return {};
+  }
 
   return *position++;
 }
@@ -58,8 +60,9 @@ auto ParseTree::Parser::ConsumeAndAddLeafNodeIf(TokenKind t_kind,
                                                 ParseNodeKind n_kind)
     -> llvm::Optional<Node> {
   auto t = ConsumeIf(t_kind);
-  if (!t)
+  if (!t) {
     return {};
+  }
 
   return AddLeafNode(n_kind, *t);
 }
@@ -96,8 +99,9 @@ auto ParseTree::Parser::AddNode(ParseNodeKind n_kind, TokenizedBuffer::Token t,
 
   Node n(tree.node_impls.size());
   tree.node_impls.push_back(NodeImpl(n_kind, t, subtree_size));
-  if (has_error)
+  if (has_error) {
     MarkNodeError(n);
+  }
 
   start.node_added = true;
   return n;
@@ -107,8 +111,9 @@ auto ParseTree::Parser::SkipMatchingGroup() -> bool {
   assert(position != end && "Cannot skip at the end!");
   TokenizedBuffer::Token t = *position;
   TokenKind t_kind = tokens.GetKind(t);
-  if (!t_kind.IsOpeningSymbol())
+  if (!t_kind.IsOpeningSymbol()) {
     return false;
+  }
 
   position = std::next(
       TokenizedBuffer::TokenIterator(tokens.GetMatchedClosingToken(t)));
@@ -118,8 +123,9 @@ auto ParseTree::Parser::SkipMatchingGroup() -> bool {
 auto ParseTree::Parser::SkipPastLikelyDeclarationEnd(
     TokenizedBuffer::Token skip_root, bool is_inside_declaration)
     -> llvm::Optional<Node> {
-  if (position == end)
+  if (position == end) {
     return {};
+  }
 
   TokenizedBuffer::Line root_line = tokens.GetLine(skip_root);
   int root_line_indent = tokens.GetIndentColumnNumber(root_line);
@@ -129,18 +135,20 @@ auto ParseTree::Parser::SkipPastLikelyDeclarationEnd(
   auto is_same_line_or_indent_greater_than_root =
       [&](TokenizedBuffer::Token t) {
         TokenizedBuffer::Line l = tokens.GetLine(t);
-        if (l == root_line)
+        if (l == root_line) {
           return true;
+        }
 
         return tokens.GetIndentColumnNumber(l) > root_line_indent;
       };
 
   do {
     TokenKind current_kind = tokens.GetKind(*position);
-    if (current_kind == TokenKind::CloseCurlyBrace())
+    if (current_kind == TokenKind::CloseCurlyBrace()) {
       // Immediately bail out if we hit an unmatched close curly, this will
       // pop us up a level of the syntax grouping.
       return {};
+    }
 
     // If we find a semicolon, we want to parse it to end the declaration.
     if (current_kind == TokenKind::Semi()) {
@@ -156,8 +164,9 @@ auto ParseTree::Parser::SkipPastLikelyDeclarationEnd(
     }
 
     // Skip over any matching group of tokens.
-    if (SkipMatchingGroup())
+    if (SkipMatchingGroup()) {
       continue;
+    }
 
     // Otherwise just step forward one token.
     ++position;
@@ -311,9 +320,10 @@ auto ParseTree::Parser::ParseFunctionDeclaration() -> Node {
     llvm::errs() << "ERROR: Function declaration not terminated by a "
                     "semicolon on line "
                  << tokens.GetLineNumber(close_paren) << "!\n";
-    if (tokens.GetLine(*position) == tokens.GetLine(close_paren))
+    if (tokens.GetLine(*position) == tokens.GetLine(close_paren)) {
       // Only need to skip if we've not already found a new line.
       SkipPastLikelyDeclarationEnd(function_intro_token);
+    }
     return add_error_function_node();
   }
 

+ 11 - 6
source/source_buffer.cpp

@@ -3,7 +3,6 @@
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 
 #include "source/source_buffer.h"
-#include "llvm/ADT/ScopeExit.h"
 
 #include <errno.h>
 #include <fcntl.h>
@@ -14,6 +13,8 @@
 
 #include <system_error>
 
+#include "llvm/ADT/ScopeExit.h"
+
 namespace Carbon {
 
 auto SourceBuffer::CreateFromText(llvm::Twine text, llvm::StringRef filename)
@@ -32,16 +33,19 @@ auto SourceBuffer::CreateFromFile(llvm::StringRef filename)
 
   errno = 0;
   int file_descriptor = open(buffer.filename_.c_str(), O_RDONLY);
-  if (file_descriptor == -1)
+  if (file_descriptor == -1) {
     return ErrnoToError(errno);
+  }
 
   // Now that we have an open file, we need to close it on any error.
-  auto closer = llvm::make_scope_exit([file_descriptor] { close(file_descriptor); });
+  auto closer =
+      llvm::make_scope_exit([file_descriptor] { close(file_descriptor); });
 
   struct stat stat_buffer = {};
   errno = 0;
-  if (fstat(file_descriptor, &stat_buffer) == -1)
+  if (fstat(file_descriptor, &stat_buffer) == -1) {
     return ErrnoToError(errno);
+  }
 
   int64_t size = stat_buffer.st_size;
   if (size == 0) {
@@ -51,9 +55,10 @@ auto SourceBuffer::CreateFromFile(llvm::StringRef filename)
 
   errno = 0;
   void* mapped_text = mmap(nullptr, size, PROT_READ, MAP_PRIVATE | MAP_POPULATE,
-                          file_descriptor, /*offset=*/0);
-  if (mapped_text == MAP_FAILED)
+                           file_descriptor, /*offset=*/0);
+  if (mapped_text == MAP_FAILED) {
     return ErrnoToError(errno);
+  }
 
   errno = 0;
   closer.release();

+ 3 - 1
source/source_buffer.h

@@ -85,7 +85,9 @@ class SourceBuffer {
   };
 
   explicit SourceBuffer(llvm::StringRef fake_filename, std::string buffer_text)
-      : filename_(fake_filename.str()), is_string_rep_(true), string_storage_(buffer_text) {
+      : filename_(fake_filename.str()),
+        is_string_rep_(true),
+        string_storage_(buffer_text) {
     text_ = string_storage_;
   }
 

+ 4 - 2
source/source_buffer_test.cpp

@@ -21,7 +21,8 @@ TEST(SourceBufferTest, StringRep) {
   EXPECT_EQ("Hello World", buffer.Text());
 
   // Give a custom filename.
-  auto buffer2 = SourceBuffer::CreateFromText("Hello World Again!", "/custom/text");
+  auto buffer2 =
+      SourceBuffer::CreateFromText("Hello World Again!", "/custom/text");
   EXPECT_EQ("/custom/text", buffer2.Filename());
   EXPECT_EQ("Hello World Again!", buffer2.Text());
 }
@@ -29,7 +30,8 @@ TEST(SourceBufferTest, StringRep) {
 auto CreateTestFile(llvm::StringRef text) -> std::string {
   int fd = -1;
   llvm::SmallString<1024> path;
-  auto error_code = llvm::sys::fs::createTemporaryFile("test_file", ".txt", fd, path);
+  auto error_code =
+      llvm::sys::fs::createTemporaryFile("test_file", ".txt", fd, path);
   if (error_code) {
     llvm::report_fatal_error(llvm::Twine("Failed to create temporary file: ") +
                              error_code.message());