Forráskód Böngészése

Convert Python type hinting to be PEP-585 Compliant (#4083)

Python [PEP-585](https://peps.python.org/pep-0585/) replaces a number of
`typing` module types with built-in equivalents and `collections.abc`
versions as of Python 3.9, with the aim of eventually removing the
`typing` module versions of these classes altogether. Since the minimum
required version of Python listed in the [Contribution Tools
document](https://github.com/carbon-language/carbon-lang/blob/trunk/docs/project/contribution_tools.md#main-tools)
is 3.9, the type hints in the various python files in the repo can be
updated to this style of type hint without a need for backwards
compatibility.

Feel free to close if this isn't a desired change at this time!

---------

Co-authored-by: Jon Ross-Perkins <jperkins@google.com>
Jack McCluskey 1 éve
szülő
commit
319c3caf99

+ 6 - 5
github_tools/github_helpers.py

@@ -10,8 +10,9 @@ SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 """
 
 import argparse
+from collections.abc import Generator
 import os
-from typing import Dict, Generator, Optional, Tuple
+from typing import Optional
 
 # https://pypi.org/project/gql/
 import gql  # type: ignore
@@ -54,16 +55,16 @@ class Client:
         )
         self._client = gql.Client(transport=transport)
 
-    def execute(self, query: str) -> Dict:
+    def execute(self, query: str) -> dict:
         """Runs a query."""
         return self._client.execute(gql.gql(query))  # type: ignore
 
     def execute_and_paginate(
         self,
         query: str,
-        path: Tuple[str, ...],
-        first_page: Optional[Dict] = None,
-    ) -> Generator[Dict, None, None]:
+        path: tuple[str, ...],
+        first_page: Optional[dict] = None,
+    ) -> Generator[dict, None, None]:
         """Runs a query with pagination.
 
         Arguments:

+ 14 - 14
github_tools/pr_comments.py

@@ -14,7 +14,7 @@ import hashlib
 import os
 import importlib.util
 import textwrap
-from typing import Any, Dict, Callable, List, Optional, Tuple
+from typing import Any, Callable, Optional
 
 
 # Do some extra work to support direct runs.
@@ -121,7 +121,7 @@ class _Comment:
         self.body = body
 
     @staticmethod
-    def from_raw_comment(raw_comment: Dict) -> "_Comment":
+    def from_raw_comment(raw_comment: dict) -> "_Comment":
         """Creates the comment from a raw comment dict."""
         return _Comment(
             raw_comment["author"]["login"],
@@ -168,7 +168,7 @@ class _Comment:
 class _PRComment(_Comment):
     """A comment on the top-level PR."""
 
-    def __init__(self, raw_comment: Dict):
+    def __init__(self, raw_comment: dict):
         super().__init__(
             raw_comment["author"]["login"],
             raw_comment["createdAt"],
@@ -186,7 +186,7 @@ class _PRComment(_Comment):
 class _Thread:
     """A review thread on a line of code."""
 
-    def __init__(self, parsed_args: argparse.Namespace, thread: Dict):
+    def __init__(self, parsed_args: argparse.Namespace, thread: dict):
         self.is_resolved: bool = thread["isResolved"]
 
         comments = thread["comments"]["nodes"]
@@ -261,7 +261,7 @@ class _Thread:
         return False
 
 
-def _parse_args(args: Optional[List[str]] = None) -> argparse.Namespace:
+def _parse_args(args: Optional[list[str]] = None) -> argparse.Namespace:
     """Parses command-line arguments and flags."""
     parser = argparse.ArgumentParser(description="Lists comments on a PR.")
     parser.add_argument(
@@ -337,8 +337,8 @@ def _query(
 
 def _accumulate_pr_comment(
     parsed_args: argparse.Namespace,
-    comments: List[_PRComment],
-    raw_comment: Dict,
+    comments: list[_PRComment],
+    raw_comment: dict,
 ) -> None:
     """Collects top-level comments and reviews."""
     # Elide reviews that have no top-level comment body.
@@ -348,8 +348,8 @@ def _accumulate_pr_comment(
 
 def _accumulate_thread(
     parsed_args: argparse.Namespace,
-    threads_by_path: Dict[str, List[_Thread]],
-    raw_thread: Dict,
+    threads_by_path: dict[str, list[_Thread]],
+    raw_thread: dict,
 ) -> None:
     """Adds threads to threads_by_path for later sorting."""
     thread = _Thread(parsed_args, raw_thread)
@@ -378,10 +378,10 @@ def _accumulate_thread(
 
 def _paginate(
     field_name: str,
-    accumulator: Callable[[argparse.Namespace, Any, Dict], None],
+    accumulator: Callable[[argparse.Namespace, Any, dict], None],
     parsed_args: argparse.Namespace,
     client: github_helpers.Client,
-    main_result: Dict,
+    main_result: dict,
     output: Any,
 ) -> None:
     """Paginates through the given field_name, accumulating results."""
@@ -395,7 +395,7 @@ def _paginate(
 
 def _fetch_comments(
     parsed_args: argparse.Namespace,
-) -> Tuple[List[_PRComment], Dict[str, List[_Thread]]]:
+) -> tuple[list[_PRComment], dict[str, list[_Thread]]]:
     """Fetches comments and review threads from GitHub."""
     # Each _query call will print a '.' for progress.
     print(
@@ -412,7 +412,7 @@ def _fetch_comments(
     pull_request = main_result["repository"]["pullRequest"]
 
     # Paginate comments, reviews, and review threads.
-    comments: List[_PRComment] = []
+    comments: list[_PRComment] = []
     _paginate(
         "comments",
         _accumulate_pr_comment,
@@ -430,7 +430,7 @@ def _fetch_comments(
         main_result,
         comments,
     )
-    threads_by_path: Dict[str, List[_Thread]] = {}
+    threads_by_path: dict[str, list[_Thread]] = {}
     _paginate(
         "reviewThreads",
         _accumulate_thread,

+ 2 - 2
migrate_cpp/migrate_cpp.py

@@ -11,7 +11,7 @@ import glob
 import os
 import subprocess
 import sys
-from typing import List, Optional
+from typing import Optional
 
 _CPP_REFACTORING = "./cpp_refactoring/cpp_refactoring"
 _H_EXTS = {".h", ".hpp"}
@@ -21,7 +21,7 @@ _CPP_EXTS = {".c", ".cc", ".cpp", ".cxx"}
 class _Workflow:
     _parsed_args: argparse.Namespace
     _data_dir: str
-    _cpp_files: Optional[List[str]]
+    _cpp_files: Optional[list[str]]
 
     def __init__(self) -> None:
         """Parses command-line arguments and flags."""

+ 4 - 3
scripts/check_header_guards.py

@@ -8,10 +8,11 @@ Exceptions. See /LICENSE for license information.
 SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 """
 
+from collections.abc import Iterable
 from pathlib import Path
 import re
 import sys
-from typing import Iterable, List, NamedTuple, Optional
+from typing import NamedTuple, Optional
 
 
 class Guard(NamedTuple):
@@ -22,7 +23,7 @@ class Guard(NamedTuple):
 
 
 def find_guard(
-    lines: List[str], pattern: str, from_end: bool
+    lines: list[str], pattern: str, from_end: bool
 ) -> Optional[Guard]:
     """Searches the lines for something matching the pattern."""
     lines_range: Iterable[str] = lines
@@ -38,7 +39,7 @@ def find_guard(
 
 
 def maybe_replace(
-    lines: List[str], old_guard: Guard, guard_prefix: str, guard: str
+    lines: list[str], old_guard: Guard, guard_prefix: str, guard: str
 ) -> None:
     """Replaces a header guard in the file if needed."""
     if guard != old_guard.guard:

+ 23 - 23
scripts/fix_cc_deps.py

@@ -16,7 +16,7 @@ SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 
 import re
 import subprocess
-from typing import Callable, Dict, List, NamedTuple, Set, Tuple
+from typing import Callable, NamedTuple
 from xml.etree import ElementTree
 
 import scripts_utils
@@ -35,12 +35,12 @@ class RuleChoice(NamedTuple):
     # Whether to use "" or <> for the include.
     use_system_include: bool
     # Possible rules that may be used.
-    rules: Set[str]
+    rules: set[str]
 
 
 # Maps external repository names to a method translating bazel labels to file
 # paths for that repository.
-EXTERNAL_REPOS: Dict[str, ExternalRepo] = {
+EXTERNAL_REPOS: dict[str, ExternalRepo] = {
     # llvm:include/llvm/Support/Error.h ->llvm/Support/Error.h
     # clang-tools-extra/clangd:URI.h -> clang-tools-extra/clangd/URI.h
     "@llvm-project": ExternalRepo(
@@ -92,15 +92,15 @@ IGNORE_SOURCE_FILE_REGEX = re.compile(
 class Rule(NamedTuple):
     # For cc_* rules:
     # The hdrs + textual_hdrs attributes, as relative paths to the file.
-    hdrs: Set[str]
+    hdrs: set[str]
     # The srcs attribute, as relative paths to the file.
-    srcs: Set[str]
+    srcs: set[str]
     # The deps attribute, as full bazel labels.
-    deps: Set[str]
+    deps: set[str]
 
     # For genrules:
     # The outs attribute, as relative paths to the file.
-    outs: Set[str]
+    outs: set[str]
 
 
 def remap_file(label: str) -> str:
@@ -114,13 +114,13 @@ def remap_file(label: str) -> str:
     return EXTERNAL_REPOS[repo].remap(path)
 
 
-def get_bazel_list(list_child: ElementTree.Element, is_file: bool) -> Set[str]:
+def get_bazel_list(list_child: ElementTree.Element, is_file: bool) -> set[str]:
     """Returns the contents of a bazel list.
 
     The return will normally be the full label, unless `is_file` is set, in
     which case the label will be translated to the underlying file.
     """
-    results: Set[str] = set()
+    results: set[str] = set()
     for label in list_child:
         assert label.tag in ("label", "output"), label.tag
         value = label.attrib["value"]
@@ -130,7 +130,7 @@ def get_bazel_list(list_child: ElementTree.Element, is_file: bool) -> Set[str]:
     return results
 
 
-def get_rules(bazel: str, targets: str, keep_going: bool) -> Dict[str, Rule]:
+def get_rules(bazel: str, targets: str, keep_going: bool) -> dict[str, Rule]:
     """Queries the specified targets, returning the found rules.
 
     keep_going will be set to true for external repositories, where sometimes we
@@ -153,14 +153,14 @@ def get_rules(bazel: str, targets: str, keep_going: bool) -> Dict[str, Rule]:
     if p.returncode not in {0, 3}:
         print(p.stderr)
         exit(f"bazel query returned {p.returncode}")
-    rules: Dict[str, Rule] = {}
+    rules: dict[str, Rule] = {}
     for rule_xml in ElementTree.fromstring(p.stdout):
         assert rule_xml.tag == "rule", rule_xml.tag
         rule_name = rule_xml.attrib["name"]
-        hdrs: Set[str] = set()
-        srcs: Set[str] = set()
-        deps: Set[str] = set()
-        outs: Set[str] = set()
+        hdrs: set[str] = set()
+        srcs: set[str] = set()
+        deps: set[str] = set()
+        outs: set[str] = set()
         rule_class = rule_xml.attrib["class"]
         for list_child in rule_xml.findall("list"):
             list_name = list_child.attrib["name"]
@@ -183,7 +183,7 @@ def get_rules(bazel: str, targets: str, keep_going: bool) -> Dict[str, Rule]:
 
 
 def map_headers(
-    header_to_rule_map: Dict[str, RuleChoice], rules: Dict[str, Rule]
+    header_to_rule_map: dict[str, RuleChoice], rules: dict[str, Rule]
 ) -> None:
     """Accumulates headers provided by rules into the map.
 
@@ -212,16 +212,16 @@ def map_headers(
 
 
 def get_missing_deps(
-    header_to_rule_map: Dict[str, RuleChoice],
-    generated_files: Set[str],
+    header_to_rule_map: dict[str, RuleChoice],
+    generated_files: set[str],
     rule: Rule,
-) -> Tuple[Set[str], bool]:
+) -> tuple[set[str], bool]:
     """Returns missing dependencies for the rule.
 
     On return, the set is dependency labels that should be added; the bool
    indicates whether some were omitted due to ambiguity.
     """
-    missing_deps: Set[str] = set()
+    missing_deps: set[str] = set()
     ambiguous = False
     rule_files = rule.hdrs.union(rule.srcs)
     for source_file in rule_files:
@@ -297,17 +297,17 @@ def main() -> None:
     external_rules = get_rules(bazel, external_repo_query, True)
 
     print("Building header map...")
-    header_to_rule_map: Dict[str, RuleChoice] = {}
+    header_to_rule_map: dict[str, RuleChoice] = {}
     map_headers(header_to_rule_map, carbon_rules)
     map_headers(header_to_rule_map, external_rules)
 
     print("Building generated file list...")
-    generated_files: Set[str] = set()
+    generated_files: set[str] = set()
     for rule in carbon_rules.values():
         generated_files = generated_files.union(rule.outs)
 
     print("Parsing headers from source files...")
-    all_missing_deps: List[Tuple[str, Set[str]]] = []
+    all_missing_deps: list[tuple[str, set[str]]] = []
     any_ambiguous = False
     for rule_name, rule in carbon_rules.items():
         missing_deps, ambiguous = get_missing_deps(

+ 2 - 2
scripts/scripts_utils.py

@@ -14,7 +14,7 @@ from pathlib import Path
 import platform
 import shutil
 import time
-from typing import Dict, Optional
+from typing import Optional
 import urllib.request
 
 _BAZEL_TOOLS_URL = (
@@ -161,7 +161,7 @@ def _get_platform_ext() -> str:
         return ""
 
 
-def _select_hash(hashes: Dict[str, str], version: str) -> str:
+def _select_hash(hashes: dict[str, str], version: str) -> str:
     # Ensure the platform version is supported and has a hash.
     if version not in hashes:
        # If this is because of a platform support issue, we may need to print errors.

+ 3 - 3
scripts/source_stats.py

@@ -16,7 +16,7 @@ from multiprocessing import Pool
 import re
 import termplotlib as tpl  # type:ignore
 from pathlib import Path
-from typing import Dict, List, Optional
+from typing import Optional
 from dataclasses import dataclass, field, asdict
 from collections import Counter
 
@@ -167,7 +167,7 @@ def scan_file(file: Path) -> Stats:
     return stats
 
 
-def parse_args(args: Optional[List[str]] = None) -> argparse.Namespace:
+def parse_args(args: Optional[list[str]] = None) -> argparse.Namespace:
     """Parsers command-line arguments and flags."""
     parser = argparse.ArgumentParser(description=__doc__)
     parser.add_argument(
@@ -232,7 +232,7 @@ Fraction IDs: {stats.identifiers / tokens}
     )
 
     def print_histogram(
-        title: str, data: Dict[int, int], column_format: str
+        title: str, data: dict[int, int], column_format: str
     ) -> None:
         print()
         key_min = min(data.keys())