source_stats.py

#!/usr/bin/env python3

"""Script to compute statistics about source code."""

from __future__ import annotations

__copyright__ = """
Part of the Carbon Language project, under the Apache License v2.0 with LLVM
Exceptions. See /LICENSE for license information.
SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""

import argparse
import re
from collections import Counter
from dataclasses import asdict, dataclass, field
from multiprocessing import Pool
from pathlib import Path
from typing import Optional

import termplotlib as tpl  # type: ignore
from alive_progress import alive_bar  # type: ignore

BLANK_RE = re.compile(r"\s*")

COMMENT_RE = re.compile(r"\s*///*\s*")

LINE_RE = re.compile(
    r"""
    (?P<class_intro>\b(class|struct)\s+(?P<class_name>\w+)\b)|
    (?P<end_open_curly>{\s*(?P<open_curly_trailing_comment>//.*)?)|
    (?P<trailing_comment>//.*)|
    (?P<internal_comment>/\*.*\*/)|
    (?P<string_literal>"([^"]|\\")*"|'([^']|\\')*')|
    (?P<float_literal>\b(0[xb][0-9a-fA-F']*|[0-9][0-9']*)\.[0-9a-fA-F']*([eEpP][0-9a-fA-F']*)?)|
    (?P<int_literal>\b(0[xb][0-9a-fA-F']+|[0-9][0-9']*)([eEpP][0-9a-fA-F']*)?)|
    (?P<symbol>[\[\]{}(),.;]|[-+=!@#$%^&*/?|<>]+)|
    (?P<keyword>\b(auto|bool|break|case|catch|char|class|const|continue|default|do|double|else|enum|explicit|extern|false|float|for|friend|goto|if|inline|int|long|mutable|namespace|new|nullptr|operator|private|protected|public|return|short|signed|sizeof|static|struct|switch|template|this|throw|true|try|typedef|union|unsigned|using|virtual|void|while)\b)|
    (?P<id>\b\w+\b)
    """,
    re.X,
)
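
# As an illustration (not used by the scanner itself): for a line such as
#   int n = 42;  // count
# LINE_RE yields matches for the `keyword`, `id`, `symbol`, and `int_literal`
# groups, followed by a `trailing_comment` match for `// count`.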


@dataclass
class Stats:
    """Stats collected while scanning source files."""

    lines: int = 0
    blank_lines: int = 0
    comment_lines: int = 0
    empty_comment_lines: int = 0
    comment_line_widths: Counter[int] = field(default_factory=Counter)
    lines_with_trailing_comments: int = 0
    classes: int = 0
    internal_comments: int = 0
    string_literals: int = 0
    string_literals_per_line: Counter[int] = field(default_factory=Counter)
    int_literals: int = 0
    int_literals_per_line: Counter[int] = field(default_factory=Counter)
    float_literals: int = 0
    float_literals_per_line: Counter[int] = field(default_factory=Counter)
    symbols: int = 0
    symbols_per_line: Counter[int] = field(default_factory=Counter)
    keywords: int = 0
    keywords_per_line: Counter[int] = field(default_factory=Counter)
    identifiers: int = 0
    identifier_widths: Counter[int] = field(default_factory=Counter)
    ids_per_line: Counter[int] = field(default_factory=Counter)

    def accumulate(self, other: Stats) -> None:
        """Adds the counts from another Stats object into this one."""
        self.lines += other.lines
        self.blank_lines += other.blank_lines
        self.empty_comment_lines += other.empty_comment_lines
        self.comment_lines += other.comment_lines
        self.comment_line_widths.update(other.comment_line_widths)
        self.lines_with_trailing_comments += other.lines_with_trailing_comments
        self.classes += other.classes
        self.internal_comments += other.internal_comments
        self.string_literals += other.string_literals
        self.string_literals_per_line.update(other.string_literals_per_line)
        self.int_literals += other.int_literals
        self.int_literals_per_line.update(other.int_literals_per_line)
        self.float_literals += other.float_literals
        self.float_literals_per_line.update(other.float_literals_per_line)
        self.symbols += other.symbols
        self.symbols_per_line.update(other.symbols_per_line)
        self.keywords += other.keywords
        self.keywords_per_line.update(other.keywords_per_line)
        self.identifiers += other.identifiers
        self.identifier_widths.update(other.identifier_widths)
        self.ids_per_line.update(other.ids_per_line)
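
# A minimal sketch of merging per-file results (values are illustrative):
#   total = Stats()
#   total.accumulate(Stats(lines=10, blank_lines=2))
#   total.accumulate(Stats(lines=5, blank_lines=1))
#   assert total.lines == 15 and total.blank_lines == 3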


def scan_file(file: Path) -> Stats:
    """Scans the provided file and accumulates stats."""
    stats = Stats()
    for line in file.open():
        # Strip off the line endings.
        line = line.rstrip("\r\n")
        # Skip over super long lines that are often URLs or structured data
        # that doesn't match "normal" source code patterns.
        if len(line) > 80:
            continue
        stats.lines += 1
        if BLANK_RE.fullmatch(line):
            stats.blank_lines += 1
            continue
        if m := COMMENT_RE.match(line):
            stats.comment_lines += 1
            if m.end() == len(line):
                stats.empty_comment_lines += 1
            else:
                stats.comment_line_widths[len(line)] += 1
            continue
        line_string_literals = 0
        line_int_literals = 0
        line_float_literals = 0
        line_symbols = 0
        line_keywords = 0
        line_identifiers = 0
        for m in LINE_RE.finditer(line):
            if m.group("trailing_comment"):
                stats.lines_with_trailing_comments += 1
                break
            if m.group("class_intro"):
                stats.classes += 1
                line_keywords += 1
                line_identifiers += 1
                stats.identifier_widths[len(m.group("class_name"))] += 1
            elif m.group("end_open_curly"):
                line_symbols += 1
            elif m.group("internal_comment"):
                stats.internal_comments += 1
            elif m.group("string_literal"):
                line_string_literals += 1
            elif m.group("int_literal"):
                line_int_literals += 1
            elif m.group("float_literal"):
                line_float_literals += 1
            elif m.group("symbol"):
                line_symbols += 1
            elif m.group("keyword"):
                line_keywords += 1
            else:
                assert m.group("id"), "Line is '%s', and match is '%s'" % (
                    line,
                    line[m.start() : m.end()],
                )
                line_identifiers += 1
                stats.identifier_widths[len(m.group("id"))] += 1
        stats.string_literals += line_string_literals
        stats.string_literals_per_line[line_string_literals] += 1
        stats.int_literals += line_int_literals
        stats.int_literals_per_line[line_int_literals] += 1
        stats.float_literals += line_float_literals
        stats.float_literals_per_line[line_float_literals] += 1
        stats.symbols += line_symbols
        stats.symbols_per_line[line_symbols] += 1
        stats.keywords += line_keywords
        stats.keywords_per_line[line_keywords] += 1
        stats.identifiers += line_identifiers
        stats.ids_per_line[line_identifiers] += 1
    return stats
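
# A minimal standalone use of scan_file (the path is hypothetical):
#   stats = scan_file(Path("example.cpp"))
#   print(stats.lines, stats.identifiers)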


def parse_args(args: Optional[list[str]] = None) -> argparse.Namespace:
    """Parses command-line arguments and flags."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "files",
        metavar="FILE",
        type=Path,
        nargs="+",
        help="A file to scan while collecting statistics.",
    )
    return parser.parse_args(args=args)
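
# Example invocation (the paths are illustrative only):
#   ./source_stats.py $(find some/source/tree -name '*.cpp' -o -name '*.h')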


def main() -> None:
    parsed_args = parse_args()
    stats = Stats()
    # Scan the files in parallel, merging each file's stats as results arrive.
    with alive_bar(len(parsed_args.files)) as bar:
        with Pool() as p:
            for file_stats in p.imap_unordered(scan_file, parsed_args.files):
                stats.accumulate(file_stats)
                bar()
    print(
        """
## Stats ##
Lines: %(lines)d
Blank lines: %(blank_lines)d
Comment lines: %(comment_lines)d
Empty comment lines: %(empty_comment_lines)d
Lines with trailing comments: %(lines_with_trailing_comments)d
Classes: %(classes)d
Internal comments: %(internal_comments)d
String literals: %(string_literals)d
Int literals: %(int_literals)d
Float literals: %(float_literals)d
Symbols: %(symbols)d
Keywords: %(keywords)d
IDs: %(identifiers)d"""
        % asdict(stats)
    )
    tokens = (
        stats.string_literals
        + stats.int_literals
        + stats.float_literals
        + stats.symbols
        + stats.keywords
        + stats.identifiers
    )
    print(
        f"""
Fraction of blank lines: {stats.blank_lines / stats.lines}
Fraction of comment lines: {stats.comment_lines / stats.lines}
Total counted tokens: {tokens}
Fraction string literals: {stats.string_literals / tokens}
Fraction int literals: {stats.int_literals / tokens}
Fraction float literals: {stats.float_literals / tokens}
Fraction symbols: {stats.symbols / tokens}
Fraction keywords: {stats.keywords / tokens}
Fraction IDs: {stats.identifiers / tokens}
"""
    )

    def print_histogram(
        title: str, data: dict[int, int], column_format: str
    ) -> None:
        print()
        key_min = min(data.keys())
        key_max = max(data.keys()) + 1
        values = [data.get(k, 0) for k in range(key_min, key_max)]
        keys = [column_format % k for k in range(key_min, key_max)]
        # Find the median bucket by walking the counts until at most half of
        # the total remains.
        total = sum(values)
        median = key_min
        count = total
        for k in range(key_min, key_max):
            count -= data.get(k, 0)
            if count <= total / 2:
                median = k
                break
        print(title + f" (median: {median})")
        fig = tpl.figure()
        fig.barh(values, keys)
        fig.show()

    print_histogram(
        "## Comment line widths ##", stats.comment_line_widths, "%d columns"
    )
    print_histogram(
        "## String literals per line ##",
        stats.string_literals_per_line,
        "%d literals",
    )
    print_histogram(
        "## Int literals per line ##",
        stats.int_literals_per_line,
        "%d literals",
    )
    print_histogram(
        "## Float literals per line ##",
        stats.float_literals_per_line,
        "%d literals",
    )
    print_histogram(
        "## Symbols per line ##", stats.symbols_per_line, "%d symbols"
    )
    print_histogram(
        "## Keywords per line ##", stats.keywords_per_line, "%d keywords"
    )
    print_histogram("## ID widths ##", stats.identifier_widths, "%d characters")
    print_histogram("## IDs per line ##", stats.ids_per_line, "%d ids")


if __name__ == "__main__":
    main()