---
  1. # Part of the Carbon Language project, under the Apache License v2.0 with LLVM
  2. # Exceptions. See /LICENSE for license information.
  3. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. name: test
  5. on:
  6. push:
  7. branches: [trunk]
  8. pull_request:
  9. merge_group:
  10. # Cancel previous workflows on the PR when there are multiple fast commits.
  11. # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#concurrency
  12. concurrency:
  13. group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
  14. cancel-in-progress: true
  15. jobs:
  16. test:
  17. strategy:
  18. matrix:
  19. # At present, these images are newer than "latest". We use them to test
  20. # against more recent tooling versions.
  21. # https://github.com/actions/runner-images
  22. os: [ubuntu-22.04, macos-12]
  23. build_mode: [fastbuild, opt]
  24. runs-on: ${{ matrix.os }}
  25. steps:
  26. # Ubuntu images start with 23GB available, and this adds 14GB more. For
  27. # comparison, MacOS images have >100GB free.
  28. - name: Free up disk space (Ubuntu)
  29. if: matrix.os == 'ubuntu-22.04'
  30. uses: jlumbroso/free-disk-space@v1.2.0
  31. with:
  32. android: true
  33. dotnet: true
  34. haskell: true
  35. # Although we could delete more, if we run into a limit, it provides a
  36. # little flexibility to get space while trying to shrink the build.
  37. # There's also support for docker images at head (1.2.0 is still
  38. # the latest release).
  39. large-packages: false
  40. swap-storage: false
  41. # Checkout the pull request head or the branch.
  42. - name: Checkout pull request
  43. if: github.event_name == 'pull_request'
  44. uses: actions/checkout@v3
  45. with:
  46. ref: ${{ github.event.pull_request.head.sha }}
  47. - name: Checkout branch
  48. if: github.event_name != 'pull_request'
  49. uses: actions/checkout@v3
  50. # Tests should only run on applicable paths, but we still need to have an
  51. # action run for the merge queue. We filter steps based on the paths here,
  52. # and condition steps on the output.
  53. - id: filter
  54. uses: dorny/paths-filter@v2
  55. with:
  56. filters: |
  57. has_code:
  58. - '!{**/*.md,LICENSE,CODEOWNERS,.git*}'
  59. # Setup Python and related tools.
  60. - uses: actions/setup-python@v4
  61. if: steps.filter.outputs.has_code == 'true'
  62. with:
  63. # Match the min version listed in docs/project/contribution_tools.md
  64. python-version: '3.9'
  65. # Use LLVM following:
  66. # https://github.com/actions/runner-images/blob/main/images/macos/macos-12-Readme.md
  67. - name: Setup LLVM and Clang (macOS)
  68. if: steps.filter.outputs.has_code == 'true' && matrix.os == 'macos-12'
  69. run: |
  70. LLVM_PATH="$(brew --prefix llvm@15)"
  71. echo "Using ${LLVM_PATH}"
  72. echo "${LLVM_PATH}/bin" >> $GITHUB_PATH
  73. echo '*** ls "${LLVM_PATH}"'
  74. ls "${LLVM_PATH}"
  75. echo '*** ls "${LLVM_PATH}/bin"'
  76. ls "${LLVM_PATH}/bin"
  77. # Use LLVM following:
  78. # https://github.com/actions/runner-images/blob/main/images/linux/Ubuntu2204-Readme.md
  79. - name: Setup LLVM and Clang (Ubuntu)
  80. if:
  81. steps.filter.outputs.has_code == 'true' && matrix.os == 'ubuntu-22.04'
  82. run: |
  83. # TODO: Re-enable once llvm-15 is working.
  84. # https://github.com/actions/runner-images/issues/8253
  85. # LLVM_PATH="/usr/lib/llvm-15"
  86. # if [[ ! -e "${LLVM_PATH}" ]]; then
  87. LLVM_PATH="/usr/lib/llvm-14"
  88. # fi
  89. echo "Using ${LLVM_PATH}"
  90. echo "${LLVM_PATH}/bin" >> $GITHUB_PATH
  91. echo '*** ls "${LLVM_PATH}"'
  92. ls "${LLVM_PATH}"
  93. echo '*** ls "${LLVM_PATH}/bin"'
  94. ls "${LLVM_PATH}/bin"
  95. # Print the various tool paths and versions to help in debugging.
  96. - name: Print tool debugging info
  97. if: steps.filter.outputs.has_code == 'true'
  98. run: |
  99. echo '*** PATH'
  100. echo $PATH
  101. echo '*** bazelisk'
  102. which bazelisk
  103. bazelisk --version
  104. echo '*** python'
  105. which python
  106. python --version
  107. echo '*** clang'
  108. which clang
  109. clang --version
  110. echo '*** clang++'
  111. which clang++
  112. clang++ --version
  113. # Disable uploads when the remote cache is read-only.
  114. - name: Set up remote cache access (read-only)
  115. if:
  116. steps.filter.outputs.has_code == 'true' && github.event_name ==
  117. 'pull_request'
  118. run: |
  119. echo "remote_cache_upload=--remote_upload_local_results=false" \
  120. >> $GITHUB_ENV
  121. # Provide a cache key when the remote cache is read-write.
  122. - name: Set up remote cache access (read-write)
  123. if:
  124. steps.filter.outputs.has_code == 'true' && github.event_name !=
  125. 'pull_request'
  126. env:
  127. REMOTE_CACHE_KEY: ${{ secrets.CARBON_BUILDS_GITHUB }}
  128. run: |
  129. echo "$REMOTE_CACHE_KEY" | base64 -d > $HOME/remote_cache_key.json
  130. echo "remote_cache_upload=--google_credentials=$HOME/remote_cache_key.json" \
  131. >> $GITHUB_ENV
  132. # We need to replace the `.` with a `_` for the build cache.
  133. - name: Setup LLVM and Clang (macOS)
  134. if: steps.filter.outputs.has_code == 'true' && matrix.os == 'macos-12'
  135. run: |
  136. echo "os_for_cache=macos-12" >> $GITHUB_ENV
  137. - name: Setup LLVM and Clang (Ubuntu)
  138. if:
  139. steps.filter.outputs.has_code == 'true' && matrix.os == 'ubuntu-22.04'
  140. run: |
  141. echo "os_for_cache=ubuntu-22_04" >> $GITHUB_ENV
  142. # Add our bazel configuration and print basic info to ease debugging.
  143. - name: Configure Bazel and print info
  144. if: steps.filter.outputs.has_code == 'true'
  145. env:
  146. # Add a cache version for changes that bazel won't otherwise detect,
  147. # like llvm version changes.
  148. CACHE_VERSION: 1
  149. run: |
  150. cat >user.bazelrc <<EOF
  151. # Enable remote cache for our CI but minimize downloads.
  152. build --remote_cache=https://storage.googleapis.com/carbon-builds-github-v${CACHE_VERSION}-${{ env.os_for_cache }}
  153. build --remote_download_minimal
  154. build ${{ env.remote_cache_upload }}
  155. # Set an artificially high jobs count. This flag controls the number
  156. # of concurrency Bazel itself uses, which is essential for actions
  157. # that are internally blocked on for example downloading results form
  158. # the cache above. Without setting this high, Bazel will pick a small
  159. # number based on the available host CPUs and the reality will be a
  160. # long chain of largely serialized download events with little or no
  161. # usage of the host machine. Fortunately, local actions are
  162. # *separately* gated on '--local_*_resources' that will avoid a large
  163. # jobs value overwhelming the host. There is a bug to make downloads
  164. # behave completely asynchronously and remove the need for this filed
  165. # back in 2018 but work seemed to not finish:
  166. # https://github.com/bazelbuild/bazel/issues/6394
  167. #
  168. # There is a new effort (yay!) but until then it seems worth using the
  169. # workaround of a high jobs value. The biggest downside (increased
  170. # heap usage) seems like it isn't currently a big loss for our builds.
  171. #
  172. # Higher values like 50 have led to CI failures with network errors
  173. # and IOExceptions, see
  174. # https://discord.com/channels/655572317891461132/707150492370862090/1151605725576056934
  175. build --jobs=32
  176. # General build options.
  177. build --verbose_failures
  178. test --test_output=errors
  179. EOF
  180. bazelisk info
  181. # Just for visibility, print space before and after the build.
  182. - name: Disk space before build
  183. if: steps.filter.outputs.has_code == 'true'
  184. run: df -h
  185. # Build and run all targets on branch pushes to ensure we always have a
  186. # clean tree. We don't expect this to be an interactive path and so don't
  187. # optimize the latency of this step.
  188. - name: Compute impacted pull request targets (for push)
  189. if:
  190. steps.filter.outputs.has_code == 'true' && github.event_name == 'push'
  191. env:
  192. TARGETS_FILE: ${{ runner.temp }}/targets
  193. run: |
  194. echo "//..." >$TARGETS_FILE
  195. # Compute the set of possible rules impacted by this change using
  196. # Bazel-based diffing. This lets PRs and the merge queue have a much more
  197. # efficient test CI action by avoiding even enumerating (and downloading)
  198. # all of the unaffected Bazel targets.
  199. - name: Compute impacted pull request targets
  200. if:
  201. steps.filter.outputs.has_code == 'true' && github.event_name != 'push'
  202. env:
  203. # Compute the base SHA from the different event structures.
  204. GIT_BASE_SHA:
  205. ${{ github.event_name == 'pull_request' &&
  206. github.event.pull_request.base.sha ||
  207. github.event.merge_group.base_sha }}
  208. TARGETS_FILE: ${{ runner.temp }}/targets
  209. run: |
  210. # First fetch the relevant base into the git repository.
  211. git fetch --depth=1 origin $GIT_BASE_SHA
  212. # Then use `target-determinator` as wrapped by our script.
  213. ./scripts/target_determinator.py $GIT_BASE_SHA >$TARGETS_FILE
  214. # Build and run just the tests impacted by the PR or merge group.
  215. - name: Test (${{ matrix.build_mode }})
  216. if: steps.filter.outputs.has_code == 'true'
  217. env:
  218. # 'libtool_check_unique failed to generate' workaround.
  219. # https://github.com/bazelbuild/bazel/issues/14113#issuecomment-999794586
  220. BAZEL_USE_CPP_ONLY_TOOLCHAIN: 1
  221. TARGETS_FILE: ${{ runner.temp }}/targets
  222. run: |
  223. # Bazel requires a test target to run the test command. There may be
  224. # no targets or there may only be non-test targets that we want to
  225. # build, so simply inject an explicit no-op test target.
  226. echo "//scripts:no_op_test" >> $TARGETS_FILE
  227. for i in {1..5}; do
  228. if (( $i == 4 )); then
  229. # Decrease the jobs sharply if we see repeated failures to try to
  230. # work around transient network errors even if it makes things
  231. # slower.
  232. echo "build --jobs=4" >>user.bazelrc
  233. fi
  234. bazel_exit=0
  235. bazelisk test -c ${{ matrix.build_mode }} \
  236. --target_pattern_file=$TARGETS_FILE || bazel_exit=$?
  237. # If we succeed, we're done.
  238. if (( $bazel_exit == 0 )); then
  239. break
  240. fi
  241. # Several error codes are reliably permanent, break immediately.
  242. # `1` -- The build failed.
  243. # `2` -- Command line or environment problem.
  244. # `3` -- Tests failed or timed out, we don't retry at this layer
  245. # on execution timeout.
  246. # `4` -- No tests found, which should be impossible here.
  247. # `8` -- Explicitly interrupted build.
  248. #
  249. # Note that `36` is documented as "likely permanent", but we retry
  250. # it as most of our transient failures actually produce that error
  251. # code.
  252. if (( $bazel_exit == 1 || $bazel_exit == 2 || $bazel_exit == 3 || \
  253. $bazel_exit == 4 || $bazel_exit == 8 || $bazel_exit == 8 ))
  254. then
  255. break
  256. fi
  257. echo "Retrying a failed build as it may be transient..."
  258. # Also sleep a bit to try to skip over transient machine load.
  259. sleep $i
  260. done
  261. # Propagate the Bazel exit code.
  262. exit $bazel_exit
  263. # See "Disk space before build".
  264. - name: Disk space after build
  265. if: steps.filter.outputs.has_code == 'true'
  266. run: df -h