Merge tag 'nektos/v0.2.60'

Jason Song 2024-03-25 16:58:11 +08:00
commit 79a7577c15
No known key found for this signature in database
GPG Key ID: 8402EEEE4511A8B5
58 changed files with 1662 additions and 935 deletions

View File

@ -15,10 +15,10 @@ jobs:
name: lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/setup-go@v4
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
check-latest: true
@ -26,7 +26,7 @@ jobs:
with:
version: v1.53
only-new-issues: true
- uses: megalinter/megalinter/flavors/go@v7.4.0
- uses: megalinter/megalinter/flavors/go@v7.8.0
env:
DEFAULT_BRANCH: master
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@ -38,12 +38,12 @@ jobs:
name: test-linux
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
fetch-depth: 2
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- uses: actions/setup-go@v4
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
check-latest: true
@ -61,7 +61,7 @@ jobs:
- name: Run act from cli
run: go run main.go -P ubuntu-latest=node:16-buster-slim -C ./pkg/runner/testdata/ -W ./basic/push.yml
- name: Upload Codecov report
uses: codecov/codecov-action@v3.1.4
uses: codecov/codecov-action@v3.1.5
with:
files: coverage.txt
fail_ci_if_error: true # optional (default = false)
@ -75,10 +75,10 @@ jobs:
name: test-${{matrix.os}}
runs-on: ${{matrix.os}}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
fetch-depth: 2
- uses: actions/setup-go@v4
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
check-latest: true
@ -92,8 +92,8 @@ jobs:
name: snapshot
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
check-latest: true
@ -111,67 +111,67 @@ jobs:
args: release --snapshot --clean
- name: Capture x86_64 (64-bit) Linux binary
if: ${{ !env.ACT }}
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: act-linux-amd64
path: dist/act_linux_amd64_v1/act
- name: Capture i386 (32-bit) Linux binary
if: ${{ !env.ACT }}
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: act-linux-i386
path: dist/act_linux_386/act
- name: Capture arm64 (64-bit) Linux binary
if: ${{ !env.ACT }}
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: act-linux-arm64
path: dist/act_linux_arm64/act
- name: Capture armv6 (32-bit) Linux binary
if: ${{ !env.ACT }}
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: act-linux-armv6
path: dist/act_linux_arm_6/act
- name: Capture armv7 (32-bit) Linux binary
if: ${{ !env.ACT }}
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: act-linux-armv7
path: dist/act_linux_arm_7/act
- name: Capture x86_64 (64-bit) Windows binary
if: ${{ !env.ACT }}
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: act-windows-amd64
path: dist/act_windows_amd64_v1/act.exe
- name: Capture i386 (32-bit) Windows binary
if: ${{ !env.ACT }}
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: act-windows-i386
path: dist/act_windows_386/act.exe
- name: Capture arm64 (64-bit) Windows binary
if: ${{ !env.ACT }}
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: act-windows-arm64
path: dist/act_windows_arm64/act.exe
- name: Capture armv7 (32-bit) Windows binary
if: ${{ !env.ACT }}
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: act-windows-armv7
path: dist/act_windows_arm_7/act.exe
- name: Capture x86_64 (64-bit) MacOS binary
if: ${{ !env.ACT }}
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: act-macos-amd64
path: dist/act_darwin_amd64_v1/act
- name: Capture arm64 (64-bit) MacOS binary
if: ${{ !env.ACT }}
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: act-macos-arm64
path: dist/act_darwin_arm64/act

View File

@ -9,13 +9,13 @@ jobs:
name: promote
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
fetch-depth: 0
ref: master
token: ${{ secrets.GORELEASER_GITHUB_TOKEN }}
- uses: fregante/setup-git-user@v2
- uses: actions/setup-go@v4
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
check-latest: true

View File

@ -9,10 +9,10 @@ jobs:
name: release
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/setup-go@v4
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
check-latest: true
@ -43,7 +43,7 @@ jobs:
apiKey: ${{ secrets.CHOCO_APIKEY }}
push: true
- name: GitHub CLI extension
uses: actions/github-script@v6
uses: actions/github-script@v7
with:
github-token: ${{ secrets.GORELEASER_GITHUB_TOKEN }}
script: |

View File

@ -8,7 +8,7 @@ jobs:
name: Stale
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v8
- uses: actions/stale@v9
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: 'Issue is stale and will be closed in 14 days unless there is new activity'

View File

@ -71,6 +71,10 @@ pull_request_rules:
- and:
- 'approved-reviews-by=@nektos/act-maintainers'
- '#approved-reviews-by>=2'
- and:
- 'author=@nektos/act-maintainers'
- 'approved-reviews-by=@nektos/act-maintainers'
- '#approved-reviews-by>=1'
- -draft
- -merged
- -closed

README.md (454 changed lines)
View File

@ -23,7 +23,7 @@ Tags:
---
![act-logo](https://github.com/nektos/act/wiki/img/logo-150.png)
![act-logo](https://raw.githubusercontent.com/wiki/nektos/act/img/logo-150.png)
# Overview [![push](https://github.com/nektos/act/workflows/push/badge.svg?branch=master&event=push)](https://github.com/nektos/act/actions) [![Join the chat at https://gitter.im/nektos/act](https://badges.gitter.im/nektos/act.svg)](https://gitter.im/nektos/act?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Go Report Card](https://goreportcard.com/badge/github.com/nektos/act)](https://goreportcard.com/report/github.com/nektos/act) [![awesome-runners](https://img.shields.io/badge/listed%20on-awesome--runners-blue.svg)](https://github.com/jonico/awesome-runners)
@ -40,462 +40,12 @@ When you run `act` it reads in your GitHub Actions from `.github/workflows/` and
Let's see it in action with a [sample repo](https://github.com/cplee/github-actions-demo)!
![Demo](https://github.com/nektos/act/wiki/quickstart/act-quickstart-2.gif)
![Demo](https://raw.githubusercontent.com/wiki/nektos/act/quickstart/act-quickstart-2.gif)
# Act User Guide
Please look at the [act user guide](https://nektosact.com) for more documentation.
# Installation
## Necessary prerequisites for running `act`
`act` depends on `docker` to run workflows.
If you are using macOS, please be sure to follow the steps outlined in [Docker Docs for how to install Docker Desktop for Mac](https://docs.docker.com/docker-for-mac/install/).
If you are using Windows, please follow steps for [installing Docker Desktop on Windows](https://docs.docker.com/docker-for-windows/install/).
If you are using Linux, you will need to [install Docker Engine](https://docs.docker.com/engine/install/).
`act` is currently not supported with `podman` or other container backends (it might work, but it's not guaranteed). Please see [#303](https://github.com/nektos/act/issues/303) for updates.
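If you want to experiment with `podman` anyway, one common (unsupported) workaround is to expose podman's Docker-compatible API socket and point `DOCKER_HOST` at it; this is only a sketch under that assumption and is not guaranteed to work:
```bash
# unsupported sketch: expose podman's Docker-compatible API and point act at it
systemctl --user enable --now podman.socket
export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/podman/podman.sock
act
```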
## Installation through package managers
### [Homebrew](https://brew.sh/) (Linux/macOS)
[![homebrew version](https://img.shields.io/homebrew/v/act)](https://github.com/Homebrew/homebrew-core/blob/master/Formula/act.rb)
```shell
brew install act
```
or, if you want to install a version based on the latest commit, you can run the command below (it requires a compiler to be installed, but Homebrew will suggest how to install one if you don't have it):
```shell
brew install act --HEAD
```
### [MacPorts](https://www.macports.org) (macOS)
[![MacPorts package](https://repology.org/badge/version-for-repo/macports/act-run-github-actions.svg)](https://repology.org/project/act-run-github-actions/versions)
```shell
sudo port install act
```
### [Chocolatey](https://chocolatey.org/) (Windows)
[![choco-shield](https://img.shields.io/chocolatey/v/act-cli)](https://community.chocolatey.org/packages/act-cli)
```shell
choco install act-cli
```
### [Scoop](https://scoop.sh/) (Windows)
[![scoop-shield](https://img.shields.io/scoop/v/act)](https://github.com/ScoopInstaller/Main/blob/master/bucket/act.json)
```shell
scoop install act
```
### [Winget](https://learn.microsoft.com/en-us/windows/package-manager/) (Windows)
[![Winget package](https://repology.org/badge/version-for-repo/winget/act-run-github-actions.svg)](https://repology.org/project/act-run-github-actions/versions)
```shell
winget install nektos.act
```
### [AUR](https://aur.archlinux.org/packages/act/) (Linux)
[![aur-shield](https://img.shields.io/aur/version/act)](https://aur.archlinux.org/packages/act/)
```shell
yay -Syu act
```
### [COPR](https://copr.fedorainfracloud.org/coprs/rubemlrm/act-cli/) (Linux)
```shell
dnf copr enable rubemlrm/act-cli
dnf install act-cli
```
### [Nix](https://nixos.org) (Linux/macOS)
[Nix recipe](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/tools/misc/act/default.nix)
Global install:
```sh
nix-env -iA nixpkgs.act
```
or through `nix-shell`:
```sh
nix-shell -p act
```
Using the latest [Nix command](https://nixos.wiki/wiki/Nix_command), you can run it directly:
```sh
nix run nixpkgs#act
```
## Installation as GitHub CLI extension
Act can be installed as a [GitHub CLI](https://cli.github.com/) extension:
```sh
gh extension install https://github.com/nektos/gh-act
```
## Other install options
### Bash script
Run this command in your terminal:
```shell
curl -s https://raw.githubusercontent.com/nektos/act/master/install.sh | sudo bash
```
### Manual download
Download the [latest release](https://github.com/nektos/act/releases/latest) and add the location of the binary to your `PATH`.
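As a sketch for a Linux x86_64 host (the release asset name below is an assumption and may differ between platforms and releases; check the release page for the correct file):
```shell
# download the tarball, extract the act binary, and install it
curl -fsSL -o act.tar.gz \
  https://github.com/nektos/act/releases/latest/download/act_Linux_x86_64.tar.gz
tar xzf act.tar.gz act
sudo install act /usr/local/bin/act
```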
# Example commands
```sh
# Command structure:
act [<event>] [options]
If no event name passed, will default to "on: push"
If actions handles only one event it will be used as default instead of "on: push"
# List all actions for all events:
act -l
# List the actions for a specific event:
act workflow_dispatch -l
# List the actions for a specific job:
act -j test -l
# Run the default (`push`) event:
act
# Run a specific event:
act pull_request
# Run a specific job:
act -j test
# Collect artifacts to the /tmp/artifacts folder:
act --artifact-server-path /tmp/artifacts
# Run a job in a specific workflow (useful if you have duplicate job names)
act -j lint -W .github/workflows/checks.yml
# Run in dry-run mode:
act -n
# Enable verbose-logging (can be used with any of the above commands)
act -v
```
## First `act` run
When running `act` for the first time, it will ask you to choose the image to be used as the default.
It will save that information to `~/.actrc`; please refer to [Configuration](#configuration) for more information about `.actrc` and to [Runners](#runners) for information about the available Docker images.
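For example, after choosing the Medium image the generated `~/.actrc` might look similar to this (a sketch; the exact contents depend on the image you pick):
```sh
# ~/.actrc written by the first-run prompt (example)
-P ubuntu-latest=catthehacker/ubuntu:act-latest
-P ubuntu-22.04=catthehacker/ubuntu:act-22.04
-P ubuntu-20.04=catthehacker/ubuntu:act-20.04
-P ubuntu-18.04=catthehacker/ubuntu:act-18.04
```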
## `GITHUB_TOKEN`
GitHub [automatically provides](https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret) a `GITHUB_TOKEN` secret when running workflows inside GitHub.
If your workflow depends on this token, you need to create a [personal access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) and pass it to `act` as a secret:
```bash
act -s GITHUB_TOKEN=[insert token or leave blank and omit equals for secure input]
```
If [GitHub CLI](https://cli.github.com/) is installed, the [`gh auth token`](https://cli.github.com/manual/gh_auth_token) command can be used to automatically pass the token to act
```bash
act -s GITHUB_TOKEN="$(gh auth token)"
```
**WARNING**: `GITHUB_TOKEN` will be logged in your shell history unless it is inserted through secure input or (depending on your shell config) the command is prefixed with a space.
# Known Issues
## Services
Services are not currently supported but are being worked on. See: [#173](https://github.com/nektos/act/issues/173)
## `MODULE_NOT_FOUND`
A `MODULE_NOT_FOUND` error during the `docker cp` command ([#228](https://github.com/nektos/act/issues/228)) can happen if you are relying on local changes that have not been pushed. This can be triggered if the action uses a path, like:
```yaml
- name: test action locally
  uses: ./
```
In this case, you _must_ use `actions/checkout@v2` with a path that _has the same name as your repository_. If your repository is called _my-action_, then your checkout step would look like:
```yaml
steps:
  - name: Checkout
    uses: actions/checkout@v2
    with:
      path: "my-action"
```
If the `path:` value doesn't match the name of the repository, a `MODULE_NOT_FOUND` error will be thrown.
## `docker context` support
The current `docker context` isn't respected ([#583](https://github.com/nektos/act/issues/583)).
You can work around this by setting `DOCKER_HOST` before running `act`, e.g.:
```bash
export DOCKER_HOST=$(docker context inspect --format '{{.Endpoints.docker.Host}}')
```
# Runners
GitHub Actions offers managed [virtual environments](https://help.github.com/en/actions/reference/virtual-environments-for-github-hosted-runners) for running workflows. In order for `act` to run your workflows locally, it must run a container for the runner defined in your workflow file. Here are the images that `act` uses for each runner type and size:
| GitHub Runner | Micro Docker Image | Medium Docker Image | Large Docker Image |
| --------------- | -------------------------------- | ------------------------------------------------- | -------------------------------------------------- |
| `ubuntu-latest` | [`node:16-buster-slim`][micro] | [`catthehacker/ubuntu:act-latest`][docker_images] | [`catthehacker/ubuntu:full-latest`][docker_images] |
| `ubuntu-22.04` | [`node:16-bullseye-slim`][micro] | [`catthehacker/ubuntu:act-22.04`][docker_images] | `unavailable` |
| `ubuntu-20.04` | [`node:16-buster-slim`][micro] | [`catthehacker/ubuntu:act-20.04`][docker_images] | [`catthehacker/ubuntu:full-20.04`][docker_images] |
| `ubuntu-18.04` | [`node:16-buster-slim`][micro] | [`catthehacker/ubuntu:act-18.04`][docker_images] | [`catthehacker/ubuntu:full-18.04`][docker_images] |
[micro]: https://hub.docker.com/_/buildpack-deps
[docker_images]: https://github.com/catthehacker/docker_images
Windows- and macOS-based platforms are currently **unsupported and won't work** (see issue [#97](https://github.com/nektos/act/issues/97)).
## Please see [IMAGES.md](./IMAGES.md) for more information about the Docker images that can be used with `act`
## Default runners are intentionally incomplete
These default images do **not** contain **all** the tools that GitHub Actions offers by default in their runners.
Many things can work improperly or not at all while running those images.
Additionally, some software might still not work even if installed properly, since GitHub Actions are running in fully virtualized machines while `act` is using Docker containers (e.g. Docker does not support running `systemd`).
In case of any problems, [please create an issue](https://github.com/nektos/act/issues/new/choose) in the respective repository (issues with `act` in this repository, issues with `nektos/act-environments-ubuntu:18.04` in [`nektos/act-environments`](https://github.com/nektos/act-environments), and issues with any image from user `catthehacker` in [`catthehacker/docker_images`](https://github.com/catthehacker/docker_images)).
## Alternative runner images
If you need an environment that works just like the corresponding GitHub runner then consider using an image provided by [nektos/act-environments](https://github.com/nektos/act-environments):
- [`nektos/act-environments-ubuntu:18.04`](https://hub.docker.com/r/nektos/act-environments-ubuntu/tags) - built from the Packer file GitHub uses in [actions/virtual-environments](https://github.com/actions/runner).
:warning: :elephant: `*** WARNING - this image is >18GB 😱***`
- [`catthehacker/ubuntu:full-*`](https://github.com/catthehacker/docker_images/pkgs/container/ubuntu) - built from Packer template provided by GitHub, see [catthehacker/virtual-environments-fork](https://github.com/catthehacker/virtual-environments-fork) or [catthehacker/docker_images](https://github.com/catthehacker/docker_images) for more information
## Using local runner images
The `--pull` flag is set to true by default due to breakage in the older default Docker images. This pulls the Docker image every time `act` is executed.
Set `--pull` to false if a local Docker image should be used:
```sh
act --pull=false
```
## Use an alternative runner image
To use a different image for the runner, use the `-P` option.
```sh
act -P <platform>=<docker-image>
```
If your workflow uses `ubuntu-18.04`, the line below is an example of changing the Docker image used to run that workflow:
```sh
act -P ubuntu-18.04=nektos/act-environments-ubuntu:18.04
```
If you use multiple platforms in your workflow, you have to specify each of them to change which image is used.
For example, if your workflow uses `ubuntu-18.04`, `ubuntu-16.04`, and `ubuntu-latest`, specify all platforms as below:
```sh
act -P ubuntu-18.04=nektos/act-environments-ubuntu:18.04 -P ubuntu-latest=ubuntu:latest -P ubuntu-16.04=node:16-buster-slim
```
# Secrets
To run `act` with secrets, you can enter them interactively, supply them as environment variables or load them from a file. The following options are available for providing secrets:
- `act -s MY_SECRET=somevalue` - use `somevalue` as the value for `MY_SECRET`.
- `act -s MY_SECRET` - check for an environment variable named `MY_SECRET` and use it if it exists. If the environment variable is not defined, prompt the user for a value.
- `act --secret-file my.secrets` - load secret values from the `my.secrets` file.
- the secrets file uses the same format as a `.env` file (see the example below)
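For example, a `my.secrets` file might look like this (the names and values are placeholders):
```env
GITHUB_TOKEN=ghp_xxxxxxxxxxxxxxxxxxxx
MY_SECRET=somevalue
```
It can then be passed to `act` with `act --secret-file my.secrets`.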
# Vars
To run `act` with repository variables that are accessible inside the workflow via `${{ vars.VARIABLE }}`, you can enter them interactively or load them from a file. The following options are available for providing GitHub repository variables (see the sketch after this list):
- `act --var VARIABLE=somevalue` - use `somevalue` as the value for `VARIABLE`.
- `act --var-file my.variables` - load variable values from the `my.variables` file.
- the variables file uses the same format as a `.env` file
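As a sketch, a workflow step could reference such a variable like this (the variable name `GREETING` is just an example):
```yml
- name: Use a repository variable
  run: echo "${{ vars.GREETING }}"
```
which could then be run locally with `act --var GREETING=hello`.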
# Configuration
You can provide default configuration flags to `act` by either creating a `./.actrc` or a `~/.actrc` file. Any flags in the files will be applied before any flags provided directly on the command line. For example, a file like below will always use the `nektos/act-environments-ubuntu:18.04` image for the `ubuntu-latest` runner:
```sh
# sample .actrc file
-P ubuntu-latest=nektos/act-environments-ubuntu:18.04
```
Additionally, `act` supports loading environment variables from an `.env` file. The default is to look for the file in the working directory, but this can be overridden with:
```sh
act --env-file my.env
```
`.env`:
```env
MY_ENV_VAR=MY_ENV_VAR_VALUE
MY_2ND_ENV_VAR="my 2nd env var value"
```
# Skipping jobs
You cannot use the `env` context in job-level `if` conditions, but you can add a custom event property to the `github` context. You can also use this method in step-level `if` conditions.
```yml
on: push
jobs:
  deploy:
    if: ${{ !github.event.act }} # skip during local actions testing
    runs-on: ubuntu-latest
    steps:
      - run: exit 0
```
Then use this `event.json` file with `act`; otherwise the job will run:
```json
{
"act": true
}
```
Run `act` like this:
```sh
act -e event.json
```
_Hint: you can add / append `-e event.json` as a line into `./.actrc`_
# Skipping steps
Act adds a special environment variable `ACT` that can be used to skip a step that you
don't want to run locally, e.g. a step that posts a Slack message or bumps a version number.
**You cannot use this method in job-level `if` conditions; see [Skipping jobs](#skipping-jobs).**
```yml
- name: Some step
  if: ${{ !env.ACT }}
  run: |
    ...
```
# Events
Every [GitHub event](https://developer.github.com/v3/activity/events/types) is accompanied by a payload. You can provide these events in JSON format with the `--eventpath` flag to simulate specific GitHub events kicking off an action. For example:
```json
{
"pull_request": {
"head": {
"ref": "sample-head-ref"
},
"base": {
"ref": "sample-base-ref"
}
}
}
```
```sh
act pull_request -e pull-request.json
```
Act will properly provide `github.head_ref` and `github.base_ref` to the action as expected.
# Pass Inputs to Manually Triggered Workflows
Example workflow file
```yaml
on:
  workflow_dispatch:
    inputs:
      NAME:
        description: "A random input name for the workflow"
        type: string
      SOME_VALUE:
        description: "Some other input to pass"
        type: string
jobs:
  test:
    name: Test
    runs-on: ubuntu-latest
    steps:
      - name: Test with inputs
        run: |
          echo "Hello ${{ github.event.inputs.NAME }} and ${{ github.event.inputs.SOME_VALUE }}!"
```
## via input or input-file flag
- `act --input NAME=somevalue` - use `somevalue` as the value for `NAME` input.
- `act --input-file my.input` - load input values from `my.input` file.
- input file format is the same as `.env` format
## via JSON
Example JSON payload file conveniently named `payload.json`
```json
{
"inputs": {
"NAME": "Manual Workflow",
"SOME_VALUE": "ABC"
}
}
```
Command for triggering the workflow
```sh
act workflow_dispatch -e payload.json
```
# GitHub Enterprise
Act supports using and authenticating against private GitHub Enterprise servers.
To use your custom GHE server, set the CLI flag `--github-instance` to your hostname (e.g. `github.company.com`).
Please note that if your GHE server requires authentication, we will use the secret provided via `GITHUB_TOKEN`.
Please also see the [official documentation for GitHub Actions on GHE](https://docs.github.com/en/enterprise-server@3.0/admin/github-actions/about-using-actions-in-your-enterprise) for more information on how to use actions.
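As a sketch, running against a GHE instance might look like this (`github.company.com` is the placeholder hostname from above):
```sh
act --github-instance github.company.com -s GITHUB_TOKEN
```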
# Support
Need help? Ask on [Gitter](https://gitter.im/nektos/act)!

View File

@ -1 +1 @@
0.2.52
0.2.60

View File

@ -55,7 +55,11 @@ type Input struct {
replaceGheActionTokenWithGithubCom string
matrix []string
actionCachePath string
actionOfflineMode bool
logPrefixJobID bool
networkName string
useNewActionCache bool
localRepository []string
}
func (i *Input) resolve(path string) string {

View File

@ -15,7 +15,7 @@ func (i *Input) newPlatforms() map[string]string {
for _, p := range i.platforms {
pParts := strings.Split(p, "=")
if len(pParts) == 2 {
platforms[pParts[0]] = pParts[1]
platforms[strings.ToLower(pParts[0])] = pParts[1]
}
}
return platforms

View File

@ -14,6 +14,7 @@ import (
"github.com/AlecAivazis/survey/v2"
"github.com/adrg/xdg"
"github.com/andreaskoch/go-fswatch"
docker_container "github.com/docker/docker/api/types/container"
"github.com/joho/godotenv"
gitignore "github.com/sabhiram/go-gitignore"
log "github.com/sirupsen/logrus"
@ -31,7 +32,7 @@ import (
// Execute is the entry point to running the CLI
func Execute(ctx context.Context, version string) {
input := new(Input)
var rootCmd = &cobra.Command{
rootCmd := &cobra.Command{
Use: "act [event name to run] [flags]\n\nIf no event name passed, will default to \"on: push\"\nIf actions handles only one event it will be used as default instead of \"on: push\"",
Short: "Run GitHub actions locally by specifying the event name (e.g. `push`) or an action name directly.",
Args: cobra.MaximumNArgs(1),
@ -96,6 +97,10 @@ func Execute(ctx context.Context, version string) {
rootCmd.PersistentFlags().StringVarP(&input.cacheServerAddr, "cache-server-addr", "", common.GetOutboundIP().String(), "Defines the address to which the cache server binds.")
rootCmd.PersistentFlags().Uint16VarP(&input.cacheServerPort, "cache-server-port", "", 0, "Defines the port where the artifact server listens. 0 means a randomly available port.")
rootCmd.PersistentFlags().StringVarP(&input.actionCachePath, "action-cache-path", "", filepath.Join(CacheHomeDir, "act"), "Defines the path where the actions get cached and host workspaces created.")
rootCmd.PersistentFlags().BoolVarP(&input.actionOfflineMode, "action-offline-mode", "", false, "If action contents exist, they will not be fetched and pulled again. Turning this on turns off force pull")
rootCmd.PersistentFlags().StringVarP(&input.networkName, "network", "", "host", "Sets a docker network name. Defaults to host.")
rootCmd.PersistentFlags().BoolVarP(&input.useNewActionCache, "use-new-action-cache", "", false, "Enable using the new Action Cache for storing Actions locally")
rootCmd.PersistentFlags().StringArrayVarP(&input.localRepository, "local-repository", "", []string{}, "Replaces the specified repository and ref with a local folder (e.g. https://github.com/test/test@v0=/home/act/test or test/test@v0=/home/act/test, the latter matches any hosts or protocols)")
rootCmd.SetArgs(args())
if err := rootCmd.Execute(); err != nil {
@ -103,51 +108,22 @@ func Execute(ctx context.Context, version string) {
}
}
// Return locations where Act's config can be found in order: XDG spec, .actrc in HOME directory, .actrc in invocation directory
func configLocations() []string {
configFileName := ".actrc"
// reference: https://specifications.freedesktop.org/basedir-spec/latest/ar01s03.html
var actrcXdg string
for _, fileName := range []string{"act/actrc", configFileName} {
if foundConfig, err := xdg.SearchConfigFile(fileName); foundConfig != "" && err == nil {
actrcXdg = foundConfig
break
}
homePath := filepath.Join(UserHomeDir, configFileName)
invocationPath := filepath.Join(".", configFileName)
// Though named xdg, adrg's lib supports macOS and Windows config paths as well
// It also takes care of creating the parent folder so we don't need to bother later
specPath, err := xdg.ConfigFile("act/actrc")
if err != nil {
specPath = homePath
}
return []string{
filepath.Join(UserHomeDir, configFileName),
actrcXdg,
filepath.Join(".", configFileName),
}
}
var commonSocketPaths = []string{
"/var/run/docker.sock",
"/run/podman/podman.sock",
"$HOME/.colima/docker.sock",
"$XDG_RUNTIME_DIR/docker.sock",
"$XDG_RUNTIME_DIR/podman/podman.sock",
`\\.\pipe\docker_engine`,
"$HOME/.docker/run/docker.sock",
}
// returns socket path or false if not found any
func socketLocation() (string, bool) {
if dockerHost, exists := os.LookupEnv("DOCKER_HOST"); exists {
return dockerHost, true
}
for _, p := range commonSocketPaths {
if _, err := os.Lstat(os.ExpandEnv(p)); err == nil {
if strings.HasPrefix(p, `\\.\`) {
return "npipe://" + filepath.ToSlash(os.ExpandEnv(p)), true
}
return "unix://" + filepath.ToSlash(os.ExpandEnv(p)), true
}
}
return "", false
// This order should be enforced since the survey part relies on it
return []string{specPath, homePath, invocationPath}
}
func args() []string {
@ -182,7 +158,7 @@ func bugReport(ctx context.Context, version string) error {
report += sprintf("Docker host:", dockerHost)
report += fmt.Sprintln("Sockets found:")
for _, p := range commonSocketPaths {
for _, p := range container.CommonSocketLocations {
if _, err := os.Lstat(os.ExpandEnv(p)); err != nil {
continue
} else if _, err := os.Stat(os.ExpandEnv(p)); err != nil {
@ -265,7 +241,8 @@ func readArgsFile(file string, split bool) []string {
}()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
arg := strings.TrimSpace(scanner.Text())
arg := os.ExpandEnv(strings.TrimSpace(scanner.Text()))
if strings.HasPrefix(arg, "-") && split {
args = append(args, regexp.MustCompile(`\s`).Split(arg, 2)...)
} else if !split {
@ -291,19 +268,17 @@ func cleanup(inputs *Input) func(*cobra.Command, []string) {
}
}
func parseEnvs(env []string, envs map[string]string) bool {
if env != nil {
for _, envVar := range env {
e := strings.SplitN(envVar, `=`, 2)
if len(e) == 2 {
envs[e[0]] = e[1]
} else {
envs[e[0]] = ""
}
func parseEnvs(env []string) map[string]string {
envs := make(map[string]string, len(env))
for _, envVar := range env {
e := strings.SplitN(envVar, `=`, 2)
if len(e) == 2 {
envs[e[0]] = e[1]
} else {
envs[e[0]] = ""
}
return true
}
return false
return envs
}
func readYamlFile(file string) (map[string]string, error) {
@ -354,18 +329,6 @@ func parseMatrix(matrix []string) map[string]map[string]bool {
return matrixes
}
func isDockerHostURI(daemonPath string) bool {
if protoIndex := strings.Index(daemonPath, "://"); protoIndex != -1 {
scheme := daemonPath[:protoIndex]
if strings.IndexFunc(scheme, func(r rune) bool {
return (r < 'a' || r > 'z') && (r < 'A' || r > 'Z')
}) == -1 {
return true
}
}
return false
}
//nolint:gocyclo
func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []string) error {
return func(cmd *cobra.Command, args []string) error {
@ -376,27 +339,12 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
if ok, _ := cmd.Flags().GetBool("bug-report"); ok {
return bugReport(ctx, cmd.Version)
}
// Prefer DOCKER_HOST, don't override it
socketPath, hasDockerHost := os.LookupEnv("DOCKER_HOST")
if !hasDockerHost {
// a - in containerDaemonSocket means don't mount, preserve this value
// otherwise if input.containerDaemonSocket is a filepath don't use it as socketPath
skipMount := input.containerDaemonSocket == "-" || !isDockerHostURI(input.containerDaemonSocket)
if input.containerDaemonSocket != "" && !skipMount {
socketPath = input.containerDaemonSocket
} else {
socket, found := socketLocation()
if !found {
log.Errorln("daemon Docker Engine socket not found and containerDaemonSocket option was not set")
} else {
socketPath = socket
}
if !skipMount {
input.containerDaemonSocket = socketPath
}
}
os.Setenv("DOCKER_HOST", socketPath)
if ret, err := container.GetSocketAndHost(input.containerDaemonSocket); err != nil {
log.Warnf("Couldn't get a valid docker connection: %+v", err)
} else {
os.Setenv("DOCKER_HOST", ret.Host)
input.containerDaemonSocket = ret.Socket
log.Infof("Using docker host '%s', and daemon socket '%s'", ret.Host, ret.Socket)
}
if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" && input.containerArchitecture == "" {
@ -409,13 +357,11 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
}
log.Debugf("Loading environment from %s", input.Envfile())
envs := make(map[string]string)
_ = parseEnvs(input.envs, envs)
envs := parseEnvs(input.envs)
_ = readEnvs(input.Envfile(), envs)
log.Debugf("Loading action inputs from %s", input.Inputfile())
inputs := make(map[string]string)
_ = parseEnvs(input.inputs, inputs)
inputs := parseEnvs(input.inputs)
_ = readEnvs(input.Inputfile(), inputs)
log.Debugf("Loading secrets from %s", input.Secretfile())
@ -552,6 +498,7 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
}
}
if !cfgFound && len(cfgLocations) > 0 {
// The first config location refers to the global config folder
if err := defaultImageSurvey(cfgLocations[0]); err != nil {
log.Fatal(err)
}
@ -578,11 +525,12 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
EventName: eventName,
EventPath: input.EventPath(),
DefaultBranch: defaultbranch,
ForcePull: input.forcePull,
ForcePull: !input.actionOfflineMode && input.forcePull,
ForceRebuild: input.forceRebuild,
ReuseContainers: input.reuseContainers,
Workdir: input.Workdir(),
ActionCacheDir: input.actionCachePath,
ActionOfflineMode: input.actionOfflineMode,
BindWorkdir: input.bindWorkdir,
LogOutput: !input.noOutput,
JSONLogger: input.jsonLogger,
@ -612,6 +560,32 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
ReplaceGheActionWithGithubCom: input.replaceGheActionWithGithubCom,
ReplaceGheActionTokenWithGithubCom: input.replaceGheActionTokenWithGithubCom,
Matrix: matrixes,
ContainerNetworkMode: docker_container.NetworkMode(input.networkName),
}
if input.useNewActionCache || len(input.localRepository) > 0 {
if input.actionOfflineMode {
config.ActionCache = &runner.GoGitActionCacheOfflineMode{
Parent: runner.GoGitActionCache{
Path: config.ActionCacheDir,
},
}
} else {
config.ActionCache = &runner.GoGitActionCache{
Path: config.ActionCacheDir,
}
}
if len(input.localRepository) > 0 {
localRepositories := map[string]string{}
for _, l := range input.localRepository {
k, v, _ := strings.Cut(l, "=")
localRepositories[k] = v
}
config.ActionCache = &runner.LocalRepositoryCache{
Parent: config.ActionCache,
LocalRepositories: localRepositories,
CacheDirCache: map[string]string{},
}
}
}
r, err := runner.New(config)
if err != nil {

go.mod (49 changed lines)
View File

@ -7,33 +7,33 @@ require (
github.com/Masterminds/semver v1.5.0
github.com/adrg/xdg v0.4.0
github.com/andreaskoch/go-fswatch v1.0.0
github.com/creack/pty v1.1.18
github.com/docker/cli v24.0.6+incompatible
github.com/creack/pty v1.1.21
github.com/docker/cli v24.0.7+incompatible
github.com/docker/distribution v2.8.3+incompatible
github.com/docker/docker v24.0.6+incompatible // 24.0 branch
github.com/docker/docker v24.0.7+incompatible // 24.0 branch
github.com/docker/go-connections v0.4.0
github.com/go-git/go-billy/v5 v5.5.0
github.com/go-git/go-git/v5 v5.9.0
github.com/go-git/go-git/v5 v5.11.0
github.com/gobwas/glob v0.2.3
github.com/imdario/mergo v0.3.16
github.com/joho/godotenv v1.5.1
github.com/julienschmidt/httprouter v1.3.0
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
github.com/mattn/go-isatty v0.0.19
github.com/moby/buildkit v0.12.2
github.com/mattn/go-isatty v0.0.20
github.com/moby/buildkit v0.12.5
github.com/moby/patternmatcher v0.6.0
github.com/opencontainers/image-spec v1.1.0-rc5
github.com/opencontainers/image-spec v1.1.0
github.com/opencontainers/selinux v1.11.0
github.com/pkg/errors v0.9.1
github.com/rhysd/actionlint v1.6.26
github.com/rhysd/actionlint v1.6.27
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.7.0
github.com/spf13/cobra v1.8.0
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.8.4
github.com/timshannon/bolthold v0.0.0-20210913165410-232392fc8a6a
go.etcd.io/bbolt v1.3.7
golang.org/x/term v0.13.0
go.etcd.io/bbolt v1.3.9
golang.org/x/term v0.17.0
gopkg.in/yaml.v3 v3.0.1
gotest.tools/v3 v3.5.1
)
@ -42,8 +42,7 @@ require (
dario.cat/mergo v1.0.0 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect
github.com/acomagu/bufpipe v1.0.4 // indirect
github.com/cloudflare/circl v1.3.3 // indirect
github.com/cloudflare/circl v1.3.7 // indirect
github.com/containerd/containerd v1.7.2 // indirect
github.com/cyphar/filepath-securejoin v0.2.4 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
@ -51,16 +50,16 @@ require (
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/fatih/color v1.15.0 // indirect
github.com/fatih/color v1.16.0 // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/klauspost/compress v1.16.3 // indirect
github.com/klauspost/compress v1.17.2 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
@ -68,24 +67,24 @@ require (
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/runc v1.1.7 // indirect
github.com/opencontainers/runc v1.1.12 // indirect
github.com/pjbgf/sha1cd v0.3.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/sergi/go-diff v1.2.0 // indirect
github.com/skeema/knownhosts v1.2.0 // indirect
github.com/skeema/knownhosts v1.2.1 // indirect
github.com/stretchr/objx v0.5.0 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
golang.org/x/crypto v0.13.0 // indirect
golang.org/x/crypto v0.17.0 // indirect
golang.org/x/mod v0.12.0 // indirect
golang.org/x/net v0.15.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.13.0 // indirect
golang.org/x/text v0.13.0 // indirect
golang.org/x/net v0.19.0 // indirect
golang.org/x/sync v0.6.0 // indirect
golang.org/x/sys v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/tools v0.13.0 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect

go.sum (102 changed lines)
View File

@ -15,8 +15,6 @@ github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63n
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w=
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg=
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ=
github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
github.com/adrg/xdg v0.4.0 h1:RzRqFcjH4nE5C6oTAxhBtoE2IRyjBSa62SCbyPidvls=
github.com/adrg/xdg v0.4.0/go.mod h1:N6ag73EX4wyxeaoeHctc1mas01KZgsj5tYiAIwqJE/E=
github.com/andreaskoch/go-fswatch v1.0.0 h1:la8nP/HiaFCxP2IM6NZNUCoxgLWuyNFgH0RligBbnJU=
@ -24,14 +22,15 @@ github.com/andreaskoch/go-fswatch v1.0.0/go.mod h1:r5/iV+4jfwoY2sYqBkg8vpF04ehOv
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
github.com/containerd/containerd v1.7.2 h1:UF2gdONnxO8I6byZXDi5sXWiWvlW3D/sci7dTQimEJo=
github.com/containerd/containerd v1.7.2/go.mod h1:afcz74+K10M/+cjGHIVQrCt3RAQhUSCAjJ9iMYhhkuI=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0=
github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -39,12 +38,12 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/cli v24.0.6+incompatible h1:fF+XCQCgJjjQNIMjzaSmiKJSCcfcXb3TWTcc7GAneOY=
github.com/docker/cli v24.0.6+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg=
github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v24.0.6+incompatible h1:hceabKCtUgDqPu+qm0NgsaXf28Ljf4/pWFL7xjWWDgE=
github.com/docker/docker v24.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM=
github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
@ -54,16 +53,16 @@ github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD
github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU=
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20230305113008-0c11038e723f h1:Pz0DHeFij3XFhoBRGUDPzSJ+w2UcK5/0JvF8DRI58r8=
github.com/go-git/go-git/v5 v5.9.0 h1:cD9SFA7sHVRdJ7AYck1ZaAa/yeuBvGPxwXDL8cxrObY=
github.com/go-git/go-git/v5 v5.9.0/go.mod h1:RKIqga24sWdMGZF+1Ekv9kylsDz6LzdTSI2s/OsZWE0=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4=
github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
@ -72,8 +71,8 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog=
@ -94,31 +93,29 @@ github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.16.3 h1:XuJt9zzcnaz6a16/OU53ZjWp/v7/42WcR5t2a0PcNQY=
github.com/klauspost/compress v1.16.3/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A=
github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/moby/buildkit v0.12.2 h1:B7guBgY6sfk4dBlv/ORUxyYlp0UojYaYyATgtNwSCXc=
github.com/moby/buildkit v0.12.2/go.mod h1:adB4y0SxxX8trnrY+oEulb48ODLqPO6pKMF0ppGcCoI=
github.com/moby/buildkit v0.12.5 h1:RNHH1l3HDhYyZafr5EgstEu8aGNCwyfvMtrQDtjH9T0=
github.com/moby/buildkit v0.12.5/go.mod h1:YGwjA2loqyiYfZeEo8FtI7z4x5XponAaIWsWcSjWwso=
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
@ -129,10 +126,10 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI=
github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
github.com/opencontainers/runc v1.1.7 h1:y2EZDS8sNng4Ksf0GUYNhKbTShZJPJg1FiXJNH/uoCk=
github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss=
github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8=
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
@ -142,11 +139,11 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rhysd/actionlint v1.6.26 h1:zi7jPZf3Ks14gCXYAAL47uBziyFlX7+Xwilqhexct9g=
github.com/rhysd/actionlint v1.6.26/go.mod h1:TIj1DlCgtYLOv5CH9wCK+WJTOr1qAdnFzkGi0IgSCO4=
github.com/rhysd/actionlint v1.6.27 h1:xxwe8YmveBcC8lydW6GoHMGmB6H/MTqUU60F2p10wjw=
github.com/rhysd/actionlint v1.6.27/go.mod h1:m2nFUjAnOrxCMXuOMz9evYBRCLUsMnKY2IJl/N5umbk=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
@ -159,10 +156,10 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJCpM=
github.com/skeema/knownhosts v1.2.0/go.mod h1:g4fPeYpque7P0xefxtGzV81ihjC8sX2IqpAoNkjxbMo=
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ=
github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@ -184,8 +181,9 @@ github.com/timshannon/bolthold v0.0.0-20210913165410-232392fc8a6a h1:oIi7H/bwFUY
github.com/timshannon/bolthold v0.0.0-20210913165410-232392fc8a6a/go.mod h1:iSvujNDmpZ6eQX+bg/0X3lF7LEmZ8N77g2a/J/+Zt2U=
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
@ -194,8 +192,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI=
go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@ -203,8 +201,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
@ -222,15 +220,15 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -252,15 +250,15 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@ -268,8 +266,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=

View File

@ -9,6 +9,7 @@ import (
"net/http"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync/atomic"
@ -386,7 +387,12 @@ func (h *Handler) findCache(db *bolthold.Store, keys []string, version string) (
for _, prefix := range keys[1:] {
found := false
if err := db.ForEach(bolthold.Where("Key").Ge(prefix).And("Version").Eq(version).SortBy("Key"), func(v *Cache) error {
prefixPattern := fmt.Sprintf("^%s", regexp.QuoteMeta(prefix))
re, err := regexp.Compile(prefixPattern)
if err != nil {
continue
}
if err := db.ForEach(bolthold.Where("Key").RegExp(re).And("Version").Eq(version).SortBy("CreatedAt").Reverse(), func(v *Cache) error {
if !strings.HasPrefix(v.Key, prefix) {
return stop
}
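
The lookup above now anchors a regular expression at the start of each candidate key (restore-key prefixes are escaped with regexp.QuoteMeta) and sorts by CreatedAt in reverse, so the newest cache entry whose key starts with the prefix wins. A minimal, self-contained sketch of just the prefix-to-regexp step (the helper name is illustrative, not part of the change):

package main

import (
	"fmt"
	"regexp"
)

// prefixRegexp builds an anchored pattern that matches keys starting with the
// given restore-key prefix; QuoteMeta escapes any regex metacharacters in it.
func prefixRegexp(prefix string) (*regexp.Regexp, error) {
	return regexp.Compile("^" + regexp.QuoteMeta(prefix))
}

func main() {
	re, _ := prefixRegexp("Linux-build-")
	fmt.Println(re.MatchString("Linux-build-abc123")) // true
	fmt.Println(re.MatchString("Windows-build-xyz"))  // false
}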

View File

@ -221,10 +221,11 @@ func findGitSlug(url string, githubInstance string) (string, string, error) {
// NewGitCloneExecutorInput the input for the NewGitCloneExecutor
type NewGitCloneExecutorInput struct {
URL string
Ref string
Dir string
Token string
URL string
Ref string
Dir string
Token string
OfflineMode bool
// For Gitea
InsecureSkipTLS bool
@ -307,6 +308,8 @@ func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
return err
}
isOfflineMode := input.OfflineMode
// fetch latest changes
fetchOptions, pullOptions := gitOptions(input.Token)
@ -315,9 +318,11 @@ func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
pullOptions.InsecureSkipTLS = true
}
err = r.Fetch(&fetchOptions)
if err != nil && !errors.Is(err, git.NoErrAlreadyUpToDate) {
return err
if !isOfflineMode {
err = r.Fetch(&fetchOptions)
if err != nil && !errors.Is(err, git.NoErrAlreadyUpToDate) {
return err
}
}
var hash *plumbing.Hash
@ -377,9 +382,10 @@ func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
return err
}
}
if err = w.Pull(&pullOptions); err != nil && err != git.NoErrAlreadyUpToDate {
logger.Debugf("Unable to pull %s: %v", refName, err)
if !isOfflineMode {
if err = w.Pull(&pullOptions); err != nil && err != git.NoErrAlreadyUpToDate {
logger.Debugf("Unable to pull %s: %v", refName, err)
}
}
logger.Debugf("Cloned %s to %s", input.URL, input.Dir)

View File

@ -4,34 +4,36 @@ import (
"context"
"io"
"github.com/docker/go-connections/nat"
"github.com/nektos/act/pkg/common"
)
// NewContainerInput the input for the New function
type NewContainerInput struct {
Image string
Username string
Password string
Entrypoint []string
Cmd []string
WorkingDir string
Env []string
Binds []string
Mounts map[string]string
Name string
Stdout io.Writer
Stderr io.Writer
NetworkMode string
Privileged bool
UsernsMode string
Platform string
Options string
Image string
Username string
Password string
Entrypoint []string
Cmd []string
WorkingDir string
Env []string
Binds []string
Mounts map[string]string
Name string
Stdout io.Writer
Stderr io.Writer
NetworkMode string
Privileged bool
UsernsMode string
Platform string
Options string
NetworkAliases []string
ExposedPorts nat.PortSet
PortBindings nat.PortMap
// Gitea specific
AutoRemove bool
NetworkAliases []string
ValidVolumes []string
AutoRemove bool
ValidVolumes []string
}
// FileEntry is a file to copy to a container

View File

@ -1,4 +1,4 @@
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
package container

View File

@ -1,4 +1,4 @@
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
package container

View File

@ -1,4 +1,4 @@
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
// This file is exact copy of https://github.com/docker/cli/blob/9ac8584acfd501c3f4da0e845e3a40ed15c85041/cli/command/container/opts.go
// appended with license information.

View File

@ -1,4 +1,4 @@
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
package container

View File

@ -1,4 +1,4 @@
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
package container

View File

@ -1,4 +1,4 @@
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
package container
@ -15,6 +15,20 @@ func NewDockerNetworkCreateExecutor(name string) common.Executor {
if err != nil {
return err
}
defer cli.Close()
// Only create the network if it doesn't exist
networks, err := cli.NetworkList(ctx, types.NetworkListOptions{})
if err != nil {
return err
}
common.Logger(ctx).Debugf("%v", networks)
for _, network := range networks {
if network.Name == name {
common.Logger(ctx).Debugf("Network %v exists", name)
return nil
}
}
_, err = cli.NetworkCreate(ctx, name, types.NetworkCreate{
Driver: "bridge",
@ -34,7 +48,32 @@ func NewDockerNetworkRemoveExecutor(name string) common.Executor {
if err != nil {
return err
}
defer cli.Close()
return cli.NetworkRemove(ctx, name)
// Make sure that all networks with the specified name are removed
// cli.NetworkRemove refuses to remove a network if there are duplicates
networks, err := cli.NetworkList(ctx, types.NetworkListOptions{})
if err != nil {
return err
}
common.Logger(ctx).Debugf("%v", networks)
for _, network := range networks {
if network.Name == name {
result, err := cli.NetworkInspect(ctx, network.ID, types.NetworkInspectOptions{})
if err != nil {
return err
}
if len(result.Containers) == 0 {
if err = cli.NetworkRemove(ctx, network.ID); err != nil {
common.Logger(ctx).Debugf("%v", err)
}
} else {
common.Logger(ctx).Debugf("Refusing to remove network %v because it still has active endpoints", name)
}
}
}
return err
}
}

View File

@ -1,4 +1,4 @@
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
package container

View File

@ -1,4 +1,4 @@
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
package container
@ -38,6 +38,7 @@ import (
"golang.org/x/term"
"github.com/nektos/act/pkg/common"
"github.com/nektos/act/pkg/filecollector"
)
// NewContainer creates a reference to a container
@ -86,7 +87,7 @@ func supportsContainerImagePlatform(ctx context.Context, cli client.APIClient) b
func (cr *containerReference) Create(capAdd []string, capDrop []string) common.Executor {
return common.
NewInfoExecutor("%sdocker create image=%s platform=%s entrypoint=%+q cmd=%+q", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Entrypoint, cr.input.Cmd).
NewInfoExecutor("%sdocker create image=%s platform=%s entrypoint=%+q cmd=%+q network=%+q", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Entrypoint, cr.input.Cmd, cr.input.NetworkMode).
Then(
common.NewPipelineExecutor(
cr.connect(),
@ -98,7 +99,7 @@ func (cr *containerReference) Create(capAdd []string, capDrop []string) common.E
func (cr *containerReference) Start(attach bool) common.Executor {
return common.
NewInfoExecutor("%sdocker run image=%s platform=%s entrypoint=%+q cmd=%+q", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Entrypoint, cr.input.Cmd).
NewInfoExecutor("%sdocker run image=%s platform=%s entrypoint=%+q cmd=%+q network=%+q", logPrefix, cr.input.Image, cr.input.Platform, cr.input.Entrypoint, cr.input.Cmd, cr.input.NetworkMode).
Then(
common.NewPipelineExecutor(
cr.connect(),
@ -260,8 +261,10 @@ func RunnerArch(ctx context.Context) string {
archMapper := map[string]string{
"x86_64": "X64",
"amd64": "X64",
"386": "X86",
"aarch64": "ARM64",
"arm64": "ARM64",
}
if arch, ok := archMapper[info.Architecture]; ok {
return arch
@ -365,15 +368,25 @@ func (cr *containerReference) mergeContainerConfigs(ctx context.Context, config
return nil, nil, fmt.Errorf("Cannot parse container options: '%s': '%w'", input.Options, err)
}
// If a service container's network is set to `host`, the container will not be able to
// connect to the specified network created for the job container and the service containers.
// So comment out the following code.
// FIXME: If everything is fine after gitea/act v0.260.0, remove the following comment.
// In the old fork version, the code is
// if len(copts.netMode.Value()) == 0 {
// if err = copts.netMode.Set("host"); err != nil {
// return nil, nil, fmt.Errorf("Cannot parse networkmode=host. This is an internal error and should not happen: '%w'", err)
// }
// }
// And it has been commented with:
// If a service container's network is set to `host`, the container will not be able to
// connect to the specified network created for the job container and the service containers.
// So comment out the following code.
// Not sure if it's still necessary to comment it out in the new version,
// since it's cr.input.NetworkMode now.
if len(copts.netMode.Value()) == 0 {
if err = copts.netMode.Set(cr.input.NetworkMode); err != nil {
return nil, nil, fmt.Errorf("Cannot parse networkmode=%s. This is an internal error and should not happen: '%w'", cr.input.NetworkMode, err)
}
}
// If the `privileged` config has been disabled, `copts.privileged` need to be forced to false,
// even if the user specifies `--privileged` in the options string.
@ -426,10 +439,11 @@ func (cr *containerReference) create(capAdd []string, capDrop []string) common.E
input := cr.input
config := &container.Config{
Image: input.Image,
WorkingDir: input.WorkingDir,
Env: input.Env,
Tty: isTerminal,
Image: input.Image,
WorkingDir: input.WorkingDir,
Env: input.Env,
ExposedPorts: input.ExposedPorts,
Tty: isTerminal,
}
logger.Debugf("Common container.Config ==> %+v", config)
@ -465,14 +479,15 @@ func (cr *containerReference) create(capAdd []string, capDrop []string) common.E
}
hostConfig := &container.HostConfig{
CapAdd: capAdd,
CapDrop: capDrop,
Binds: input.Binds,
Mounts: mounts,
NetworkMode: container.NetworkMode(input.NetworkMode),
Privileged: input.Privileged,
UsernsMode: container.UsernsMode(input.UsernsMode),
AutoRemove: input.AutoRemove,
CapAdd: capAdd,
CapDrop: capDrop,
Binds: input.Binds,
Mounts: mounts,
NetworkMode: container.NetworkMode(input.NetworkMode),
Privileged: input.Privileged,
UsernsMode: container.UsernsMode(input.UsernsMode),
PortBindings: input.PortBindings,
AutoRemove: input.AutoRemove,
}
logger.Debugf("Common container.HostConfig ==> %+v", hostConfig)
@ -484,10 +499,11 @@ func (cr *containerReference) create(capAdd []string, capDrop []string) common.E
// For Gitea
config, hostConfig = cr.sanitizeConfig(ctx, config, hostConfig)
// For Gitea
// network-scoped alias is supported only for containers in user defined networks
var networkingConfig *network.NetworkingConfig
if hostConfig.NetworkMode.IsUserDefined() && len(input.NetworkAliases) > 0 {
logger.Debugf("input.NetworkAliases ==> %v", input.NetworkAliases)
n := hostConfig.NetworkMode
// IsUserDefined and IsHost are broken on windows
if n.IsUserDefined() && n != "host" && len(input.NetworkAliases) > 0 {
endpointConfig := &network.EndpointSettings{
Aliases: input.NetworkAliases,
}
@ -709,10 +725,28 @@ func (cr *containerReference) waitForCommand(ctx context.Context, isTerminal boo
}
func (cr *containerReference) CopyTarStream(ctx context.Context, destPath string, tarStream io.Reader) error {
err := cr.cli.CopyToContainer(ctx, cr.id, destPath, tarStream, types.CopyToContainerOptions{})
// Mkdir
buf := &bytes.Buffer{}
tw := tar.NewWriter(buf)
_ = tw.WriteHeader(&tar.Header{
Name: destPath,
Mode: 777,
Typeflag: tar.TypeDir,
})
tw.Close()
err := cr.cli.CopyToContainer(ctx, cr.id, "/", buf, types.CopyToContainerOptions{})
if err != nil {
return fmt.Errorf("failed to mkdir to copy content to container: %w", err)
}
// Copy Content
err = cr.cli.CopyToContainer(ctx, cr.id, destPath, tarStream, types.CopyToContainerOptions{})
if err != nil {
return fmt.Errorf("failed to copy content to container: %w", err)
}
// If this fails, folders will have the wrong permissions in a non-root container
if cr.UID != 0 || cr.GID != 0 {
_ = cr.Exec([]string{"chown", "-R", fmt.Sprintf("%d:%d", cr.UID, cr.GID), destPath}, nil, "0", "")(ctx)
}
return nil
}
@ -753,12 +787,12 @@ func (cr *containerReference) copyDir(dstPath string, srcPath string, useGitIgno
ignorer = gitignore.NewMatcher(ps)
}
fc := &fileCollector{
Fs: &defaultFs{},
fc := &filecollector.FileCollector{
Fs: &filecollector.DefaultFs{},
Ignorer: ignorer,
SrcPath: srcPath,
SrcPrefix: srcPrefix,
Handler: &tarCollector{
Handler: &filecollector.TarCollector{
TarWriter: tw,
UID: cr.UID,
GID: cr.GID,
@ -766,7 +800,7 @@ func (cr *containerReference) copyDir(dstPath string, srcPath string, useGitIgno
},
}
err = filepath.Walk(srcPath, fc.collectFiles(ctx, []string{}))
err = filepath.Walk(srcPath, fc.CollectFiles(ctx, []string{}))
if err != nil {
return err
}

View File

@ -23,6 +23,7 @@ func TestDocker(t *testing.T) {
ctx := context.Background()
client, err := GetDockerClient(ctx)
assert.NoError(t, err)
defer client.Close()
dockerBuild := NewDockerBuildExecutor(NewDockerBuildExecutorInput{
ContextDir: "testdata",

View File

@ -0,0 +1,134 @@
package container
import (
"fmt"
"os"
"path/filepath"
"strings"
log "github.com/sirupsen/logrus"
)
var CommonSocketLocations = []string{
"/var/run/docker.sock",
"/run/podman/podman.sock",
"$HOME/.colima/docker.sock",
"$XDG_RUNTIME_DIR/docker.sock",
"$XDG_RUNTIME_DIR/podman/podman.sock",
`\\.\pipe\docker_engine`,
"$HOME/.docker/run/docker.sock",
}
// returns the socket URI, or false if none was found
func socketLocation() (string, bool) {
if dockerHost, exists := os.LookupEnv("DOCKER_HOST"); exists {
return dockerHost, true
}
for _, p := range CommonSocketLocations {
if _, err := os.Lstat(os.ExpandEnv(p)); err == nil {
if strings.HasPrefix(p, `\\.\`) {
return "npipe://" + filepath.ToSlash(os.ExpandEnv(p)), true
}
return "unix://" + filepath.ToSlash(os.ExpandEnv(p)), true
}
}
return "", false
}
// This function, `isDockerHostURI`, takes a string argument `daemonPath`. It checks if the
// `daemonPath` is a valid Docker host URI. It does this by checking if the scheme of the URI (the
// part before "://") contains only alphabetic characters. If it does, the function returns true,
// indicating that the `daemonPath` is a Docker host URI. If it doesn't, or if the "://" delimiter
// is not found in the `daemonPath`, the function returns false.
func isDockerHostURI(daemonPath string) bool {
if protoIndex := strings.Index(daemonPath, "://"); protoIndex != -1 {
scheme := daemonPath[:protoIndex]
if strings.IndexFunc(scheme, func(r rune) bool {
return (r < 'a' || r > 'z') && (r < 'A' || r > 'Z')
}) == -1 {
return true
}
}
return false
}
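
// Illustrative note (not part of the change): the scheme check above accepts
// only alphabetic characters before "://", so daemon URIs pass while bare file
// paths and the "-" sentinel do not, e.g.:
//
//	isDockerHostURI("unix:///var/run/docker.sock")    // true
//	isDockerHostURI("npipe:////./pipe/docker_engine") // true
//	isDockerHostURI("/var/run/docker.sock")           // false (no "://")
//	isDockerHostURI("-")                              // false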
type SocketAndHost struct {
Socket string
Host string
}
func GetSocketAndHost(containerSocket string) (SocketAndHost, error) {
log.Debugf("Handling container host and socket")
// Prefer DOCKER_HOST, don't override it
dockerHost, hasDockerHost := socketLocation()
socketHost := SocketAndHost{Socket: containerSocket, Host: dockerHost}
// ** socketHost.Socket cases **
// Case 1: User does _not_ want to mount a daemon socket (passes a dash)
// Case 2: User passes a filepath to the socket; is that even valid?
// Case 3: User passes a valid socket; do nothing
// Case 4: User omitted the flag; set a sane default
// ** DOCKER_HOST cases **
// Case A: DOCKER_HOST is set; use it, i.e. do nothing
// Case B: DOCKER_HOST is empty; use sane defaults
// Set host for sanity's sake, when the socket isn't useful
if !hasDockerHost && (socketHost.Socket == "-" || !isDockerHostURI(socketHost.Socket) || socketHost.Socket == "") {
// Cases: 1B, 2B, 4B
socket, found := socketLocation()
socketHost.Host = socket
hasDockerHost = found
}
// A - (dash) in socketHost.Socket means don't mount, preserve this value
// otherwise if socketHost.Socket is a filepath don't use it as socket
// Exit early if we're in an invalid state (e.g. when no DOCKER_HOST and user supplied "-", a dash or omitted)
if !hasDockerHost && socketHost.Socket != "" && !isDockerHostURI(socketHost.Socket) {
// Cases: 1B, 2B
// Should we early-exit here, since there is no host nor socket to talk to?
return SocketAndHost{}, fmt.Errorf("DOCKER_HOST was not set, couldn't be found in the usual locations, and the container daemon socket ('%s') is invalid", socketHost.Socket)
}
// Default to DOCKER_HOST if set
if socketHost.Socket == "" && hasDockerHost {
// Cases: 4A
log.Debugf("Defaulting container socket to DOCKER_HOST")
socketHost.Socket = socketHost.Host
}
// Set sane default socket location if user omitted it
if socketHost.Socket == "" {
// Cases: 4B
socket, _ := socketLocation()
// socket is empty if it isn't found, so assignment here is at worst a no-op
log.Debugf("Defaulting container socket to default '%s'", socket)
socketHost.Socket = socket
}
// Exit if both the DOCKER_HOST and socket are fulfilled
if hasDockerHost {
// Cases: 1A, 2A, 3A, 4A
if !isDockerHostURI(socketHost.Socket) {
// Cases: 1A, 2A
log.Debugf("DOCKER_HOST is set, but socket is invalid '%s'", socketHost.Socket)
}
return socketHost, nil
}
// Set a sane DOCKER_HOST default if we can
if isDockerHostURI(socketHost.Socket) {
// Cases: 3B
log.Debugf("Setting DOCKER_HOST to container socket '%s'", socketHost.Socket)
socketHost.Host = socketHost.Socket
// Both DOCKER_HOST and container socket are valid; short-circuit exit
return socketHost, nil
}
// Here there is no DOCKER_HOST _and_ the supplied container socket is not a valid URI (either invalid or a file path)
// Cases: 2B <- but is already handled at the top
// I.e. this path should never be taken
return SocketAndHost{}, fmt.Errorf("no DOCKER_HOST and an invalid container socket '%s'", socketHost.Socket)
}
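
A short usage sketch for the case enumeration above (case 4A: flag omitted, DOCKER_HOST set), assuming the exported GetSocketAndHost from this package; the socket path is illustrative:

package main

import (
	"fmt"
	"os"

	"github.com/nektos/act/pkg/container"
)

func main() {
	// Flag omitted ("") and DOCKER_HOST set: both fields default to DOCKER_HOST.
	os.Setenv("DOCKER_HOST", "unix:///var/run/docker.sock")
	sh, err := container.GetSocketAndHost("")
	fmt.Println(sh.Socket, sh.Host, err)
	// unix:///var/run/docker.sock unix:///var/run/docker.sock <nil>
}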

View File

@ -0,0 +1,150 @@
package container
import (
"os"
"testing"
log "github.com/sirupsen/logrus"
assert "github.com/stretchr/testify/assert"
)
func init() {
log.SetLevel(log.DebugLevel)
}
var originalCommonSocketLocations = CommonSocketLocations
func TestGetSocketAndHostWithSocket(t *testing.T) {
// Arrange
CommonSocketLocations = originalCommonSocketLocations
dockerHost := "unix:///my/docker/host.sock"
socketURI := "/path/to/my.socket"
os.Setenv("DOCKER_HOST", dockerHost)
// Act
ret, err := GetSocketAndHost(socketURI)
// Assert
assert.Nil(t, err)
assert.Equal(t, SocketAndHost{socketURI, dockerHost}, ret)
}
func TestGetSocketAndHostNoSocket(t *testing.T) {
// Arrange
dockerHost := "unix:///my/docker/host.sock"
os.Setenv("DOCKER_HOST", dockerHost)
// Act
ret, err := GetSocketAndHost("")
// Assert
assert.Nil(t, err)
assert.Equal(t, SocketAndHost{dockerHost, dockerHost}, ret)
}
func TestGetSocketAndHostOnlySocket(t *testing.T) {
// Arrange
socketURI := "/path/to/my.socket"
os.Unsetenv("DOCKER_HOST")
CommonSocketLocations = originalCommonSocketLocations
defaultSocket, defaultSocketFound := socketLocation()
// Act
ret, err := GetSocketAndHost(socketURI)
// Assert
assert.NoError(t, err, "Expected no error from GetSocketAndHost")
assert.Equal(t, true, defaultSocketFound, "Expected to find default socket")
assert.Equal(t, socketURI, ret.Socket, "Expected socket to match common location")
assert.Equal(t, defaultSocket, ret.Host, "Expected ret.Host to match default socket location")
}
func TestGetSocketAndHostDontMount(t *testing.T) {
// Arrange
CommonSocketLocations = originalCommonSocketLocations
dockerHost := "unix:///my/docker/host.sock"
os.Setenv("DOCKER_HOST", dockerHost)
// Act
ret, err := GetSocketAndHost("-")
// Assert
assert.Nil(t, err)
assert.Equal(t, SocketAndHost{"-", dockerHost}, ret)
}
func TestGetSocketAndHostNoHostNoSocket(t *testing.T) {
// Arrange
CommonSocketLocations = originalCommonSocketLocations
os.Unsetenv("DOCKER_HOST")
defaultSocket, found := socketLocation()
// Act
ret, err := GetSocketAndHost("")
// Assert
assert.Equal(t, true, found, "Expected a default socket to be found")
assert.Nil(t, err, "Expected no error from GetSocketAndHost")
assert.Equal(t, SocketAndHost{defaultSocket, defaultSocket}, ret, "Expected to match default socket location")
}
// Catch
// > Your code breaks setting DOCKER_HOST if shouldMount is false.
// > This happens if neither DOCKER_HOST nor --container-daemon-socket has a value, but socketLocation() returns a URI
func TestGetSocketAndHostNoHostNoSocketDefaultLocation(t *testing.T) {
// Arrange
mySocketFile, tmpErr := os.CreateTemp("", "act-*.sock")
mySocket := mySocketFile.Name()
unixSocket := "unix://" + mySocket
defer os.RemoveAll(mySocket)
assert.NoError(t, tmpErr)
os.Unsetenv("DOCKER_HOST")
CommonSocketLocations = []string{mySocket}
defaultSocket, found := socketLocation()
// Act
ret, err := GetSocketAndHost("")
// Assert
assert.Equal(t, unixSocket, defaultSocket, "Expected default socket to match common socket location")
assert.Equal(t, true, found, "Expected default socket to be found")
assert.Nil(t, err, "Expected no error from GetSocketAndHost")
assert.Equal(t, SocketAndHost{unixSocket, unixSocket}, ret, "Expected to match default socket location")
}
func TestGetSocketAndHostNoHostInvalidSocket(t *testing.T) {
// Arrange
os.Unsetenv("DOCKER_HOST")
mySocket := "/my/socket/path.sock"
CommonSocketLocations = []string{"/unusual", "/socket", "/location"}
defaultSocket, found := socketLocation()
// Act
ret, err := GetSocketAndHost(mySocket)
// Assert
assert.Equal(t, false, found, "Expected no default socket to be found")
assert.Equal(t, "", defaultSocket, "Expected no default socket to be found")
assert.Equal(t, SocketAndHost{}, ret, "Expected to match default socket location")
assert.Error(t, err, "Expected an error in invalid state")
}
func TestGetSocketAndHostOnlySocketValidButUnusualLocation(t *testing.T) {
// Arrange
socketURI := "unix:///path/to/my.socket"
CommonSocketLocations = []string{"/unusual", "/location"}
os.Unsetenv("DOCKER_HOST")
defaultSocket, found := socketLocation()
// Act
ret, err := GetSocketAndHost(socketURI)
// Assert
// Default socket locations
assert.Equal(t, "", defaultSocket, "Expect default socket location to be empty")
assert.Equal(t, false, found, "Expected no default socket to be found")
// Sane default
assert.Nil(t, err, "Expect no error from GetSocketAndHost")
assert.Equal(t, socketURI, ret.Host, "Expect host to default to unusual socket")
}

View File

@ -1,4 +1,4 @@
//go:build WITHOUT_DOCKER || !(linux || darwin || windows)
//go:build WITHOUT_DOCKER || !(linux || darwin || windows || netbsd)
package container

View File

@ -1,4 +1,4 @@
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows))
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
package container

View File

@ -21,6 +21,7 @@ import (
"golang.org/x/term"
"github.com/nektos/act/pkg/common"
"github.com/nektos/act/pkg/filecollector"
"github.com/nektos/act/pkg/lookpath"
)
@ -71,7 +72,7 @@ func (e *HostEnvironment) CopyTarStream(ctx context.Context, destPath string, ta
return err
}
tr := tar.NewReader(tarStream)
cp := &copyCollector{
cp := &filecollector.CopyCollector{
DstDir: destPath,
}
for {
@ -110,16 +111,16 @@ func (e *HostEnvironment) CopyDir(destPath string, srcPath string, useGitIgnore
ignorer = gitignore.NewMatcher(ps)
}
fc := &fileCollector{
Fs: &defaultFs{},
fc := &filecollector.FileCollector{
Fs: &filecollector.DefaultFs{},
Ignorer: ignorer,
SrcPath: srcPath,
SrcPrefix: srcPrefix,
Handler: &copyCollector{
Handler: &filecollector.CopyCollector{
DstDir: destPath,
},
}
return filepath.Walk(srcPath, fc.collectFiles(ctx, []string{}))
return filepath.Walk(srcPath, fc.CollectFiles(ctx, []string{}))
}
}
@ -132,21 +133,21 @@ func (e *HostEnvironment) GetContainerArchive(ctx context.Context, srcPath strin
if err != nil {
return nil, err
}
tc := &tarCollector{
tc := &filecollector.TarCollector{
TarWriter: tw,
}
if fi.IsDir() {
srcPrefix := filepath.Dir(srcPath)
srcPrefix := srcPath
if !strings.HasSuffix(srcPrefix, string(filepath.Separator)) {
srcPrefix += string(filepath.Separator)
}
fc := &fileCollector{
Fs: &defaultFs{},
fc := &filecollector.FileCollector{
Fs: &filecollector.DefaultFs{},
SrcPath: srcPath,
SrcPrefix: srcPrefix,
Handler: tc,
}
err = filepath.Walk(srcPath, fc.collectFiles(ctx, []string{}))
err = filepath.Walk(srcPath, fc.CollectFiles(ctx, []string{}))
if err != nil {
return nil, err
}

View File

@ -1,4 +1,71 @@
package container
import (
"archive/tar"
"context"
"io"
"os"
"path"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
// Type assert HostEnvironment implements ExecutionsEnvironment
var _ ExecutionsEnvironment = &HostEnvironment{}
func TestCopyDir(t *testing.T) {
dir, err := os.MkdirTemp("", "test-host-env-*")
assert.NoError(t, err)
defer os.RemoveAll(dir)
ctx := context.Background()
e := &HostEnvironment{
Path: filepath.Join(dir, "path"),
TmpDir: filepath.Join(dir, "tmp"),
ToolCache: filepath.Join(dir, "tool_cache"),
ActPath: filepath.Join(dir, "act_path"),
StdOut: os.Stdout,
Workdir: path.Join("testdata", "scratch"),
}
_ = os.MkdirAll(e.Path, 0700)
_ = os.MkdirAll(e.TmpDir, 0700)
_ = os.MkdirAll(e.ToolCache, 0700)
_ = os.MkdirAll(e.ActPath, 0700)
err = e.CopyDir(e.Workdir, e.Path, true)(ctx)
assert.NoError(t, err)
}
func TestGetContainerArchive(t *testing.T) {
dir, err := os.MkdirTemp("", "test-host-env-*")
assert.NoError(t, err)
defer os.RemoveAll(dir)
ctx := context.Background()
e := &HostEnvironment{
Path: filepath.Join(dir, "path"),
TmpDir: filepath.Join(dir, "tmp"),
ToolCache: filepath.Join(dir, "tool_cache"),
ActPath: filepath.Join(dir, "act_path"),
StdOut: os.Stdout,
Workdir: path.Join("testdata", "scratch"),
}
_ = os.MkdirAll(e.Path, 0700)
_ = os.MkdirAll(e.TmpDir, 0700)
_ = os.MkdirAll(e.ToolCache, 0700)
_ = os.MkdirAll(e.ActPath, 0700)
expectedContent := []byte("sdde/7sh")
err = os.WriteFile(filepath.Join(e.Path, "action.yml"), expectedContent, 0600)
assert.NoError(t, err)
archive, err := e.GetContainerArchive(ctx, e.Path)
assert.NoError(t, err)
defer archive.Close()
reader := tar.NewReader(archive)
h, err := reader.Next()
assert.NoError(t, err)
assert.Equal(t, "action.yml", h.Name)
content, err := io.ReadAll(reader)
assert.NoError(t, err)
assert.Equal(t, expectedContent, content)
_, err = reader.Next()
assert.ErrorIs(t, err, io.EOF)
}

View File

@ -0,0 +1 @@
testfile

View File

@ -230,6 +230,7 @@ func TestFunctionFormat(t *testing.T) {
{"format('{0', '{1}', 'World')", nil, "Unclosed brackets. The following format string is invalid: '{0'", "format-invalid-format-string"},
{"format('{2}', '{1}', 'World')", "", "The following format string references more arguments than were supplied: '{2}'", "format-invalid-replacement-reference"},
{"format('{2147483648}')", "", "The following format string is invalid: '{2147483648}'", "format-invalid-replacement-reference"},
{"format('{0} {1} {2} {3}', 1.0, 1.1, 1234567890.0, 12345678901234567890.0)", "1 1.1 1234567890 1.23456789012346E+19", nil, "format-floats"},
}
env := &EvaluationEnvironment{

View File

@ -449,7 +449,7 @@ func (impl *interperterImpl) coerceToString(value reflect.Value) reflect.Value {
} else if math.IsInf(value.Float(), -1) {
return reflect.ValueOf("-Infinity")
}
return reflect.ValueOf(fmt.Sprint(value))
return reflect.ValueOf(fmt.Sprintf("%.15G", value.Float()))
case reflect.Slice:
return reflect.ValueOf("Array")

View File

@ -1,4 +1,4 @@
package container
package filecollector
import (
"archive/tar"
@ -17,18 +17,18 @@ import (
"github.com/go-git/go-git/v5/plumbing/format/index"
)
type fileCollectorHandler interface {
type Handler interface {
WriteFile(path string, fi fs.FileInfo, linkName string, f io.Reader) error
}
type tarCollector struct {
type TarCollector struct {
TarWriter *tar.Writer
UID int
GID int
DstDir string
}
func (tc tarCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string, f io.Reader) error {
func (tc TarCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string, f io.Reader) error {
// create a new dir/file header
header, err := tar.FileInfoHeader(fi, linkName)
if err != nil {
@ -59,11 +59,11 @@ func (tc tarCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string,
return nil
}
type copyCollector struct {
type CopyCollector struct {
DstDir string
}
func (cc *copyCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string, f io.Reader) error {
func (cc *CopyCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string, f io.Reader) error {
fdestpath := filepath.Join(cc.DstDir, fpath)
if err := os.MkdirAll(filepath.Dir(fdestpath), 0o777); err != nil {
return err
@ -82,29 +82,29 @@ func (cc *copyCollector) WriteFile(fpath string, fi fs.FileInfo, linkName string
return nil
}
type fileCollector struct {
type FileCollector struct {
Ignorer gitignore.Matcher
SrcPath string
SrcPrefix string
Fs fileCollectorFs
Handler fileCollectorHandler
Fs Fs
Handler Handler
}
type fileCollectorFs interface {
type Fs interface {
Walk(root string, fn filepath.WalkFunc) error
OpenGitIndex(path string) (*index.Index, error)
Open(path string) (io.ReadCloser, error)
Readlink(path string) (string, error)
}
type defaultFs struct {
type DefaultFs struct {
}
func (*defaultFs) Walk(root string, fn filepath.WalkFunc) error {
func (*DefaultFs) Walk(root string, fn filepath.WalkFunc) error {
return filepath.Walk(root, fn)
}
func (*defaultFs) OpenGitIndex(path string) (*index.Index, error) {
func (*DefaultFs) OpenGitIndex(path string) (*index.Index, error) {
r, err := git.PlainOpen(path)
if err != nil {
return nil, err
@ -116,16 +116,16 @@ func (*defaultFs) OpenGitIndex(path string) (*index.Index, error) {
return i, nil
}
func (*defaultFs) Open(path string) (io.ReadCloser, error) {
func (*DefaultFs) Open(path string) (io.ReadCloser, error) {
return os.Open(path)
}
func (*defaultFs) Readlink(path string) (string, error) {
func (*DefaultFs) Readlink(path string) (string, error) {
return os.Readlink(path)
}
//nolint:gocyclo
func (fc *fileCollector) collectFiles(ctx context.Context, submodulePath []string) filepath.WalkFunc {
func (fc *FileCollector) CollectFiles(ctx context.Context, submodulePath []string) filepath.WalkFunc {
i, _ := fc.Fs.OpenGitIndex(path.Join(fc.SrcPath, path.Join(submodulePath...)))
return func(file string, fi os.FileInfo, err error) error {
if err != nil {
@ -166,7 +166,7 @@ func (fc *fileCollector) collectFiles(ctx context.Context, submodulePath []strin
}
}
if err == nil && entry.Mode == filemode.Submodule {
err = fc.Fs.Walk(file, fc.collectFiles(ctx, split))
err = fc.Fs.Walk(file, fc.CollectFiles(ctx, split))
if err != nil {
return err
}

View File

@ -1,4 +1,4 @@
package container
package filecollector
import (
"archive/tar"
@ -95,16 +95,16 @@ func TestIgnoredTrackedfile(t *testing.T) {
tw := tar.NewWriter(tmpTar)
ps, _ := gitignore.ReadPatterns(worktree, []string{})
ignorer := gitignore.NewMatcher(ps)
fc := &fileCollector{
fc := &FileCollector{
Fs: &memoryFs{Filesystem: fs},
Ignorer: ignorer,
SrcPath: "mygitrepo",
SrcPrefix: "mygitrepo" + string(filepath.Separator),
Handler: &tarCollector{
Handler: &TarCollector{
TarWriter: tw,
},
}
err := fc.Fs.Walk("mygitrepo", fc.collectFiles(context.Background(), []string{}))
err := fc.Fs.Walk("mygitrepo", fc.CollectFiles(context.Background(), []string{}))
assert.NoError(t, err, "successfully collect files")
tw.Close()
_, _ = tmpTar.Seek(0, io.SeekStart)
@ -115,3 +115,58 @@ func TestIgnoredTrackedfile(t *testing.T) {
_, err = tr.Next()
assert.ErrorIs(t, err, io.EOF, "tar must only contain one element")
}
func TestSymlinks(t *testing.T) {
fs := memfs.New()
_ = fs.MkdirAll("mygitrepo/.git", 0o777)
dotgit, _ := fs.Chroot("mygitrepo/.git")
worktree, _ := fs.Chroot("mygitrepo")
repo, _ := git.Init(filesystem.NewStorage(dotgit, cache.NewObjectLRUDefault()), worktree)
// Create a file and a symlink to it; both should end up in the tar
f, err := worktree.Create(".env")
assert.NoError(t, err)
_, err = f.Write([]byte("test=val1\n"))
assert.NoError(t, err)
f.Close()
err = worktree.Symlink(".env", "test.env")
assert.NoError(t, err)
w, err := repo.Worktree()
assert.NoError(t, err)
// Both the file and the symlink are in the tar after adding them to the index
_, err = w.Add(".env")
assert.NoError(t, err)
_, err = w.Add("test.env")
assert.NoError(t, err)
tmpTar, _ := fs.Create("temp.tar")
tw := tar.NewWriter(tmpTar)
ps, _ := gitignore.ReadPatterns(worktree, []string{})
ignorer := gitignore.NewMatcher(ps)
fc := &FileCollector{
Fs: &memoryFs{Filesystem: fs},
Ignorer: ignorer,
SrcPath: "mygitrepo",
SrcPrefix: "mygitrepo" + string(filepath.Separator),
Handler: &TarCollector{
TarWriter: tw,
},
}
err = fc.Fs.Walk("mygitrepo", fc.CollectFiles(context.Background(), []string{}))
assert.NoError(t, err, "successfully collect files")
tw.Close()
_, _ = tmpTar.Seek(0, io.SeekStart)
tr := tar.NewReader(tmpTar)
h, err := tr.Next()
files := map[string]tar.Header{}
for err == nil {
files[h.Name] = *h
h, err = tr.Next()
}
assert.Equal(t, ".env", files[".env"].Name)
assert.Equal(t, "test.env", files["test.env"].Name)
assert.Equal(t, ".env", files["test.env"].Linkname)
assert.ErrorIs(t, err, io.EOF, "tar must be read cleanly to EOF")
}

View File

@ -168,7 +168,7 @@ func (ghc *GithubContext) SetRepositoryAndOwner(ctx context.Context, githubInsta
if ghc.Repository == "" {
repo, err := git.FindGithubRepo(ctx, repoPath, githubInstance, remoteName)
if err != nil {
common.Logger(ctx).Warningf("unable to get git repo: %v", err)
common.Logger(ctx).Warningf("unable to get git repo (githubInstance: %v; remoteName: %v, repoPath: %v): %v", githubInstance, remoteName, repoPath, err)
return
}
ghc.Repository = repo

View File

@ -148,12 +148,10 @@ func NewWorkflowPlanner(path string, noWorkflowRecurse bool) (WorkflowPlanner, e
workflow.Name = wf.workflowDirEntry.Name()
}
jobNameRegex := regexp.MustCompile(`^([[:alpha:]_][[:alnum:]_\-]*)$`)
for k := range workflow.Jobs {
if ok := jobNameRegex.MatchString(k); !ok {
_ = f.Close()
return nil, fmt.Errorf("workflow is not valid. '%s': Job name '%s' is invalid. Names must start with a letter or '_' and contain only alphanumeric characters, '-', or '_'", workflow.Name, k)
}
err = validateJobName(workflow)
if err != nil {
_ = f.Close()
return nil, err
}
wp.workflows = append(wp.workflows, workflow)
@ -171,6 +169,42 @@ func CombineWorkflowPlanner(workflows ...*Workflow) WorkflowPlanner {
}
}
func NewSingleWorkflowPlanner(name string, f io.Reader) (WorkflowPlanner, error) {
wp := new(workflowPlanner)
log.Debugf("Reading workflow %s", name)
workflow, err := ReadWorkflow(f)
if err != nil {
if err == io.EOF {
return nil, fmt.Errorf("unable to read workflow '%s': file is empty: %w", name, err)
}
return nil, fmt.Errorf("workflow is not valid. '%s': %w", name, err)
}
workflow.File = name
if workflow.Name == "" {
workflow.Name = name
}
err = validateJobName(workflow)
if err != nil {
return nil, err
}
wp.workflows = append(wp.workflows, workflow)
return wp, nil
}
func validateJobName(workflow *Workflow) error {
jobNameRegex := regexp.MustCompile(`^([[:alpha:]_][[:alnum:]_\-]*)$`)
for k := range workflow.Jobs {
if ok := jobNameRegex.MatchString(k); !ok {
return fmt.Errorf("workflow is not valid. '%s': Job name '%s' is invalid. Names must start with a letter or '_' and contain only alphanumeric characters, '-', or '_'", workflow.Name, k)
}
}
return nil
}
type workflowPlanner struct {
workflows []*Workflow
}

View File

@ -103,22 +103,40 @@ type WorkflowDispatch struct {
}
func (w *Workflow) WorkflowDispatchConfig() *WorkflowDispatch {
if w.RawOn.Kind != yaml.MappingNode {
switch w.RawOn.Kind {
case yaml.ScalarNode:
var val string
if !decodeNode(w.RawOn, &val) {
return nil
}
if val == "workflow_dispatch" {
return &WorkflowDispatch{}
}
case yaml.SequenceNode:
var val []string
if !decodeNode(w.RawOn, &val) {
return nil
}
for _, v := range val {
if v == "workflow_dispatch" {
return &WorkflowDispatch{}
}
}
case yaml.MappingNode:
var val map[string]yaml.Node
if !decodeNode(w.RawOn, &val) {
return nil
}
n, found := val["workflow_dispatch"]
var workflowDispatch WorkflowDispatch
if found && decodeNode(n, &workflowDispatch) {
return &workflowDispatch
}
default:
return nil
}
var val map[string]yaml.Node
if !decodeNode(w.RawOn, &val) {
return nil
}
var config WorkflowDispatch
node := val["workflow_dispatch"]
if !decodeNode(node, &config) {
return nil
}
return &config
return nil
}
type WorkflowCallInput struct {
@ -299,15 +317,39 @@ func (j *Job) Needs() []string {
// RunsOn list for Job
func (j *Job) RunsOn() []string {
switch j.RawRunsOn.Kind {
case yaml.MappingNode:
var val struct {
Group string
Labels yaml.Node
}
if !decodeNode(j.RawRunsOn, &val) {
return nil
}
labels := nodeAsStringSlice(val.Labels)
if val.Group != "" {
labels = append(labels, val.Group)
}
return labels
default:
return nodeAsStringSlice(j.RawRunsOn)
}
}
func nodeAsStringSlice(node yaml.Node) []string {
switch node.Kind {
case yaml.ScalarNode:
var val string
if !decodeNode(j.RawRunsOn, &val) {
if !decodeNode(node, &val) {
return nil
}
return []string{val}
case yaml.SequenceNode:
var val []string
if !decodeNode(j.RawRunsOn, &val) {
if !decodeNode(node, &val) {
return nil
}
return val

View File

@ -153,6 +153,41 @@ jobs:
assert.Contains(t, workflow.On(), "pull_request")
}
func TestReadWorkflow_RunsOnLabels(t *testing.T) {
yaml := `
name: local-action-docker-url
jobs:
test:
container: nginx:latest
runs-on:
labels: ubuntu-latest
steps:
- uses: ./actions/docker-url`
workflow, err := ReadWorkflow(strings.NewReader(yaml))
assert.NoError(t, err, "read workflow should succeed")
assert.Equal(t, workflow.Jobs["test"].RunsOn(), []string{"ubuntu-latest"})
}
func TestReadWorkflow_RunsOnLabelsWithGroup(t *testing.T) {
yaml := `
name: local-action-docker-url
jobs:
test:
container: nginx:latest
runs-on:
labels: [ubuntu-latest]
group: linux
steps:
- uses: ./actions/docker-url`
workflow, err := ReadWorkflow(strings.NewReader(yaml))
assert.NoError(t, err, "read workflow should succeed")
assert.Equal(t, workflow.Jobs["test"].RunsOn(), []string{"ubuntu-latest", "linux"})
}
func TestReadWorkflow_StringContainer(t *testing.T) {
yaml := `
name: local-action-docker-url
@ -464,3 +499,107 @@ func TestStep_ShellCommand(t *testing.T) {
})
}
}
func TestReadWorkflow_WorkflowDispatchConfig(t *testing.T) {
yaml := `
name: local-action-docker-url
`
workflow, err := ReadWorkflow(strings.NewReader(yaml))
assert.NoError(t, err, "read workflow should succeed")
workflowDispatch := workflow.WorkflowDispatchConfig()
assert.Nil(t, workflowDispatch)
yaml = `
name: local-action-docker-url
on: push
`
workflow, err = ReadWorkflow(strings.NewReader(yaml))
assert.NoError(t, err, "read workflow should succeed")
workflowDispatch = workflow.WorkflowDispatchConfig()
assert.Nil(t, workflowDispatch)
yaml = `
name: local-action-docker-url
on: workflow_dispatch
`
workflow, err = ReadWorkflow(strings.NewReader(yaml))
assert.NoError(t, err, "read workflow should succeed")
workflowDispatch = workflow.WorkflowDispatchConfig()
assert.NotNil(t, workflowDispatch)
assert.Nil(t, workflowDispatch.Inputs)
yaml = `
name: local-action-docker-url
on: [push, pull_request]
`
workflow, err = ReadWorkflow(strings.NewReader(yaml))
assert.NoError(t, err, "read workflow should succeed")
workflowDispatch = workflow.WorkflowDispatchConfig()
assert.Nil(t, workflowDispatch)
yaml = `
name: local-action-docker-url
on: [push, workflow_dispatch]
`
workflow, err = ReadWorkflow(strings.NewReader(yaml))
assert.NoError(t, err, "read workflow should succeed")
workflowDispatch = workflow.WorkflowDispatchConfig()
assert.NotNil(t, workflowDispatch)
assert.Nil(t, workflowDispatch.Inputs)
yaml = `
name: local-action-docker-url
on:
- push
- workflow_dispatch
`
workflow, err = ReadWorkflow(strings.NewReader(yaml))
assert.NoError(t, err, "read workflow should succeed")
workflowDispatch = workflow.WorkflowDispatchConfig()
assert.NotNil(t, workflowDispatch)
assert.Nil(t, workflowDispatch.Inputs)
yaml = `
name: local-action-docker-url
on:
push:
pull_request:
`
workflow, err = ReadWorkflow(strings.NewReader(yaml))
assert.NoError(t, err, "read workflow should succeed")
workflowDispatch = workflow.WorkflowDispatchConfig()
assert.Nil(t, workflowDispatch)
yaml = `
name: local-action-docker-url
on:
push:
pull_request:
workflow_dispatch:
inputs:
logLevel:
description: 'Log level'
required: true
default: 'warning'
type: choice
options:
- info
- warning
- debug
`
workflow, err = ReadWorkflow(strings.NewReader(yaml))
assert.NoError(t, err, "read workflow should succeed")
workflowDispatch = workflow.WorkflowDispatchConfig()
assert.NotNil(t, workflowDispatch)
assert.Equal(t, WorkflowDispatchInput{
Default: "warning",
Description: "Log level",
Options: []string{
"info",
"warning",
"debug",
},
Required: true,
Type: "choice",
}, workflowDispatch.Inputs["logLevel"])
}

View File

@ -3,6 +3,7 @@ package runner
import (
"context"
"embed"
"errors"
"fmt"
"io"
"io/fs"
@ -41,11 +42,24 @@ var trampoline embed.FS
func readActionImpl(ctx context.Context, step *model.Step, actionDir string, actionPath string, readFile actionYamlReader, writeFile fileWriter) (*model.Action, error) {
logger := common.Logger(ctx)
allErrors := []error{}
addError := func(fileName string, err error) {
if err != nil {
allErrors = append(allErrors, fmt.Errorf("failed to read '%s' from action '%s' with path '%s' of step %w", fileName, step.String(), actionPath, err))
} else {
// One successful read, clear error state
allErrors = nil
}
}
reader, closer, err := readFile("action.yml")
addError("action.yml", err)
if os.IsNotExist(err) {
reader, closer, err = readFile("action.yaml")
if err != nil {
if _, closer, err2 := readFile("Dockerfile"); err2 == nil {
addError("action.yaml", err)
if os.IsNotExist(err) {
_, closer, err := readFile("Dockerfile")
addError("Dockerfile", err)
if err == nil {
closer.Close()
action := &model.Action{
Name: "(Synthetic)",
@ -90,10 +104,10 @@ func readActionImpl(ctx context.Context, step *model.Step, actionDir string, act
return action, nil
}
}
return nil, err
}
} else if err != nil {
return nil, err
}
if allErrors != nil {
return nil, errors.Join(allErrors...)
}
defer closer.Close()
@ -110,9 +124,6 @@ func maybeCopyToActionDir(ctx context.Context, step actionStep, actionDir string
if stepModel.Type() != model.StepTypeUsesActionRemote {
return nil
}
if err := removeGitIgnore(ctx, actionDir); err != nil {
return err
}
var containerActionDirCopy string
containerActionDirCopy = strings.TrimSuffix(containerActionDir, actionPath)
@ -121,6 +132,21 @@ func maybeCopyToActionDir(ctx context.Context, step actionStep, actionDir string
if !strings.HasSuffix(containerActionDirCopy, `/`) {
containerActionDirCopy += `/`
}
if rc.Config != nil && rc.Config.ActionCache != nil {
raction := step.(*stepActionRemote)
ta, err := rc.Config.ActionCache.GetTarArchive(ctx, raction.cacheDir, raction.resolvedSha, "")
if err != nil {
return err
}
defer ta.Close()
return rc.JobContainer.CopyTarStream(ctx, containerActionDirCopy, ta)
}
if err := removeGitIgnore(ctx, actionDir); err != nil {
return err
}
return rc.JobContainer.CopyDir(containerActionDirCopy, actionDir+"/", rc.Config.UseGitIgnore)(ctx)
}
@ -281,6 +307,13 @@ func execAsDocker(ctx context.Context, step actionStep, actionName string, based
return err
}
defer buildContext.Close()
} else if rc.Config.ActionCache != nil {
rstep := step.(*stepActionRemote)
buildContext, err = rc.Config.ActionCache.GetTarArchive(ctx, rstep.cacheDir, rstep.resolvedSha, contextDir)
if err != nil {
return err
}
defer buildContext.Close()
}
prepImage = container.NewDockerBuildExecutor(container.NewDockerBuildExecutorInput{
ContextDir: contextDir,

View File

@ -42,17 +42,7 @@ func (c GoGitActionCache) Fetch(ctx context.Context, cacheDir, url, ref, token s
return "", err
}
branchName := hex.EncodeToString(tmpBranch)
var refSpec config.RefSpec
spec := config.RefSpec(ref + ":" + branchName)
tagOrSha := false
if spec.IsExactSHA1() {
refSpec = spec
} else if strings.HasPrefix(ref, "refs/") {
refSpec = config.RefSpec(ref + ":refs/heads/" + branchName)
} else {
tagOrSha = true
refSpec = config.RefSpec("refs/*/" + ref + ":refs/heads/*/" + branchName)
}
var auth transport.AuthMethod
if token != "" {
auth = &http.BasicAuth{
@ -70,32 +60,17 @@ func (c GoGitActionCache) Fetch(ctx context.Context, cacheDir, url, ref, token s
return "", err
}
defer func() {
if refs, err := gogitrepo.References(); err == nil {
_ = refs.ForEach(func(r *plumbing.Reference) error {
if strings.Contains(r.Name().String(), branchName) {
return gogitrepo.DeleteBranch(r.Name().String())
}
return nil
})
}
_ = gogitrepo.DeleteBranch(branchName)
}()
if err := remote.FetchContext(ctx, &git.FetchOptions{
RefSpecs: []config.RefSpec{
refSpec,
config.RefSpec(ref + ":" + branchName),
},
Auth: auth,
Force: true,
}); err != nil {
return "", err
}
if tagOrSha {
for _, prefix := range []string{"refs/heads/tags/", "refs/heads/heads/"} {
hash, err := gogitrepo.ResolveRevision(plumbing.Revision(prefix + branchName))
if err == nil {
return hash.String(), nil
}
}
}
hash, err := gogitrepo.ResolveRevision(plumbing.Revision(branchName))
if err != nil {
return "", err

View File

@ -0,0 +1,41 @@
package runner
import (
"context"
"io"
"path"
git "github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
)
type GoGitActionCacheOfflineMode struct {
Parent GoGitActionCache
}
func (c GoGitActionCacheOfflineMode) Fetch(ctx context.Context, cacheDir, url, ref, token string) (string, error) {
sha, fetchErr := c.Parent.Fetch(ctx, cacheDir, url, ref, token)
gitPath := path.Join(c.Parent.Path, safeFilename(cacheDir)+".git")
gogitrepo, err := git.PlainOpen(gitPath)
if err != nil {
return "", fetchErr
}
refName := plumbing.ReferenceName("refs/action-cache-offline/" + ref)
r, err := gogitrepo.Reference(refName, true)
if fetchErr == nil {
if err != nil || sha != r.Hash().String() {
if err == nil {
refName = r.Name()
}
ref := plumbing.NewHashReference(refName, plumbing.NewHash(sha))
_ = gogitrepo.Storer.SetReference(ref)
}
} else if err == nil {
return r.Hash().String(), nil
}
return sha, fetchErr
}
func (c GoGitActionCacheOfflineMode) GetTarArchive(ctx context.Context, cacheDir, sha, includePrefix string) (io.ReadCloser, error) {
return c.Parent.GetTarArchive(ctx, cacheDir, sha, includePrefix)
}
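
GoGitActionCacheOfflineMode records the last successfully fetched SHA under refs/action-cache-offline/<ref> and serves that recorded SHA when a later fetch fails (for example, with no network). A hedged usage sketch, assuming the exported types from pkg/runner; the cache path is illustrative:

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/nektos/act/pkg/runner"
)

func main() {
	// Wrap the regular go-git action cache; refs fetched once keep resolving
	// from the recorded offline reference when the upstream fetch fails.
	cache := runner.GoGitActionCacheOfflineMode{
		Parent: runner.GoGitActionCache{Path: os.TempDir()},
	}
	sha, err := cache.Fetch(context.Background(), "nektos/act-test-actions",
		"https://github.com/nektos/act-test-actions", "main", "")
	fmt.Println(sha, err)
}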

View File

@ -18,20 +18,60 @@ func TestActionCache(t *testing.T) {
Path: os.TempDir(),
}
ctx := context.Background()
sha, err := cache.Fetch(ctx, "christopherhx/script", "https://github.com/christopherhx/script", "main", "")
a.NoError(err)
a.NotEmpty(sha)
atar, err := cache.GetTarArchive(ctx, "christopherhx/script", sha, "node_modules")
a.NoError(err)
a.NotEmpty(atar)
mytar := tar.NewReader(atar)
th, err := mytar.Next()
a.NoError(err)
a.NotEqual(0, th.Size)
buf := &bytes.Buffer{}
// G110: Potential DoS vulnerability via decompression bomb (gosec)
_, err = io.Copy(buf, mytar)
a.NoError(err)
str := buf.String()
a.NotEmpty(str)
cacheDir := "nektos/act-test-actions"
repo := "https://github.com/nektos/act-test-actions"
refs := []struct {
Name string
CacheDir string
Repo string
Ref string
}{
{
Name: "Fetch Branch Name",
CacheDir: cacheDir,
Repo: repo,
Ref: "main",
},
{
Name: "Fetch Branch Name Absolutely",
CacheDir: cacheDir,
Repo: repo,
Ref: "refs/heads/main",
},
{
Name: "Fetch HEAD",
CacheDir: cacheDir,
Repo: repo,
Ref: "HEAD",
},
{
Name: "Fetch Sha",
CacheDir: cacheDir,
Repo: repo,
Ref: "de984ca37e4df4cb9fd9256435a3b82c4a2662b1",
},
}
for _, c := range refs {
t.Run(c.Name, func(t *testing.T) {
sha, err := cache.Fetch(ctx, c.CacheDir, c.Repo, c.Ref, "")
if !a.NoError(err) || !a.NotEmpty(sha) {
return
}
atar, err := cache.GetTarArchive(ctx, c.CacheDir, sha, "js")
if !a.NoError(err) || !a.NotEmpty(atar) {
return
}
mytar := tar.NewReader(atar)
th, err := mytar.Next()
if !a.NoError(err) || !a.NotEqual(0, th.Size) {
return
}
buf := &bytes.Buffer{}
// G110: Potential DoS vulnerability via decompression bomb (gosec)
_, err = io.Copy(buf, mytar)
a.NoError(err)
str := buf.String()
a.NotEmpty(str)
})
}
}

View File

@ -20,6 +20,7 @@ type jobInfo interface {
result(result string)
}
//nolint:contextcheck,gocyclo
func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executor {
steps := make([]common.Executor, 0)
preSteps := make([]common.Executor, 0)
@ -101,7 +102,7 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo
postExec := useStepLogger(rc, stepModel, stepStagePost, step.post())
if postExecutor != nil {
// run the post exector in reverse order
// run the post executor in reverse order
postExecutor = postExec.Finally(postExecutor)
} else {
postExecutor = postExec
@ -131,8 +132,9 @@ func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executo
// if the value of `ContainerNetworkMode` is empty string,
// it means that the network to which containers are connecting is created by `act_runner`,
// so, we should remove the network at last.
logger.Infof("Cleaning up network for job %s, and network name is: %s", rc.JobName, rc.networkName())
if err := container.NewDockerNetworkRemoveExecutor(rc.networkName())(ctx); err != nil {
networkName, _ := rc.networkName()
logger.Infof("Cleaning up network for job %s, and network name is: %s", rc.JobName, networkName)
if err := container.NewDockerNetworkRemoveExecutor(networkName)(ctx); err != nil {
logger.Errorf("Error while cleaning network: %v", err)
}
}

View File

@ -0,0 +1,91 @@
package runner
import (
"archive/tar"
"bytes"
"context"
"fmt"
"io"
"io/fs"
goURL "net/url"
"os"
"path/filepath"
"strings"
"github.com/nektos/act/pkg/filecollector"
)
type LocalRepositoryCache struct {
Parent ActionCache
LocalRepositories map[string]string
CacheDirCache map[string]string
}
func (l *LocalRepositoryCache) Fetch(ctx context.Context, cacheDir, url, ref, token string) (string, error) {
if dest, ok := l.LocalRepositories[fmt.Sprintf("%s@%s", url, ref)]; ok {
l.CacheDirCache[fmt.Sprintf("%s@%s", cacheDir, ref)] = dest
return ref, nil
}
if purl, err := goURL.Parse(url); err == nil {
if dest, ok := l.LocalRepositories[fmt.Sprintf("%s@%s", strings.TrimPrefix(purl.Path, "/"), ref)]; ok {
l.CacheDirCache[fmt.Sprintf("%s@%s", cacheDir, ref)] = dest
return ref, nil
}
}
return l.Parent.Fetch(ctx, cacheDir, url, ref, token)
}
func (l *LocalRepositoryCache) GetTarArchive(ctx context.Context, cacheDir, sha, includePrefix string) (io.ReadCloser, error) {
// sha is mapped to ref in fetch if there is a local override
if dest, ok := l.CacheDirCache[fmt.Sprintf("%s@%s", cacheDir, sha)]; ok {
srcPath := filepath.Join(dest, includePrefix)
buf := &bytes.Buffer{}
tw := tar.NewWriter(buf)
defer tw.Close()
srcPath = filepath.Clean(srcPath)
fi, err := os.Lstat(srcPath)
if err != nil {
return nil, err
}
tc := &filecollector.TarCollector{
TarWriter: tw,
}
if fi.IsDir() {
srcPrefix := srcPath
if !strings.HasSuffix(srcPrefix, string(filepath.Separator)) {
srcPrefix += string(filepath.Separator)
}
fc := &filecollector.FileCollector{
Fs: &filecollector.DefaultFs{},
SrcPath: srcPath,
SrcPrefix: srcPrefix,
Handler: tc,
}
err = filepath.Walk(srcPath, fc.CollectFiles(ctx, []string{}))
if err != nil {
return nil, err
}
} else {
var f io.ReadCloser
var linkname string
if fi.Mode()&fs.ModeSymlink != 0 {
linkname, err = os.Readlink(srcPath)
if err != nil {
return nil, err
}
} else {
f, err = os.Open(srcPath)
if err != nil {
return nil, err
}
defer f.Close()
}
err := tc.WriteFile(fi.Name(), fi, linkname, f)
if err != nil {
return nil, err
}
}
return io.NopCloser(buf), nil
}
return l.Parent.GetTarArchive(ctx, cacheDir, sha, includePrefix)
}
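
LocalRepositoryCache lets selected actions or reusable workflows resolve from local checkouts instead of being fetched: Fetch matches either "<clone-url>@<ref>" or "<owner>/<repo>@<ref>" keys in LocalRepositories, remembers the directory for the later GetTarArchive call, and falls through to the parent cache for everything else. A hedged usage sketch (the local path is illustrative):

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/nektos/act/pkg/runner"
)

func main() {
	cache := &runner.LocalRepositoryCache{
		Parent: runner.GoGitActionCache{Path: os.TempDir()},
		LocalRepositories: map[string]string{
			// Key may be the full clone URL or the "owner/repo" path, plus "@<ref>".
			"nektos/act-test-actions@main": "/home/me/src/act-test-actions",
		},
		CacheDirCache: map[string]string{},
	}
	// For a local override, Fetch returns the ref itself instead of a commit SHA.
	sha, err := cache.Fetch(context.Background(), "nektos/act-test-actions",
		"https://github.com/nektos/act-test-actions", "main", "")
	fmt.Println(sha, err)
}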

View File

@ -1,6 +1,7 @@
package runner
import (
"archive/tar"
"context"
"errors"
"fmt"
@ -64,6 +65,10 @@ func newRemoteReusableWorkflowExecutor(rc *RunContext) common.Executor {
filename := fmt.Sprintf("%s/%s@%s", remoteReusableWorkflow.Org, remoteReusableWorkflow.Repo, remoteReusableWorkflow.Ref)
workflowDir := fmt.Sprintf("%s/%s", rc.ActionCacheDir(), safeFilename(filename))
if rc.Config.ActionCache != nil {
return newActionCacheReusableWorkflowExecutor(rc, filename, remoteReusableWorkflow)
}
// FIXME: if the reusable workflow is from a private repository, we need to provide a token to access the repository.
token := ""
@ -73,6 +78,41 @@ func newRemoteReusableWorkflowExecutor(rc *RunContext) common.Executor {
)
}
func newActionCacheReusableWorkflowExecutor(rc *RunContext, filename string, remoteReusableWorkflow *remoteReusableWorkflow) common.Executor {
return func(ctx context.Context) error {
ghctx := rc.getGithubContext(ctx)
remoteReusableWorkflow.URL = ghctx.ServerURL
sha, err := rc.Config.ActionCache.Fetch(ctx, filename, remoteReusableWorkflow.CloneURL(), remoteReusableWorkflow.Ref, ghctx.Token)
if err != nil {
return err
}
archive, err := rc.Config.ActionCache.GetTarArchive(ctx, filename, sha, fmt.Sprintf(".github/workflows/%s", remoteReusableWorkflow.Filename))
if err != nil {
return err
}
defer archive.Close()
treader := tar.NewReader(archive)
if _, err = treader.Next(); err != nil {
return err
}
planner, err := model.NewSingleWorkflowPlanner(remoteReusableWorkflow.Filename, treader)
if err != nil {
return err
}
plan, err := planner.PlanEvent("workflow_call")
if err != nil {
return err
}
runner, err := NewReusableWorkflowRunner(rc)
if err != nil {
return err
}
return runner.NewPlanExecutor(plan)(ctx)
}
}
var (
executorLock sync.Mutex
)
@ -99,10 +139,11 @@ func cloneIfRequired(rc *RunContext, remoteReusableWorkflow remoteReusableWorkfl
// 2. Gitea has already full URL with rc.Config.GitHubInstance when calling newRemoteReusableWorkflowWithPlat
// remoteReusableWorkflow.URL = rc.getGithubContext(ctx).ServerURL
return git.NewGitCloneExecutor(git.NewGitCloneExecutorInput{
URL: remoteReusableWorkflow.CloneURL(),
Ref: remoteReusableWorkflow.Ref,
Dir: targetDirectory,
Token: token,
URL: remoteReusableWorkflow.CloneURL(),
Ref: remoteReusableWorkflow.Ref,
Dir: targetDirectory,
Token: token,
OfflineMode: rc.Config.ActionOfflineMode,
})(ctx)
},
nil,

View File

@ -18,6 +18,7 @@ import (
"strings"
"time"
"github.com/docker/go-connections/nat"
"github.com/opencontainers/selinux/go-selinux"
"github.com/nektos/act/pkg/common"
@ -65,7 +66,7 @@ func (rc *RunContext) String() string {
if rc.caller != nil {
// prefix the reusable workflow with the caller job
// this is required to create unique container names
name = fmt.Sprintf("%s/%s", rc.caller.runContext.Run.JobID, name)
name = fmt.Sprintf("%s/%s", rc.caller.runContext.Name, name)
}
return name
}
@ -95,9 +96,15 @@ func (rc *RunContext) jobContainerName() string {
}
// networkName returns the name of the network that `act` creates automatically for the job,
// only create network if `rc.Config.ContainerNetworkMode` is empty string.
func (rc *RunContext) networkName() string {
return fmt.Sprintf("%s-network", rc.jobContainerName())
// only create network if using a service container
func (rc *RunContext) networkName() (string, bool) {
if len(rc.Run.Job().Services) >= 0 { // For Gitea, always create network
return fmt.Sprintf("%s-%s-network", rc.jobContainerName(), rc.Run.JobID), true
}
if rc.Config.ContainerNetworkMode == "" {
return "host", false
}
return string(rc.Config.ContainerNetworkMode), false
}
func getDockerDaemonSocketMountPath(daemonPath string) string {
@ -135,7 +142,7 @@ func (rc *RunContext) GetBindsAndMounts() ([]string, map[string]string) {
ext := container.LinuxContainerEnvironmentExtensions{}
mounts := map[string]string{
"act-toolcache": "/toolcache",
"act-toolcache": "/opt/hostedtoolcache",
name + "-env": ext.GetActPath(),
}
@ -247,6 +254,7 @@ func (rc *RunContext) startHostEnvironment() common.Executor {
}
}
//nolint:gocyclo
func (rc *RunContext) startJobContainer() common.Executor {
return func(ctx context.Context) error {
logger := common.Logger(ctx)
@ -284,15 +292,12 @@ func (rc *RunContext) startJobContainer() common.Executor {
binds, mounts := rc.GetBindsAndMounts()
// specify the network to which the container will connect at the `docker create` stage (equivalent to: docker create --network <networkName> <image>)
networkName := string(rc.Config.ContainerNetworkMode)
if networkName == "" {
// if networkName is empty string, will create a new network for the containers.
// and it will be removed after at last.
networkName = rc.networkName()
}
// if using service containers, a new network will be created for them,
// and it will be removed once the job finishes.
networkName, createAndDeleteNetwork := rc.networkName()
// add service containers
for serviceId, spec := range rc.Run.Job().Services {
for serviceID, spec := range rc.Run.Job().Services {
// interpolate env
interpolatedEnvs := make(map[string]string, len(spec.Env))
for k, v := range spec.Env {
@ -302,24 +307,33 @@ func (rc *RunContext) startJobContainer() common.Executor {
for k, v := range interpolatedEnvs {
envs = append(envs, fmt.Sprintf("%s=%s", k, v))
}
// interpolate cmd
interpolatedCmd := make([]string, 0, len(spec.Cmd))
for _, v := range spec.Cmd {
interpolatedCmd = append(interpolatedCmd, rc.ExprEval.Interpolate(ctx, v))
}
username, password, err := rc.handleServiceCredentials(ctx, spec.Credentials)
username, password, err = rc.handleServiceCredentials(ctx, spec.Credentials)
if err != nil {
return fmt.Errorf("failed to handle service %s credentials: %w", serviceId, err)
return fmt.Errorf("failed to handle service %s credentials: %w", serviceID, err)
}
serviceBinds, serviceMounts := rc.GetServiceBindsAndMounts(spec.Volumes)
serviceContainerName := createSimpleContainerName(rc.jobContainerName(), serviceId)
interpolatedVolumes := make([]string, 0, len(spec.Volumes))
for _, volume := range spec.Volumes {
interpolatedVolumes = append(interpolatedVolumes, rc.ExprEval.Interpolate(ctx, volume))
}
serviceBinds, serviceMounts := rc.GetServiceBindsAndMounts(interpolatedVolumes)
interpolatedPorts := make([]string, 0, len(spec.Ports))
for _, port := range spec.Ports {
interpolatedPorts = append(interpolatedPorts, rc.ExprEval.Interpolate(ctx, port))
}
exposedPorts, portBindings, err := nat.ParsePortSpecs(interpolatedPorts)
if err != nil {
return fmt.Errorf("failed to parse service %s ports: %w", serviceID, err)
}
serviceContainerName := createContainerName(rc.jobContainerName(), serviceID)
c := container.NewContainer(&container.NewContainerInput{
Name: serviceContainerName,
WorkingDir: ext.ToContainerPath(rc.Config.Workdir),
Image: spec.Image,
Image: rc.ExprEval.Interpolate(ctx, spec.Image),
Username: username,
Password: password,
Cmd: interpolatedCmd,
Env: envs,
Mounts: serviceMounts,
Binds: serviceBinds,
@ -328,24 +342,54 @@ func (rc *RunContext) startJobContainer() common.Executor {
Privileged: rc.Config.Privileged,
UsernsMode: rc.Config.UsernsMode,
Platform: rc.Config.ContainerArchitecture,
AutoRemove: rc.Config.AutoRemove,
Options: spec.Options,
Options: rc.ExprEval.Interpolate(ctx, spec.Options),
NetworkMode: networkName,
NetworkAliases: []string{serviceId},
ValidVolumes: rc.Config.ValidVolumes,
NetworkAliases: []string{serviceID},
ExposedPorts: exposedPorts,
PortBindings: portBindings,
})
rc.ServiceContainers = append(rc.ServiceContainers, c)
}
rc.cleanUpJobContainer = func(ctx context.Context) error {
if rc.JobContainer != nil && !rc.Config.ReuseContainers {
return rc.JobContainer.Remove().
Then(container.NewDockerVolumeRemoveExecutor(rc.jobContainerName(), false)).
Then(container.NewDockerVolumeRemoveExecutor(rc.jobContainerName()+"-env", false))(ctx)
reuseJobContainer := func(ctx context.Context) bool {
return rc.Config.ReuseContainers
}
if rc.JobContainer != nil {
return rc.JobContainer.Remove().IfNot(reuseJobContainer).
Then(container.NewDockerVolumeRemoveExecutor(rc.jobContainerName(), false)).IfNot(reuseJobContainer).
Then(container.NewDockerVolumeRemoveExecutor(rc.jobContainerName()+"-env", false)).IfNot(reuseJobContainer).
Then(func(ctx context.Context) error {
if len(rc.ServiceContainers) > 0 {
logger.Infof("Cleaning up services for job %s", rc.JobName)
if err := rc.stopServiceContainers()(ctx); err != nil {
logger.Errorf("Error while cleaning services: %v", err)
}
if createAndDeleteNetwork {
// clean up the network only if it was created by act:
// when service containers are used, the network the containers connect to
// was created by `act_runner`, so it must be removed here at the end.
logger.Infof("Cleaning up network for job %s, and network name is: %s", rc.JobName, networkName)
if err := container.NewDockerNetworkRemoveExecutor(networkName)(ctx); err != nil {
logger.Errorf("Error while cleaning network: %v", err)
}
}
}
return nil
})(ctx)
}
return nil
}
jobContainerNetwork := rc.Config.ContainerNetworkMode.NetworkName()
if rc.containerImage(ctx) != "" {
jobContainerNetwork = networkName
} else if jobContainerNetwork == "" {
jobContainerNetwork = "host"
}
rc.JobContainer = container.NewContainer(&container.NewContainerInput{
Cmd: nil,
Entrypoint: []string{"/bin/sleep", fmt.Sprint(rc.Config.ContainerMaxLifetime.Round(time.Second).Seconds())},
@ -356,7 +400,7 @@ func (rc *RunContext) startJobContainer() common.Executor {
Name: name,
Env: envList,
Mounts: mounts,
NetworkMode: networkName,
NetworkMode: jobContainerNetwork,
NetworkAliases: []string{rc.Name},
Binds: binds,
Stdout: logWriter,
@ -375,7 +419,8 @@ func (rc *RunContext) startJobContainer() common.Executor {
return common.NewPipelineExecutor(
rc.pullServicesImages(rc.Config.ForcePull),
rc.JobContainer.Pull(rc.Config.ForcePull),
container.NewDockerNetworkCreateExecutor(networkName).IfBool(!rc.IsHostEnv(ctx) && rc.Config.ContainerNetworkMode == ""), // if the value of `ContainerNetworkMode` is empty string, then will create a new network for containers.
rc.stopJobContainer(),
container.NewDockerNetworkCreateExecutor(networkName).IfBool(createAndDeleteNetwork),
rc.startServiceContainers(networkName),
rc.JobContainer.Create(rc.Config.ContainerCapAdd, rc.Config.ContainerCapDrop),
rc.JobContainer.Start(false),
@ -452,10 +497,10 @@ func (rc *RunContext) UpdateExtraPath(ctx context.Context, githubEnvPath string)
return nil
}
// stopJobContainer removes the job container (if it exists) and its volume (if it exists) if !rc.Config.ReuseContainers
// stopJobContainer removes the job container (if it exists) and its volume (if it exists)
func (rc *RunContext) stopJobContainer() common.Executor {
return func(ctx context.Context) error {
if rc.cleanUpJobContainer != nil && !rc.Config.ReuseContainers {
if rc.cleanUpJobContainer != nil {
return rc.cleanUpJobContainer(ctx)
}
return nil
@ -472,7 +517,7 @@ func (rc *RunContext) pullServicesImages(forcePull bool) common.Executor {
}
}
func (rc *RunContext) startServiceContainers(networkName string) common.Executor {
func (rc *RunContext) startServiceContainers(_ string) common.Executor {
return func(ctx context.Context) error {
execs := []common.Executor{}
for _, c := range rc.ServiceContainers {
@ -490,7 +535,7 @@ func (rc *RunContext) stopServiceContainers() common.Executor {
return func(ctx context.Context) error {
execs := []common.Executor{}
for _, c := range rc.ServiceContainers {
execs = append(execs, c.Remove())
execs = append(execs, c.Remove().Finally(c.Close()))
}
return common.NewParallelExecutor(len(execs), execs...)(ctx)
}
@ -610,12 +655,11 @@ func (rc *RunContext) containerImage(ctx context.Context) string {
}
func (rc *RunContext) runsOnImage(ctx context.Context) string {
job := rc.Run.Job()
if job.RunsOn() == nil {
if rc.Run.Job().RunsOn() == nil {
common.Logger(ctx).Errorf("'runs-on' key not defined in %s", rc.String())
}
job := rc.Run.Job()
runsOn := job.RunsOn()
for i, v := range runsOn {
runsOn[i] = rc.ExprEval.Interpolate(ctx, v)
@ -627,8 +671,8 @@ func (rc *RunContext) runsOnImage(ctx context.Context) string {
}
}
for _, runnerLabel := range runsOn {
image := rc.Config.Platforms[strings.ToLower(runnerLabel)]
for _, platformName := range rc.runsOnPlatformNames(ctx) {
image := rc.Config.Platforms[strings.ToLower(platformName)]
if image != "" {
return image
}
@ -637,6 +681,21 @@ func (rc *RunContext) runsOnImage(ctx context.Context) string {
return ""
}
func (rc *RunContext) runsOnPlatformNames(ctx context.Context) []string {
job := rc.Run.Job()
if job.RunsOn() == nil {
return []string{}
}
if err := rc.ExprEval.EvaluateYamlNode(ctx, &job.RawRunsOn); err != nil {
common.Logger(ctx).Errorf("Error while evaluating runs-on: %v", err)
return []string{}
}
return job.RunsOn()
}
func (rc *RunContext) platformImage(ctx context.Context) string {
if containerImage := rc.containerImage(ctx); containerImage != "" {
return containerImage
@ -667,8 +726,6 @@ func (rc *RunContext) isEnabled(ctx context.Context) (bool, error) {
if jobType == model.JobTypeInvalid {
return false, jobTypeErr
} else if jobType != model.JobTypeDefault {
return true, nil
}
if !runJob {
@ -676,14 +733,13 @@ func (rc *RunContext) isEnabled(ctx context.Context) (bool, error) {
return false, nil
}
if jobType != model.JobTypeDefault {
return true, nil
}
img := rc.platformImage(ctx)
if img == "" {
if job.RunsOn() == nil {
l.Errorf("'runs-on' key not defined in %s", rc.String())
}
for _, runnerLabel := range job.RunsOn() {
platformName := rc.ExprEval.Interpolate(ctx, runnerLabel)
for _, platformName := range rc.runsOnPlatformNames(ctx) {
l.Infof("\U0001F6A7 Skipping unsupported platform -- Try running with `-P %+v=...`", platformName)
}
return false, nil
@ -960,7 +1016,6 @@ func (rc *RunContext) withGithubEnv(ctx context.Context, github *model.GithubCon
env["GITHUB_REF"] = github.Ref
env["GITHUB_REF_NAME"] = github.RefName
env["GITHUB_REF_TYPE"] = github.RefType
env["GITHUB_TOKEN"] = github.Token
env["GITHUB_JOB"] = github.Job
env["GITHUB_REPOSITORY_OWNER"] = github.RepositoryOwner
env["GITHUB_RETENTION_DAYS"] = github.RetentionDays
@ -987,9 +1042,7 @@ func (rc *RunContext) withGithubEnv(ctx context.Context, github *model.GithubCon
setActionRuntimeVars(rc, env)
}
job := rc.Run.Job()
for _, runnerLabel := range job.RunsOn() {
platformName := rc.ExprEval.Interpolate(ctx, runnerLabel)
for _, platformName := range rc.runsOnPlatformNames(ctx) {
if platformName != "" {
if platformName == "ubuntu-latest" {
// hardcode current ubuntu-latest since we have no way to check that 'on the fly'

View File

@ -470,6 +470,53 @@ func createJob(t *testing.T, input string, result string) *model.Job {
return job
}
func TestRunContextRunsOnPlatformNames(t *testing.T) {
log.SetLevel(log.DebugLevel)
assertObject := assert.New(t)
rc := createIfTestRunContext(map[string]*model.Job{
"job1": createJob(t, `runs-on: ubuntu-latest`, ""),
})
assertObject.Equal([]string{"ubuntu-latest"}, rc.runsOnPlatformNames(context.Background()))
rc = createIfTestRunContext(map[string]*model.Job{
"job1": createJob(t, `runs-on: ${{ 'ubuntu-latest' }}`, ""),
})
assertObject.Equal([]string{"ubuntu-latest"}, rc.runsOnPlatformNames(context.Background()))
rc = createIfTestRunContext(map[string]*model.Job{
"job1": createJob(t, `runs-on: [self-hosted, my-runner]`, ""),
})
assertObject.Equal([]string{"self-hosted", "my-runner"}, rc.runsOnPlatformNames(context.Background()))
rc = createIfTestRunContext(map[string]*model.Job{
"job1": createJob(t, `runs-on: [self-hosted, "${{ 'my-runner' }}"]`, ""),
})
assertObject.Equal([]string{"self-hosted", "my-runner"}, rc.runsOnPlatformNames(context.Background()))
rc = createIfTestRunContext(map[string]*model.Job{
"job1": createJob(t, `runs-on: ${{ fromJSON('["ubuntu-latest"]') }}`, ""),
})
assertObject.Equal([]string{"ubuntu-latest"}, rc.runsOnPlatformNames(context.Background()))
// test missing / invalid runs-on
rc = createIfTestRunContext(map[string]*model.Job{
"job1": createJob(t, `name: something`, ""),
})
assertObject.Equal([]string{}, rc.runsOnPlatformNames(context.Background()))
rc = createIfTestRunContext(map[string]*model.Job{
"job1": createJob(t, `runs-on:
mapping: value`, ""),
})
assertObject.Equal([]string{}, rc.runsOnPlatformNames(context.Background()))
rc = createIfTestRunContext(map[string]*model.Job{
"job1": createJob(t, `runs-on: ${{ invalid expression }}`, ""),
})
assertObject.Equal([]string{}, rc.runsOnPlatformNames(context.Background()))
}
func TestRunContextIsEnabled(t *testing.T) {
log.SetLevel(log.DebugLevel)
assertObject := assert.New(t)
@ -572,6 +619,17 @@ if: always()`, ""),
})
rc.Run.JobID = "job2"
assertObject.True(rc.isEnabled(context.Background()))
rc = createIfTestRunContext(map[string]*model.Job{
"job1": createJob(t, `uses: ./.github/workflows/reusable.yml`, ""),
})
assertObject.True(rc.isEnabled(context.Background()))
rc = createIfTestRunContext(map[string]*model.Job{
"job1": createJob(t, `uses: ./.github/workflows/reusable.yml
if: false`, ""),
})
assertObject.False(rc.isEnabled(context.Background()))
}
func TestRunContextGetEnv(t *testing.T) {

View File

@ -22,50 +22,52 @@ type Runner interface {
// Config contains the config for a new runner
type Config struct {
Actor string // the user that triggered the event
Workdir string // path to working directory
ActionCacheDir string // path used for caching action contents
BindWorkdir bool // bind the workdir to the job container
EventName string // name of event to run
EventPath string // path to JSON file to use for event.json in containers
DefaultBranch string // name of the main branch for this repository
ReuseContainers bool // reuse containers to maintain state
ForcePull bool // force pulling of the image, even if already present
ForceRebuild bool // force rebuilding local docker image action
LogOutput bool // log the output from docker run
JSONLogger bool // use json or text logger
LogPrefixJobID bool // switches from the full job name to the job id
Env map[string]string // env for containers
Inputs map[string]string // manually passed action inputs
Secrets map[string]string // list of secrets
Vars map[string]string // list of vars
Token string // GitHub token
InsecureSecrets bool // switch hiding output when printing to terminal
Platforms map[string]string // list of platforms
Privileged bool // use privileged mode
UsernsMode string // user namespace to use
ContainerArchitecture string // Desired OS/architecture platform for running containers
ContainerDaemonSocket string // Path to Docker daemon socket
ContainerOptions string // Options for the job container
UseGitIgnore bool // controls if paths in .gitignore should not be copied into container, default true
GitHubInstance string // GitHub instance to use, default "github.com"
ContainerCapAdd []string // list of kernel capabilities to add to the containers
ContainerCapDrop []string // list of kernel capabilities to remove from the containers
AutoRemove bool // controls if the container is automatically removed upon workflow completion
ArtifactServerPath string // the path where the artifact server stores uploads
ArtifactServerAddr string // the address the artifact server binds to
ArtifactServerPort string // the port the artifact server binds to
NoSkipCheckout bool // do not skip actions/checkout
RemoteName string // remote name in local git repo config
ReplaceGheActionWithGithubCom []string // Use actions from GitHub Enterprise instance to GitHub
ReplaceGheActionTokenWithGithubCom string // Token of private action repo on GitHub.
Matrix map[string]map[string]bool // Matrix config to run
Actor string // the user that triggered the event
Workdir string // path to working directory
ActionCacheDir string // path used for caching action contents
ActionOfflineMode bool // when offline, use cached action contents
BindWorkdir bool // bind the workdir to the job container
EventName string // name of event to run
EventPath string // path to JSON file to use for event.json in containers
DefaultBranch string // name of the main branch for this repository
ReuseContainers bool // reuse containers to maintain state
ForcePull bool // force pulling of the image, even if already present
ForceRebuild bool // force rebuilding local docker image action
LogOutput bool // log the output from docker run
JSONLogger bool // use json or text logger
LogPrefixJobID bool // switches from the full job name to the job id
Env map[string]string // env for containers
Inputs map[string]string // manually passed action inputs
Secrets map[string]string // list of secrets
Vars map[string]string // list of vars
Token string // GitHub token
InsecureSecrets bool // switch hiding output when printing to terminal
Platforms map[string]string // list of platforms
Privileged bool // use privileged mode
UsernsMode string // user namespace to use
ContainerArchitecture string // Desired OS/architecture platform for running containers
ContainerDaemonSocket string // Path to Docker daemon socket
ContainerOptions string // Options for the job container
UseGitIgnore bool // controls if paths in .gitignore should not be copied into container, default true
GitHubInstance string // GitHub instance to use, default "github.com"
ContainerCapAdd []string // list of kernel capabilities to add to the containers
ContainerCapDrop []string // list of kernel capabilities to remove from the containers
AutoRemove bool // controls if the container is automatically removed upon workflow completion
ArtifactServerPath string // the path where the artifact server stores uploads
ArtifactServerAddr string // the address the artifact server binds to
ArtifactServerPort string // the port the artifact server binds to
NoSkipCheckout bool // do not skip actions/checkout
RemoteName string // remote name in local git repo config
ReplaceGheActionWithGithubCom []string // Use actions from GitHub Enterprise instance to GitHub
ReplaceGheActionTokenWithGithubCom string // Token of private action repo on GitHub.
Matrix map[string]map[string]bool // Matrix config to run
ContainerNetworkMode docker_container.NetworkMode // the network mode of job containers (the value of --network)
ActionCache ActionCache // Use a custom ActionCache Implementation
PresetGitHubContext *model.GithubContext // the preset github context, overrides some fields like DefaultBranch, Env, Secrets etc.
EventJSON string // the content of JSON file to use for event.json in containers, overrides EventPath
ContainerNamePrefix string // the prefix of container name
ContainerMaxLifetime time.Duration // the max lifetime of job containers
ContainerNetworkMode docker_container.NetworkMode // the network mode of job containers (the value of --network)
DefaultActionInstance string // the default actions web site
PlatformPicker func(labels []string) string // platform picker; it takes precedence over Platforms if it isn't nil
JobLoggerLevel *log.Level // the level of job logger
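
As a usage sketch of the new and renamed fields, an embedder might populate the config roughly as follows before handing it to runner.New. The values are illustrative only, and ActionCache is left as a comment since any implementation of the interface can be plugged in; this is a sketch, not the canonical setup.

package main

import (
	"log"

	"github.com/nektos/act/pkg/runner"
)

func main() {
	cfg := &runner.Config{
		Workdir:           ".",
		EventName:         "push",
		Platforms:         map[string]string{"ubuntu-latest": "node:16-buster-slim"},
		ActionCacheDir:    "/tmp/act-cache", // path used for caching action contents
		ActionOfflineMode: true,             // reuse cached actions instead of re-fetching
		// ActionCache: any ActionCache implementation could be set here, e.g. the
		// LocalRepositoryCache wired up by the test harness later in this diff.
	}
	r, err := runner.New(cfg)
	if err != nil {
		log.Fatal(err)
	}
	_ = r // r.NewPlanExecutor(plan)(ctx) would then execute a planned workflow
}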

View File

@ -6,6 +6,7 @@ import (
"fmt"
"io"
"os"
"path"
"path/filepath"
"runtime"
"strings"
@ -14,6 +15,7 @@ import (
"github.com/joho/godotenv"
log "github.com/sirupsen/logrus"
assert "github.com/stretchr/testify/assert"
"gopkg.in/yaml.v3"
"github.com/nektos/act/pkg/common"
"github.com/nektos/act/pkg/model"
@ -187,6 +189,7 @@ func (j *TestJobFileInfo) runTest(ctx context.Context, t *testing.T, cfg *Config
GitHubInstance: "github.com",
ContainerArchitecture: cfg.ContainerArchitecture,
Matrix: cfg.Matrix,
ActionCache: cfg.ActionCache,
}
runner, err := New(runnerConfig)
@ -209,6 +212,10 @@ func (j *TestJobFileInfo) runTest(ctx context.Context, t *testing.T, cfg *Config
fmt.Println("::endgroup::")
}
type TestConfig struct {
LocalRepositories map[string]string `yaml:"local-repositories"`
}
func TestRunEvent(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test")
@ -302,6 +309,14 @@ func TestRunEvent(t *testing.T) {
{workdir, "set-env-step-env-override", "push", "", platforms, secrets},
{workdir, "set-env-new-env-file-per-step", "push", "", platforms, secrets},
{workdir, "no-panic-on-invalid-composite-action", "push", "jobs failed due to invalid action", platforms, secrets},
// services
{workdir, "services", "push", "", platforms, secrets},
{workdir, "services-host-network", "push", "", platforms, secrets},
{workdir, "services-with-container", "push", "", platforms, secrets},
// local remote action overrides
{workdir, "local-remote-action-overrides", "push", "", platforms, secrets},
}
for _, table := range tables {
@ -315,6 +330,22 @@ func TestRunEvent(t *testing.T) {
config.EventPath = eventFile
}
testConfigFile := filepath.Join(workdir, table.workflowPath, "config.yml")
if file, err := os.ReadFile(testConfigFile); err == nil {
testConfig := &TestConfig{}
if yaml.Unmarshal(file, testConfig) == nil {
if testConfig.LocalRepositories != nil {
config.ActionCache = &LocalRepositoryCache{
Parent: GoGitActionCache{
path.Clean(path.Join(workdir, "cache")),
},
LocalRepositories: testConfig.LocalRepositories,
CacheDirCache: map[string]string{},
}
}
}
}
table.runTest(ctx, t, config)
})
}

View File

@ -34,6 +34,9 @@ const (
stepStagePost
)
// Controls how many symlinks are resolved for local and remote Actions
const maxSymlinkDepth = 10
func (s stepStage) String() string {
switch s {
case stepStagePre:
@ -307,3 +310,13 @@ func mergeIntoMapCaseInsensitive(target map[string]string, maps ...map[string]st
}
}
}
func symlinkJoin(filename, sym, parent string) (string, error) {
dir := path.Dir(filename)
dest := path.Join(dir, sym)
prefix := path.Clean(parent) + "/"
if strings.HasPrefix(dest, prefix) || prefix == "./" {
return dest, nil
}
return "", fmt.Errorf("symlink tries to access file '%s' outside of '%s'", strings.ReplaceAll(dest, "'", "''"), strings.ReplaceAll(parent, "'", "''"))
}
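
To make the traversal guard concrete, here is a hypothetical in-package test sketching how symlinkJoin behaves in three cases: a link resolving inside the parent, a link escaping the parent, and the parent "." used by the remote-action reader below. The paths are made up for illustration.

package runner

import "testing"

// Hypothetical sketch of the guard above; not part of the actual test suite.
func TestSymlinkJoinSketch(t *testing.T) {
	// "index.js" next to "action/dist/main.js" stays below "action" -> allowed.
	p, err := symlinkJoin("action/dist/main.js", "index.js", "action")
	if err != nil || p != "action/dist/index.js" {
		t.Fatalf("expected join inside parent, got %q, %v", p, err)
	}
	// "../../secrets.txt" resolves to "secrets.txt", outside "action" -> rejected.
	if _, err := symlinkJoin("action/dist/main.js", "../../secrets.txt", "action"); err == nil {
		t.Fatal("expected a link escaping the parent to be rejected")
	}
	// A parent of "." turns the prefix into "./", which disables the check;
	// this is how the remote-action reader calls it.
	if _, err := symlinkJoin("main.js", "../elsewhere.js", "."); err != nil {
		t.Fatalf("expected parent \".\" to allow the join, got %v", err)
	}
}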

View File

@ -3,7 +3,10 @@ package runner
import (
"archive/tar"
"context"
"errors"
"fmt"
"io"
"io/fs"
"os"
"path"
"path/filepath"
@ -42,15 +45,31 @@ func (sal *stepActionLocal) main() common.Executor {
localReader := func(ctx context.Context) actionYamlReader {
_, cpath := getContainerActionPaths(sal.Step, path.Join(actionDir, ""), sal.RunContext)
return func(filename string) (io.Reader, io.Closer, error) {
tars, err := sal.RunContext.JobContainer.GetContainerArchive(ctx, path.Join(cpath, filename))
if err != nil {
return nil, nil, os.ErrNotExist
spath := path.Join(cpath, filename)
for i := 0; i < maxSymlinkDepth; i++ {
tars, err := sal.RunContext.JobContainer.GetContainerArchive(ctx, spath)
if errors.Is(err, fs.ErrNotExist) {
return nil, nil, err
} else if err != nil {
return nil, nil, fs.ErrNotExist
}
treader := tar.NewReader(tars)
header, err := treader.Next()
if errors.Is(err, io.EOF) {
return nil, nil, os.ErrNotExist
} else if err != nil {
return nil, nil, err
}
if header.FileInfo().Mode()&os.ModeSymlink == os.ModeSymlink {
spath, err = symlinkJoin(spath, header.Linkname, cpath)
if err != nil {
return nil, nil, err
}
} else {
return treader, tars, nil
}
}
treader := tar.NewReader(tars)
if _, err := treader.Next(); err != nil {
return nil, nil, os.ErrNotExist
}
return treader, tars, nil
return nil, nil, fmt.Errorf("max depth %d of symlinks exceeded while reading %s", maxSymlinkDepth, spath)
}
}

View File

@ -1,6 +1,7 @@
package runner
import (
"archive/tar"
"context"
"errors"
"fmt"
@ -28,6 +29,8 @@ type stepActionRemote struct {
action *model.Action
env map[string]string
remoteAction *remoteAction
cacheDir string
resolvedSha string
}
var stepActionRemoteNewCloneExecutor = git.NewGitCloneExecutor
@ -62,6 +65,48 @@ func (sar *stepActionRemote) prepareActionExecutor() common.Executor {
github.Token = sar.RunContext.Config.ReplaceGheActionTokenWithGithubCom
}
}
if sar.RunContext.Config.ActionCache != nil {
cache := sar.RunContext.Config.ActionCache
var err error
sar.cacheDir = fmt.Sprintf("%s/%s", sar.remoteAction.Org, sar.remoteAction.Repo)
repoURL := sar.remoteAction.URL + "/" + sar.cacheDir
repoRef := sar.remoteAction.Ref
sar.resolvedSha, err = cache.Fetch(ctx, sar.cacheDir, repoURL, repoRef, github.Token)
if err != nil {
return fmt.Errorf("failed to fetch \"%s\" version \"%s\": %w", repoURL, repoRef, err)
}
remoteReader := func(ctx context.Context) actionYamlReader {
return func(filename string) (io.Reader, io.Closer, error) {
spath := path.Join(sar.remoteAction.Path, filename)
for i := 0; i < maxSymlinkDepth; i++ {
tars, err := cache.GetTarArchive(ctx, sar.cacheDir, sar.resolvedSha, spath)
if err != nil {
return nil, nil, os.ErrNotExist
}
treader := tar.NewReader(tars)
header, err := treader.Next()
if err != nil {
return nil, nil, os.ErrNotExist
}
if header.FileInfo().Mode()&os.ModeSymlink == os.ModeSymlink {
spath, err = symlinkJoin(spath, header.Linkname, ".")
if err != nil {
return nil, nil, err
}
} else {
return treader, tars, nil
}
}
return nil, nil, fmt.Errorf("max depth %d of symlinks exceeded while reading %s", maxSymlinkDepth, spath)
}
}
actionModel, err := sar.readAction(ctx, sar.Step, sar.resolvedSha, sar.remoteAction.Path, remoteReader(ctx), os.WriteFile)
sar.action = actionModel
return err
}
actionDir := fmt.Sprintf("%s/%s", sar.RunContext.ActionCacheDir(), safeFilename(sar.Step.Uses))
gitClone := stepActionRemoteNewCloneExecutor(git.NewGitCloneExecutorInput{
@ -75,6 +120,7 @@ func (sar *stepActionRemote) prepareActionExecutor() common.Executor {
For GitHub, they are the same, always github.com.
But for Gitea, tasks triggered by a.com can clone actions from b.com.
*/
OfflineMode: sar.RunContext.Config.ActionOfflineMode,
InsecureSkipTLS: sar.cloneSkipTLS(), // For Gitea
})

View File

@ -182,7 +182,6 @@ func TestSetupEnv(t *testing.T) {
"GITHUB_RUN_ID": "runId",
"GITHUB_RUN_NUMBER": "1",
"GITHUB_SERVER_URL": "https://",
"GITHUB_TOKEN": "",
"GITHUB_WORKFLOW": "",
"INPUT_STEP_WITH": "with-value",
"RC_KEY": "rcvalue",

View File

@ -0,0 +1,3 @@
local-repositories:
https://github.com/nektos/test-override@a: testdata/actions/node20
nektos/test-override@b: testdata/actions/node16

View File

@ -0,0 +1,9 @@
name: basic
on: push
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: nektos/test-override@a
- uses: nektos/test-override@b

View File

@ -0,0 +1,14 @@
name: services-host-network
on: push
jobs:
services-host-network:
runs-on: ubuntu-latest
services:
nginx:
image: "nginx:latest"
ports:
- "8080:80"
steps:
- run: apt-get -qq update && apt-get -yqq install --no-install-recommends curl net-tools
- run: netstat -tlpen
- run: curl -v http://localhost:8080

View File

@ -0,0 +1,16 @@
name: services-with-containers
on: push
jobs:
services-with-containers:
runs-on: ubuntu-latest
# https://docs.github.com/en/actions/using-containerized-services/about-service-containers#running-jobs-in-a-container
container:
image: "ubuntu:latest"
services:
nginx:
image: "nginx:latest"
ports:
- "8080:80"
steps:
- run: apt-get -qq update && apt-get -yqq install --no-install-recommends curl
- run: curl -v http://nginx:80