fix: deep evaluate matrix strategy (#964)

* fix: deep evaluate matrix strategy

* Try to make linter happy.

* Apply PR feedback, fix insert directive, more tests

* Fix: logic error

Co-authored-by: Casey Lee <cplee@nektos.com>
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
ChristopherHX 2022-02-15 17:35:02 +01:00 committed by GitHub
parent 0fae96792c
commit d1e0216039
8 changed files with 236 additions and 44 deletions


@@ -8,11 +8,13 @@ import (
    "github.com/nektos/act/pkg/exprparser"
    log "github.com/sirupsen/logrus"
    "gopkg.in/yaml.v3"
)

// ExpressionEvaluator is the interface for evaluating expressions
type ExpressionEvaluator interface {
    evaluate(string, bool) (interface{}, error)
    EvaluateYamlNode(node *yaml.Node) error
    Interpolate(string) string
}
@@ -130,6 +132,82 @@ func (ee expressionEvaluator) evaluate(in string, isIfExpression bool) (interfac
    return evaluated, err
}

func (ee expressionEvaluator) evaluateScalarYamlNode(node *yaml.Node) error {
    var in string
    if err := node.Decode(&in); err != nil {
        return err
    }
    if !strings.Contains(in, "${{") || !strings.Contains(in, "}}") {
        return nil
    }
    expr, _ := rewriteSubExpression(in, false)
    if in != expr {
        log.Debugf("expression '%s' rewritten to '%s'", in, expr)
    }
    res, err := ee.evaluate(expr, false)
    if err != nil {
        return err
    }
    return node.Encode(res)
}

func (ee expressionEvaluator) evaluateMappingYamlNode(node *yaml.Node) error {
    // GitHub has this undocumented feature to merge maps, called insert directive
    insertDirective := regexp.MustCompile(`\${{\s*insert\s*}}`)
    for i := 0; i < len(node.Content)/2; {
        k := node.Content[i*2]
        v := node.Content[i*2+1]
        if err := ee.EvaluateYamlNode(v); err != nil {
            return err
        }
        var sk string
        // Merge the nested map of the insert directive
        if k.Decode(&sk) == nil && insertDirective.MatchString(sk) {
            node.Content = append(append(node.Content[:i*2], v.Content...), node.Content[(i+1)*2:]...)
            i += len(v.Content) / 2
        } else {
            if err := ee.EvaluateYamlNode(k); err != nil {
                return err
            }
            i++
        }
    }
    return nil
}

func (ee expressionEvaluator) evaluateSequenceYamlNode(node *yaml.Node) error {
    for i := 0; i < len(node.Content); {
        v := node.Content[i]
        // Preserve nested sequences
        wasseq := v.Kind == yaml.SequenceNode
        if err := ee.EvaluateYamlNode(v); err != nil {
            return err
        }
        // GitHub has this undocumented feature to merge sequences / arrays
        // We have a nested sequence via evaluation, merge the arrays
        if v.Kind == yaml.SequenceNode && !wasseq {
            node.Content = append(append(node.Content[:i], v.Content...), node.Content[i+1:]...)
            i += len(v.Content)
        } else {
            i++
        }
    }
    return nil
}

func (ee expressionEvaluator) EvaluateYamlNode(node *yaml.Node) error {
    switch node.Kind {
    case yaml.ScalarNode:
        return ee.evaluateScalarYamlNode(node)
    case yaml.MappingNode:
        return ee.evaluateMappingYamlNode(node)
    case yaml.SequenceNode:
        return ee.evaluateSequenceYamlNode(node)
    default:
        return nil
    }
}

func (ee expressionEvaluator) Interpolate(in string) string {
    if !strings.Contains(in, "${{") || !strings.Contains(in, "}}") {
        return in

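For readers who are not familiar with gopkg.in/yaml.v3 node trees, the merge logic in evaluateMappingYamlNode above relies on the fact that a mapping node stores its keys and values alternating in Content, so honouring the ${{ insert }} directive is a slice splice. The following standalone sketch (illustrative only, not part of this commit) shows that layout and the splice with plain yaml.v3 calls:

package main

import (
    "fmt"

    "gopkg.in/yaml.v3"
)

func main() {
    // A small mapping with a nested map that we want to merge into its parent,
    // mimicking what ${{ insert }} asks the evaluator to do.
    src := "key1: val1\nnested:\n  key2: val2\n  key3: val3\n"

    var doc yaml.Node
    if err := yaml.Unmarshal([]byte(src), &doc); err != nil {
        panic(err)
    }

    // doc is a DocumentNode; its single child is the top-level mapping.
    m := doc.Content[0]
    fmt.Println(len(m.Content)) // 4: key1, val1, "nested", <nested mapping>

    // Splice the nested mapping's key/value pairs over the "nested" key and its
    // value (indices 2 and 3). evaluateMappingYamlNode does the same, plus
    // re-appending any trailing pairs, when it sees an insert directive key.
    nested := m.Content[3]
    m.Content = append(m.Content[:2], nested.Content...)

    out, _ := yaml.Marshal(m)
    fmt.Print(string(out)) // key1, key2 and key3 are now siblings
}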

@@ -115,59 +115,71 @@ func New(runnerConfig *Config) (Runner, error) {
func (runner *runnerImpl) NewPlanExecutor(plan *model.Plan) common.Executor {
    maxJobNameLen := 0

-    pipeline := make([]common.Executor, 0)
-    for s, stage := range plan.Stages {
-        stageExecutor := make([]common.Executor, 0)
-        for r, run := range stage.Runs {
-            job := run.Job()
-            matrixes := job.GetMatrixes()
-            maxParallel := 4
-            if job.Strategy != nil {
-                maxParallel = job.Strategy.MaxParallel
-            }
-
-            if len(matrixes) < maxParallel {
-                maxParallel = len(matrixes)
-            }
-
-            b := 0
-            for i, matrix := range matrixes {
-                rc := runner.newRunContext(run, matrix)
-                rc.JobName = rc.Name
-                if len(matrixes) > 1 {
-                    rc.Name = fmt.Sprintf("%s-%d", rc.Name, i+1)
-                }
-                if len(rc.String()) > maxJobNameLen {
-                    maxJobNameLen = len(rc.String())
-                }
-                stageExecutor = append(stageExecutor, func(ctx context.Context) error {
-                    jobName := fmt.Sprintf("%-*s", maxJobNameLen, rc.String())
-                    return rc.Executor().Finally(func(ctx context.Context) error {
-                        isLastRunningContainer := func(currentStage int, currentRun int) bool {
-                            return currentStage == len(plan.Stages)-1 && currentRun == len(stage.Runs)-1
-                        }
-
-                        if runner.config.AutoRemove && isLastRunningContainer(s, r) {
-                            log.Infof("Cleaning up container for job %s", rc.JobName)
-                            if err := rc.stopJobContainer()(ctx); err != nil {
-                                log.Errorf("Error while cleaning container: %v", err)
-                            }
-                        }
-
-                        return nil
-                    })(common.WithJobErrorContainer(WithJobLogger(ctx, jobName, rc.Config.Secrets, rc.Config.InsecureSecrets)))
-                })
-                b++
-                if b == maxParallel {
-                    pipeline = append(pipeline, common.NewParallelExecutor(stageExecutor...))
-                    stageExecutor = make([]common.Executor, 0)
-                    b = 0
-                }
-            }
-        }
-    }
-
-    return common.NewPipelineExecutor(pipeline...).Then(handleFailure(plan))
+    stagePipeline := make([]common.Executor, 0)
+    for i := range plan.Stages {
+        s := i
+        stage := plan.Stages[i]
+        stagePipeline = append(stagePipeline, func(ctx context.Context) error {
+            pipeline := make([]common.Executor, 0)
+            stageExecutor := make([]common.Executor, 0)
+            for r, run := range stage.Runs {
+                job := run.Job()
+                if job.Strategy != nil {
+                    strategyRc := runner.newRunContext(run, nil)
+                    if err := strategyRc.NewExpressionEvaluator().EvaluateYamlNode(&job.Strategy.RawMatrix); err != nil {
+                        log.Errorf("Error while evaluating matrix: %v", err)
+                    }
+                }
+                matrixes := job.GetMatrixes()
+                maxParallel := 4
+                if job.Strategy != nil {
+                    maxParallel = job.Strategy.MaxParallel
+                }
+
+                if len(matrixes) < maxParallel {
+                    maxParallel = len(matrixes)
+                }
+
+                b := 0
+                for i, matrix := range matrixes {
+                    rc := runner.newRunContext(run, matrix)
+                    rc.JobName = rc.Name
+                    if len(matrixes) > 1 {
+                        rc.Name = fmt.Sprintf("%s-%d", rc.Name, i+1)
+                    }
+                    if len(rc.String()) > maxJobNameLen {
+                        maxJobNameLen = len(rc.String())
+                    }
+                    stageExecutor = append(stageExecutor, func(ctx context.Context) error {
+                        jobName := fmt.Sprintf("%-*s", maxJobNameLen, rc.String())
+                        return rc.Executor().Finally(func(ctx context.Context) error {
+                            isLastRunningContainer := func(currentStage int, currentRun int) bool {
+                                return currentStage == len(plan.Stages)-1 && currentRun == len(stage.Runs)-1
+                            }
+
+                            if runner.config.AutoRemove && isLastRunningContainer(s, r) {
+                                log.Infof("Cleaning up container for job %s", rc.JobName)
+                                if err := rc.stopJobContainer()(ctx); err != nil {
+                                    log.Errorf("Error while cleaning container: %v", err)
+                                }
+                            }
+
+                            return nil
+                        })(common.WithJobErrorContainer(WithJobLogger(ctx, jobName, rc.Config.Secrets, rc.Config.InsecureSecrets)))
+                    })
+                    b++
+                    if b == maxParallel {
+                        pipeline = append(pipeline, common.NewParallelExecutor(stageExecutor...))
+                        stageExecutor = make([]common.Executor, 0)
+                        b = 0
+                    }
+                }
+            }
+            return common.NewPipelineExecutor(pipeline...)(ctx)
+        })
+    }
+
+    return common.NewPipelineExecutor(stagePipeline...).Then(handleFailure(plan))
}

func handleFailure(plan *model.Plan) common.Executor {

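The structural change in this hunk is that everything per stage, including the new EvaluateYamlNode call on strategy.RawMatrix, now runs inside a closure that executes when the stage itself runs, not while the plan is being built. That is what lets a matrix reference needs outputs of jobs from earlier stages. A rough, self-contained sketch of that deferred-executor pattern (hypothetical names, not act's common package):

package main

import (
    "context"
    "fmt"
)

type executor func(ctx context.Context) error

// pipeline runs the given executors one after another, stopping on the first error.
func pipeline(steps ...executor) executor {
    return func(ctx context.Context) error {
        for _, step := range steps {
            if err := step(ctx); err != nil {
                return err
            }
        }
        return nil
    }
}

func main() {
    outputs := map[string]string{} // stands in for the needs.<job>.outputs context

    stage1 := func(ctx context.Context) error {
        outputs["matrix"] = `{"package": ["a", "b"]}`
        return nil
    }

    // Because stage2 is a closure, the matrix value is read only when the stage
    // executes, i.e. after stage1 has already run. Evaluating it eagerly while
    // building the pipeline would see an empty map.
    stage2 := func(ctx context.Context) error {
        fmt.Println("evaluating matrix from:", outputs["matrix"])
        return nil
    }

    if err := pipeline(stage1, stage2)(context.Background()); err != nil {
        fmt.Println("pipeline failed:", err)
    }
}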

@@ -133,6 +133,11 @@ func TestRunEvent(t *testing.T) {
        {"testdata", "steps-context/outcome", "push", "", platforms, ""},
        {"testdata", "job-status-check", "push", "job 'fail' failed", platforms, ""},
        {"testdata", "if-expressions", "push", "Job 'mytest' failed", platforms, ""},
        {"testdata", "evalmatrix", "push", "", platforms, ""},
        {"testdata", "evalmatrixneeds", "push", "", platforms, ""},
        {"testdata", "evalmatrixneeds2", "push", "", platforms, ""},
        {"testdata", "evalmatrix-merge-map", "push", "", platforms, ""},
        {"testdata", "evalmatrix-merge-array", "push", "", platforms, ""},
        {"../model/testdata", "strategy", "push", "", platforms, ""}, // TODO: move all testdata into pkg so we can validate it with planner and runner
        // {"testdata", "issue-228", "push", "", platforms, ""}, // TODO [igni]: Remove this once everything passes


@@ -0,0 +1,12 @@
on: push
jobs:
  a:
    strategy:
      matrix:
        a:
          - env:
              key1: ${{'val'}}1
          - ${{fromJSON('[{"env":{"key2":"val2"}},{"env":{"key3":"val3"}}]')}}
    runs-on: ubuntu-latest
    steps:
      - run: exit ${{ (matrix.a.env.key2 == 'val2' || matrix.a.env.key1 == 'val1' || matrix.a.env.key3 == 'val3' ) && '0' || '1' }}


@@ -0,0 +1,14 @@
on: push
jobs:
  a:
    strategy:
      matrix:
        a:
          - env:
              key1: val1
              ${{insert}}:
                key2: val2
              ${{ insert }}: ${{fromJSON('{"key3":"val3"}')}}
    runs-on: ubuntu-latest
    steps:
      - run: exit ${{ (matrix.a.env.key2 == 'val2' && matrix.a.env.key1 == 'val1' && matrix.a.env.key3 == 'val3' ) && '0' || '1' }}

pkg/runner/testdata/evalmatrix/push.yml (vendored, new file, 18 additions)

@@ -0,0 +1,18 @@
on: push
jobs:
  evalm:
    strategy:
      matrix: |-
        ${{fromJson('
          {
            "A": [ "A", "B" ]
          }
        ')}}
    runs-on: ubuntu-latest
    steps:
      - name: Check if the matrix key A exists
        run: |
          echo $MATRIX
          exit ${{matrix.A && '0' || '1'}}
        env:
          MATRIX: ${{toJSON(matrix)}}

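The evalmatrix workflow above exercises the scalar case: the whole matrix value is one block-scalar expression, and evaluateScalarYamlNode replaces that scalar node in place with the structured result via node.Encode. A standalone sketch of that replacement with plain gopkg.in/yaml.v3 (the evaluated value is hard-coded here because the sketch has no expression evaluator):

package main

import (
    "fmt"

    "gopkg.in/yaml.v3"
)

func main() {
    src := "matrix: |-\n  ${{fromJson('{\"A\": [\"A\", \"B\"]}')}}\n"

    var doc yaml.Node
    if err := yaml.Unmarshal([]byte(src), &doc); err != nil {
        panic(err)
    }

    // doc.Content[0] is the top-level mapping; its Content[1] is the value of
    // "matrix", currently a scalar node holding the raw expression string.
    value := doc.Content[0].Content[1]

    // Pretend the expression evaluator returned this Go value for fromJson(...).
    evaluated := map[string][]string{"A": {"A", "B"}}

    // Encode rewrites the node in place, turning the scalar into a mapping node,
    // so the strategy can later be decoded into a real matrix.
    if err := value.Encode(evaluated); err != nil {
        panic(err)
    }

    out, _ := yaml.Marshal(&doc)
    fmt.Print(string(out)) // matrix now holds the decoded map, not the raw string
}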

@@ -0,0 +1,24 @@
on: push
jobs:
  prepare:
    runs-on: ubuntu-latest
    steps:
      - run: |
          echo '::set-output name=matrix::{"package": ["a", "b"]}'
        id: r1
    outputs:
      matrix: ${{steps.r1.outputs.matrix}}
  evalm:
    needs:
      - prepare
    strategy:
      matrix: |-
        ${{fromJson(needs.prepare.outputs.matrix)}}
    runs-on: ubuntu-latest
    steps:
      - name: Check if the matrix key package exists
        run: |
          echo $MATRIX
          exit ${{matrix.package && '0' || '1'}}
        env:
          MATRIX: ${{toJSON(matrix)}}


@@ -0,0 +1,29 @@
on: push
jobs:
  prepare:
    runs-on: ubuntu-latest
    steps:
      - run: |
          echo '::set-output name=matrix::["a", "b"]'
        id: r1
    outputs:
      matrix: ${{steps.r1.outputs.matrix}}
      helix: steady
  evalm:
    needs:
      - prepare
    strategy:
      matrix:
        ${{needs.prepare.outputs.helix}}: |-
          ${{fromJson(needs.prepare.outputs.matrix)}}
    runs-on: ubuntu-latest
    steps:
      - name: Check if the matrix key doesn't ends up unevaluated
        run: |
          echo $MATRIX
          exit ${{matrix['${{needs.prepare.outputs.helix}}'] && '1' || '0'}}
        env:
          MATRIX: ${{toJSON(matrix)}}
      - name: Check if the evaluated matrix key contains a value
        run: |
          exit ${{matrix[needs.prepare.outputs.helix] && '0' || '1'}}