feat: lock before plan #1

Open · wants to merge 1 commit into master
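This PR makes the plan command runner acquire every project lock up front, before any plan executes: lock failures are collected into the command result, all locks for the pull are released, and planning is skipped entirely. For orientation, the call sites below assume a ProjectLocker dependency of roughly this shape. The sketch is reconstructed from the TryLock call in plan_command_runner.go, so the logger type and any TryLockResponse field other than LockFailureReason are best-effort assumptions, not part of this diff:

	// Sketch of the ProjectLocker dependency as consumed by this PR
	// (reconstructed from the call sites; not copied from the Atlantis source).
	type ProjectLocker interface {
		// TryLock attempts to take the lock for one project/workspace pair.
		// A nil error with a non-empty LockFailureReason means another pull
		// request already holds the lock.
		TryLock(log logging.SimpleLogging, pull models.PullRequest, user models.User,
			workspace string, project models.Project, repoLocking bool) (*TryLockResponse, error)
	}

	// TryLockResponse reports whether the lock was taken and, if not, why.
	type TryLockResponse struct {
		LockAcquired      bool         // assumed field: true when the lock was taken
		LockFailureReason string       // set when another pull request holds the lock
		UnlockFn          func() error // assumed field: releases the acquired lock
	}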
4 changes: 2 additions & 2 deletions Dockerfile

@@ -10,7 +10,7 @@ ARG DEFAULT_CONFTEST_VERSION=0.46.0

 # Stage 1: build artifact and download deps

-FROM golang:1.22.0-alpine AS builder
+FROM golang:1.22.1-alpine AS builder

 ARG ATLANTIS_VERSION=dev
 ENV ATLANTIS_VERSION=${ATLANTIS_VERSION}

@@ -159,7 +159,7 @@ COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh

 # Install packages needed to run Atlantis.
 # We place this last as it will bust less docker layer caches when packages update
 RUN apk add --no-cache \
-    ca-certificates~=20230506 \
+    ca-certificates \
     curl~=8 \
     git~=2 \
     unzip~=6 \
1 change: 1 addition & 0 deletions server/controllers/events/events_controller_e2e_test.go

@@ -1517,6 +1517,7 @@ func setupE2E(t *testing.T, repoDir string, opt setupOption) (events_controllers
 		lockingClient,
 		discardApprovalOnPlan,
 		e2ePullReqStatusFetcher,
+		projectLocker,
 	)

 	applyCommandRunner := events.NewApplyCommandRunner(
1 change: 1 addition & 0 deletions server/events/command_runner_test.go

@@ -163,6 +163,7 @@ func setup(t *testing.T, options ...func(testConfig *TestConfig)) *vcsmocks.Mock
 		lockingLocker,
 		testConfig.discardApprovalOnPlan,
 		pullReqStatusFetcher,
+		nil,
 	)

 	applyCommandRunner = events.NewApplyCommandRunner(
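Passing nil here is deliberate: the runner guards the new pre-plan locking pass with `if p.projectLocker != nil` (see the plan_command_runner.go diff below), so the existing unit tests keep exercising the old behavior. A test that wanted to cover the locking path could inject a minimal stub instead. A sketch, under the assumption that the test lives in the events package and can reference ProjectLocker's types directly; stubProjectLocker is a hypothetical test double, not part of this PR:

	// stubProjectLocker reports a canned lock failure; it never errors.
	type stubProjectLocker struct{ failureReason string }

	func (s stubProjectLocker) TryLock(log logging.SimpleLogging, pull models.PullRequest, user models.User,
		workspace string, project models.Project, repoLocking bool) (*TryLockResponse, error) {
		// A non-empty failureReason makes the runner treat the project as lock-blocked.
		return &TryLockResponse{LockFailureReason: s.failureReason}, nil
	}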
108 changes: 98 additions & 10 deletions server/events/plan_command_runner.go

@@ -1,10 +1,12 @@
 package events

 import (
+	"github.com/pkg/errors"
 	"github.com/runatlantis/atlantis/server/core/locking"
 	"github.com/runatlantis/atlantis/server/events/command"
 	"github.com/runatlantis/atlantis/server/events/models"
 	"github.com/runatlantis/atlantis/server/events/vcs"
+	"sync"
 )

 func NewPlanCommandRunner(

@@ -26,6 +28,7 @@ func NewPlanCommandRunner(
 	lockingLocker locking.Locker,
 	discardApprovalOnPlan bool,
 	pullReqStatusFetcher vcs.PullReqStatusFetcher,
+	projectLocker ProjectLocker,
 ) *PlanCommandRunner {
 	return &PlanCommandRunner{
 		silenceVCSStatusNoPlans: silenceVCSStatusNoPlans,

@@ -46,6 +49,7 @@ func NewPlanCommandRunner(
 		lockingLocker:         lockingLocker,
 		DiscardApprovalOnPlan: discardApprovalOnPlan,
 		pullReqStatusFetcher:  pullReqStatusFetcher,
+		projectLocker:         projectLocker,
 	}
 }

@@ -72,6 +76,8 @@ type PlanCommandRunner struct {
 	parallelPoolSize  int
 	pullStatusFetcher PullStatusFetcher
 	lockingLocker     locking.Locker
+	projectLocker     ProjectLocker
+	mtx               sync.Mutex
 	// DiscardApprovalOnPlan controls if all already existing approvals should be removed/dismissed before executing
 	// a plan.
 	DiscardApprovalOnPlan bool

@@ -126,13 +132,54 @@ func (p *PlanCommandRunner) runAutoplan(ctx *command.Context) {
 		ctx.Log.Err("deleting locks: %s", err)
 	}

-	// Only run commands in parallel if enabled
+	var projectResults []command.ProjectResult
+	if p.projectLocker != nil {
+		p.mtx.Lock()
+		for _, pctx := range projectCmds {
+			lockResult := command.ProjectResult{
+				Command:     command.Plan,
+				PlanSuccess: nil,
+				Error:       nil,
+				Failure:     "",
+				RepoRelDir:  pctx.RepoRelDir,
+				Workspace:   pctx.Workspace,
+				ProjectName: pctx.ProjectName,
+			}
+
+			// Lock the project
+			lockResponse, err := p.projectLocker.TryLock(pctx.Log, pctx.Pull, pctx.User, pctx.Workspace, models.NewProject(pctx.Pull.BaseRepo.FullName, pctx.RepoRelDir, pctx.ProjectName), pctx.RepoLocking)
+			if err != nil {
+				pctx.Log.Err("locking project: %s", err)
+				lockResult.Error = errors.Wrap(err, "acquiring lock")
+			} else {
+				lockResult.Failure = lockResponse.LockFailureReason
+			}
+			if lockResult.Error != nil || lockResult.Failure != "" {
+				projectResults = append(projectResults, lockResult)
+			}
+		}
+		p.mtx.Unlock()
+	}
+
 	var result command.Result
-	if p.isParallelEnabled(projectCmds) {
-		ctx.Log.Info("Running plans in parallel")
-		result = runProjectCmdsParallelGroups(ctx, projectCmds, p.prjCmdRunner.Plan, p.parallelPoolSize)
+	if len(projectResults) > 0 {
+		result = command.Result{
+			ProjectResults: projectResults,
+		}
+
+		_, err = p.lockingLocker.UnlockByPull(baseRepo.FullName, pull.Num)
+		if err != nil {
+			ctx.Log.Err("deleting locks: %s", err)
+		}
 	} else {
-		result = runProjectCmds(projectCmds, p.prjCmdRunner.Plan)
+		// Only run commands in parallel if enabled
+		if p.isParallelEnabled(projectCmds) {
+			ctx.Log.Info("Running plans in parallel")
+			result = runProjectCmdsParallelGroups(ctx, projectCmds, p.prjCmdRunner.Plan, p.parallelPoolSize)
+		} else {
+			result = runProjectCmds(projectCmds, p.prjCmdRunner.Plan)
+		}
 	}

 	if p.autoMerger.automergeEnabled(projectCmds) && result.HasErrors() {

@@ -253,13 +300,54 @@ func (p *PlanCommandRunner) run(ctx *command.Context, cmd *CommentCommand) {
 		}
 	}

-	// Only run commands in parallel if enabled
+	var projectResults []command.ProjectResult
+	if p.projectLocker != nil {
+		p.mtx.Lock()
+		for _, pctx := range projectCmds {
+			lockResult := command.ProjectResult{
+				Command:     command.Plan,
+				PlanSuccess: nil,
+				Error:       nil,
+				Failure:     "",
+				RepoRelDir:  pctx.RepoRelDir,
+				Workspace:   pctx.Workspace,
+				ProjectName: pctx.ProjectName,
+			}
+
+			// Lock the project
+			lockResponse, err := p.projectLocker.TryLock(pctx.Log, pctx.Pull, pctx.User, pctx.Workspace, models.NewProject(pctx.Pull.BaseRepo.FullName, pctx.RepoRelDir, pctx.ProjectName), pctx.RepoLocking)
+			if err != nil {
+				pctx.Log.Err("locking project: %s", err)
+				lockResult.Error = errors.Wrap(err, "acquiring lock")
+			} else {
+				lockResult.Failure = lockResponse.LockFailureReason
+			}
+			if lockResult.Error != nil || lockResult.Failure != "" {
+				projectResults = append(projectResults, lockResult)
+			}
+		}
+		p.mtx.Unlock()
+	}
+
 	var result command.Result
-	if p.isParallelEnabled(projectCmds) {
-		ctx.Log.Info("Running plans in parallel")
-		result = runProjectCmdsParallelGroups(ctx, projectCmds, p.prjCmdRunner.Plan, p.parallelPoolSize)
+	if len(projectResults) > 0 {
+		result = command.Result{
+			ProjectResults: projectResults,
+		}
+
+		_, err = p.lockingLocker.UnlockByPull(baseRepo.FullName, pull.Num)
+		if err != nil {
+			ctx.Log.Err("deleting locks: %s", err)
+		}
 	} else {
-		result = runProjectCmds(projectCmds, p.prjCmdRunner.Plan)
+		// Only run commands in parallel if enabled
+		if p.isParallelEnabled(projectCmds) {
+			ctx.Log.Info("Running plans in parallel")
+			result = runProjectCmdsParallelGroups(ctx, projectCmds, p.prjCmdRunner.Plan, p.parallelPoolSize)
+		} else {
+			result = runProjectCmds(projectCmds, p.prjCmdRunner.Plan)
+		}
 	}

 	if p.autoMerger.automergeEnabled(projectCmds) && result.HasErrors() {
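The locking pass above is duplicated verbatim between runAutoplan and run, so a natural review suggestion is to hoist it into a helper that both entry points share. A hedged sketch of such a helper; tryLockProjects is a hypothetical name, not part of this PR, and the body reuses only identifiers that appear in the diff:

	// tryLockProjects attempts to lock every project up front and returns one
	// ProjectResult per lock that could not be acquired. An empty slice means
	// every lock was taken and planning can proceed.
	func (p *PlanCommandRunner) tryLockProjects(projectCmds []command.ProjectContext) []command.ProjectResult {
		var projectResults []command.ProjectResult
		p.mtx.Lock()
		defer p.mtx.Unlock()
		for _, pctx := range projectCmds {
			lockResult := command.ProjectResult{
				Command:     command.Plan,
				RepoRelDir:  pctx.RepoRelDir,
				Workspace:   pctx.Workspace,
				ProjectName: pctx.ProjectName,
			}
			lockResponse, err := p.projectLocker.TryLock(
				pctx.Log, pctx.Pull, pctx.User, pctx.Workspace,
				models.NewProject(pctx.Pull.BaseRepo.FullName, pctx.RepoRelDir, pctx.ProjectName),
				pctx.RepoLocking)
			if err != nil {
				pctx.Log.Err("locking project: %s", err)
				lockResult.Error = errors.Wrap(err, "acquiring lock")
			} else {
				lockResult.Failure = lockResponse.LockFailureReason
			}
			// Only failures are reported; successful locks produce no result.
			if lockResult.Error != nil || lockResult.Failure != "" {
				projectResults = append(projectResults, lockResult)
			}
		}
		return projectResults
	}

Each caller would then reduce to `projectResults := p.tryLockProjects(projectCmds)` behind the same nil check, followed by the shared failure/run branch.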
1 change: 1 addition & 0 deletions server/server.go

@@ -715,6 +715,7 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) {
 		lockingClient,
 		userConfig.DiscardApprovalOnPlanFlag,
 		pullReqStatusFetcher,
+		projectLocker,
 	)

 	applyCommandRunner := events.NewApplyCommandRunner(
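For completeness, the projectLocker wired in here is the same per-project locker Atlantis already builds for the project command runner. In current Atlantis versions it is constructed roughly like this; the exact field set is recalled from server.go and may differ by version, so treat it as an assumption rather than part of this diff:

	projectLocker := &events.DefaultProjectLocker{
		Locker:     lockingClient,           // the backing locking.Locker
		NoOpLocker: locking.NewNoOpLocker(), // used when repo locking is disabled
		VCSClient:  vcsClient,
	}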