From 19099f9d66f1b148e1347dc17aa34388054956e7 Mon Sep 17 00:00:00 2001 From: Daan Vinken Date: Wed, 11 Mar 2026 15:17:54 +0100 Subject: [PATCH 1/8] feat: add PlanStore interface and LocalPlanStore implementation Introduces PlanStore abstraction for plan file persistence. LocalPlanStore wraps current filesystem behavior with no functional change. Phase 2 will add S3PlanStore as a drop-in replacement to remove the PV dependency. Signed-off-by: Daan Vinken Signed-off-by: Daan Vinken --- .../events/events_controller_e2e_test.go | 6 ++- server/core/runtime/apply_step_runner.go | 7 ++- server/core/runtime/apply_step_runner_test.go | 9 ++++ server/core/runtime/import_step_runner.go | 7 +-- .../core/runtime/import_step_runner_test.go | 6 +-- server/core/runtime/plan_step_runner.go | 10 +++- server/core/runtime/plan_step_runner_test.go | 14 ++--- server/core/runtime/plan_store.go | 37 +++++++++++++ server/core/runtime/plan_store_test.go | 52 +++++++++++++++++++ server/core/runtime/state_rm_step_runner.go | 7 +-- .../core/runtime/state_rm_step_runner_test.go | 6 +-- server/server.go | 9 ++-- 12 files changed, 143 insertions(+), 27 deletions(-) create mode 100644 server/core/runtime/plan_store.go create mode 100644 server/core/runtime/plan_store_test.go diff --git a/server/controllers/events/events_controller_e2e_test.go b/server/controllers/events/events_controller_e2e_test.go index 02ee0a1bcc..cc6aacbc21 100644 --- a/server/controllers/events/events_controller_e2e_test.go +++ b/server/controllers/events/events_controller_e2e_test.go @@ -1490,14 +1490,16 @@ func setupE2E(t *testing.T, repoDir string, opt setupOption) (events_controllers defaultTFVersion, statusUpdater, asyncTfExec, + &runtime.LocalPlanStore{}, ), ShowStepRunner: showStepRunner, PolicyCheckStepRunner: policyCheckRunner, ApplyStepRunner: &runtime.ApplyStepRunner{ TerraformExecutor: terraformClient, + PlanStore: &runtime.LocalPlanStore{}, }, - ImportStepRunner: runtime.NewImportStepRunner(terraformClient, 
defaultTFDistribution, defaultTFVersion), - StateRmStepRunner: runtime.NewStateRmStepRunner(terraformClient, defaultTFDistribution, defaultTFVersion), + ImportStepRunner: runtime.NewImportStepRunner(terraformClient, defaultTFDistribution, defaultTFVersion, &runtime.LocalPlanStore{}), + StateRmStepRunner: runtime.NewStateRmStepRunner(terraformClient, defaultTFDistribution, defaultTFVersion, &runtime.LocalPlanStore{}), RunStepRunner: &runtime.RunStepRunner{ TerraformExecutor: terraformClient, DefaultTFDistribution: defaultTFDistribution, diff --git a/server/core/runtime/apply_step_runner.go b/server/core/runtime/apply_step_runner.go index 219fd96789..24c3b3a496 100644 --- a/server/core/runtime/apply_step_runner.go +++ b/server/core/runtime/apply_step_runner.go @@ -16,7 +16,6 @@ import ( "github.com/runatlantis/atlantis/server/core/terraform" "github.com/runatlantis/atlantis/server/events/command" "github.com/runatlantis/atlantis/server/events/models" - "github.com/runatlantis/atlantis/server/utils" ) // ApplyStepRunner runs `terraform apply`. 
@@ -26,6 +25,7 @@ type ApplyStepRunner struct { DefaultTFVersion *version.Version `validate:"required"` CommitStatusUpdater StatusUpdater `validate:"required"` AsyncTFExec AsyncTFExec `validate:"required"` + PlanStore PlanStore `validate:"required"` } func (a *ApplyStepRunner) Run(ctx command.ProjectContext, extraArgs []string, path string, envs map[string]string) (string, error) { @@ -34,6 +34,9 @@ func (a *ApplyStepRunner) Run(ctx command.ProjectContext, extraArgs []string, pa } planPath := filepath.Join(path, GetPlanFilename(ctx.Workspace, ctx.ProjectName)) + if loadErr := a.PlanStore.Load(ctx, planPath); loadErr != nil { + return "", fmt.Errorf("loading plan: %w", loadErr) + } contents, err := os.ReadFile(planPath) if os.IsNotExist(err) { return "", fmt.Errorf("no plan found at path %q and workspace %q–did you run plan?", ctx.RepoRelDir, ctx.Workspace) @@ -70,7 +73,7 @@ func (a *ApplyStepRunner) Run(ctx command.ProjectContext, extraArgs []string, pa // If the apply was successful, delete the plan. 
if err == nil { ctx.Log.Info("apply successful, deleting planfile") - if removeErr := utils.RemoveIgnoreNonExistent(planPath); removeErr != nil { + if removeErr := a.PlanStore.Remove(ctx, planPath); removeErr != nil { ctx.Log.Warn("failed to delete planfile after successful apply: %s", removeErr) } } diff --git a/server/core/runtime/apply_step_runner_test.go b/server/core/runtime/apply_step_runner_test.go index 163e8e34d4..7edbd15b6b 100644 --- a/server/core/runtime/apply_step_runner_test.go +++ b/server/core/runtime/apply_step_runner_test.go @@ -30,6 +30,7 @@ import ( func TestRun_NoDir(t *testing.T) { o := runtime.ApplyStepRunner{ TerraformExecutor: nil, + PlanStore: &runtime.LocalPlanStore{}, } _, err := o.Run(command.ProjectContext{ RepoRelDir: ".", @@ -42,6 +43,7 @@ func TestRun_NoPlanFile(t *testing.T) { tmpDir := t.TempDir() o := runtime.ApplyStepRunner{ TerraformExecutor: nil, + PlanStore: &runtime.LocalPlanStore{}, } _, err := o.Run(command.ProjectContext{ RepoRelDir: ".", @@ -70,6 +72,7 @@ func TestRun_Success(t *testing.T) { o := runtime.ApplyStepRunner{ TerraformExecutor: terraform, DefaultTFDistribution: tfDistribution, + PlanStore: &runtime.LocalPlanStore{}, } When(terraform.RunCommandWithVersion(Any[command.ProjectContext](), Any[string](), Any[[]string](), Any[map[string]string](), Any[tf.Distribution](), Any[*version.Version](), Any[string]())). @@ -105,6 +108,7 @@ func TestRun_AppliesCorrectProjectPlan(t *testing.T) { o := runtime.ApplyStepRunner{ TerraformExecutor: terraform, DefaultTFDistribution: tfDistribution, + PlanStore: &runtime.LocalPlanStore{}, } When(terraform.RunCommandWithVersion(Any[command.ProjectContext](), Any[string](), Any[[]string](), Any[map[string]string](), Any[tf.Distribution](), Any[*version.Version](), Any[string]())). 
ThenReturn("output", nil) @@ -139,6 +143,7 @@ func TestApplyStepRunner_TestRun_UsesConfiguredTFVersion(t *testing.T) { o := runtime.ApplyStepRunner{ TerraformExecutor: terraform, DefaultTFDistribution: tfDistribution, + PlanStore: &runtime.LocalPlanStore{}, } When(terraform.RunCommandWithVersion(Any[command.ProjectContext](), Any[string](), Any[[]string](), Any[map[string]string](), Any[tf.Distribution](), Any[*version.Version](), Any[string]())). ThenReturn("output", nil) @@ -175,6 +180,7 @@ func TestApplyStepRunner_TestRun_UsesConfiguredDistribution(t *testing.T) { TerraformExecutor: terraform, DefaultTFDistribution: tfDistribution, DefaultTFVersion: tfVersion, + PlanStore: &runtime.LocalPlanStore{}, } When(terraform.RunCommandWithVersion(Any[command.ProjectContext](), Any[string](), Any[[]string](), Any[map[string]string](), NotEq[tf.Distribution](tfDistribution), Any[*version.Version](), Any[string]())). ThenReturn("output", nil) @@ -248,6 +254,7 @@ func TestRun_UsingTarget(t *testing.T) { terraform := tfclientmocks.NewMockClient() step := runtime.ApplyStepRunner{ TerraformExecutor: terraform, + PlanStore: &runtime.LocalPlanStore{}, } output, err := step.Run(command.ProjectContext{ @@ -291,6 +298,7 @@ Plan: 0 to add, 0 to change, 1 to destroy.` o := runtime.ApplyStepRunner{ AsyncTFExec: tfExec, CommitStatusUpdater: updater, + PlanStore: &runtime.LocalPlanStore{}, } tfVersion, _ := version.NewVersion("0.11.0") ctx := command.ProjectContext{ @@ -351,6 +359,7 @@ Plan: 0 to add, 0 to change, 1 to destroy.` o := runtime.ApplyStepRunner{ AsyncTFExec: tfExec, CommitStatusUpdater: runtimemocks.NewMockStatusUpdater(), + PlanStore: &runtime.LocalPlanStore{}, } tfVersion, _ := version.NewVersion("0.11.0") diff --git a/server/core/runtime/import_step_runner.go b/server/core/runtime/import_step_runner.go index 7f3a67a4fa..f77cd2cf89 100644 --- a/server/core/runtime/import_step_runner.go +++ b/server/core/runtime/import_step_runner.go @@ -10,20 +10,21 @@ import ( version 
"github.com/hashicorp/go-version" "github.com/runatlantis/atlantis/server/core/terraform" "github.com/runatlantis/atlantis/server/events/command" - "github.com/runatlantis/atlantis/server/utils" ) type importStepRunner struct { terraformExecutor TerraformExec defaultTFDistribution terraform.Distribution defaultTFVersion *version.Version + planStore PlanStore } -func NewImportStepRunner(terraformExecutor TerraformExec, defaultTfDistribution terraform.Distribution, defaultTfVersion *version.Version) Runner { +func NewImportStepRunner(terraformExecutor TerraformExec, defaultTfDistribution terraform.Distribution, defaultTfVersion *version.Version, planStore PlanStore) Runner { runner := &importStepRunner{ terraformExecutor: terraformExecutor, defaultTFDistribution: defaultTfDistribution, defaultTFVersion: defaultTfVersion, + planStore: planStore, } return NewWorkspaceStepRunnerDelegate(terraformExecutor, defaultTfDistribution, defaultTfVersion, runner) } @@ -48,7 +49,7 @@ func (p *importStepRunner) Run(ctx command.ProjectContext, extraArgs []string, p if err == nil { if _, planPathErr := os.Stat(planPath); !os.IsNotExist(planPathErr) { ctx.Log.Info("import successful, deleting planfile") - if removeErr := utils.RemoveIgnoreNonExistent(planPath); removeErr != nil { + if removeErr := p.planStore.Remove(ctx, planPath); removeErr != nil { ctx.Log.Warn("failed to delete planfile after successful import: %s", removeErr) } } diff --git a/server/core/runtime/import_step_runner_test.go b/server/core/runtime/import_step_runner_test.go index fa543847c3..2d97cb5ac9 100644 --- a/server/core/runtime/import_step_runner_test.go +++ b/server/core/runtime/import_step_runner_test.go @@ -38,7 +38,7 @@ func TestImportStepRunner_Run_Success(t *testing.T) { mockDownloader := mocks.NewMockDownloader() tfDistribution := tf.NewDistributionTerraformWithDownloader(mockDownloader) tfVersion, _ := version.NewVersion("0.15.0") - s := NewImportStepRunner(terraform, tfDistribution, tfVersion) + s := 
NewImportStepRunner(terraform, tfDistribution, tfVersion, &LocalPlanStore{}) When(terraform.RunCommandWithVersion(Any[command.ProjectContext](), Any[string](), Any[[]string](), Any[map[string]string](), Any[tf.Distribution](), Any[*version.Version](), Any[string]())). ThenReturn("output", nil) @@ -70,7 +70,7 @@ func TestImportStepRunner_Run_Workspace(t *testing.T) { tfVersion, _ := version.NewVersion("0.15.0") mockDownloader := mocks.NewMockDownloader() tfDistribution := tf.NewDistributionTerraformWithDownloader(mockDownloader) - s := NewImportStepRunner(terraform, tfDistribution, tfVersion) + s := NewImportStepRunner(terraform, tfDistribution, tfVersion, &LocalPlanStore{}) When(terraform.RunCommandWithVersion(Any[command.ProjectContext](), Any[string](), Any[[]string](), Any[map[string]string](), Any[tf.Distribution](), Any[*version.Version](), Any[string]())). ThenReturn("output", nil) @@ -111,7 +111,7 @@ func TestImportStepRunner_Run_UsesConfiguredDistribution(t *testing.T) { tfVersion, _ := version.NewVersion("0.15.0") mockDownloader := mocks.NewMockDownloader() tfDistribution := tf.NewDistributionTerraformWithDownloader(mockDownloader) - s := NewImportStepRunner(terraform, tfDistribution, tfVersion) + s := NewImportStepRunner(terraform, tfDistribution, tfVersion, &LocalPlanStore{}) When(terraform.RunCommandWithVersion(Any[command.ProjectContext](), Any[string](), Any[[]string](), Any[map[string]string](), Any[tf.Distribution](), Any[*version.Version](), Any[string]())). 
ThenReturn("output", nil) diff --git a/server/core/runtime/plan_step_runner.go b/server/core/runtime/plan_step_runner.go index 49fc5b5942..7b2d51b80e 100644 --- a/server/core/runtime/plan_step_runner.go +++ b/server/core/runtime/plan_step_runner.go @@ -34,15 +34,17 @@ type planStepRunner struct { DefaultTFVersion *version.Version CommitStatusUpdater StatusUpdater AsyncTFExec AsyncTFExec + PlanStore PlanStore } -func NewPlanStepRunner(terraformExecutor TerraformExec, defaultTfDistribution terraform.Distribution, defaultTfVersion *version.Version, commitStatusUpdater StatusUpdater, asyncTFExec AsyncTFExec) Runner { +func NewPlanStepRunner(terraformExecutor TerraformExec, defaultTfDistribution terraform.Distribution, defaultTfVersion *version.Version, commitStatusUpdater StatusUpdater, asyncTFExec AsyncTFExec, planStore PlanStore) Runner { runner := &planStepRunner{ TerraformExecutor: terraformExecutor, DefaultTFDistribution: defaultTfDistribution, DefaultTFVersion: defaultTfVersion, CommitStatusUpdater: commitStatusUpdater, AsyncTFExec: asyncTFExec, + PlanStore: planStore, } return NewWorkspaceStepRunnerDelegate(terraformExecutor, defaultTfDistribution, defaultTfVersion, runner) } @@ -67,6 +69,9 @@ func (p *planStepRunner) Run(ctx command.ProjectContext, extraArgs []string, pat if err != nil { return output, err } + if saveErr := p.PlanStore.Save(ctx, planFile); saveErr != nil { + return output, fmt.Errorf("saving plan: %w", saveErr) + } return p.fmtPlanOutput(output, tfVersion), nil } @@ -109,6 +114,9 @@ func (p *planStepRunner) remotePlan(ctx command.ProjectContext, extraArgs []stri if err != nil { return output, fmt.Errorf("unable to create planfile for remote ops: %w", err) } + if saveErr := p.PlanStore.Save(ctx, planFile); saveErr != nil { + return output, fmt.Errorf("saving plan: %w", saveErr) + } return p.fmtPlanOutput(output, tfVersion), nil } diff --git a/server/core/runtime/plan_step_runner_test.go b/server/core/runtime/plan_step_runner_test.go index 
cd6d89f8a6..5b4ac4fc46 100644 --- a/server/core/runtime/plan_step_runner_test.go +++ b/server/core/runtime/plan_step_runner_test.go @@ -47,7 +47,7 @@ func TestRun_AddsEnvVarFile(t *testing.T) { // Using version >= 0.10 here so we don't expect any env commands. tfVersion, _ := version.NewVersion("0.10.0") logger := logging.NewNoopLogger(t) - s := runtime.NewPlanStepRunner(terraform, tfDistribution, tfVersion, commitStatusUpdater, asyncTfExec) + s := runtime.NewPlanStepRunner(terraform, tfDistribution, tfVersion, commitStatusUpdater, asyncTfExec, &runtime.LocalPlanStore{}) expPlanArgs := []string{"plan", "-input=false", @@ -108,7 +108,7 @@ func TestRun_UsesDiffPathForProject(t *testing.T) { tfDistribution := tf.NewDistributionTerraformWithDownloader(mockDownloader) tfVersion, _ := version.NewVersion("0.10.0") logger := logging.NewNoopLogger(t) - s := runtime.NewPlanStepRunner(terraform, tfDistribution, tfVersion, commitStatusUpdater, asyncTfExec) + s := runtime.NewPlanStepRunner(terraform, tfDistribution, tfVersion, commitStatusUpdater, asyncTfExec, &runtime.LocalPlanStore{}) ctx := command.ProjectContext{ Log: logger, Workspace: "default", @@ -189,7 +189,7 @@ Terraform will perform the following actions: mockDownloader := mocks.NewMockDownloader() tfDistribution := tf.NewDistributionTerraformWithDownloader(mockDownloader) tfVersion, _ := version.NewVersion("0.10.0") - s := runtime.NewPlanStepRunner(terraform, tfDistribution, tfVersion, commitStatusUpdater, asyncTfExec) + s := runtime.NewPlanStepRunner(terraform, tfDistribution, tfVersion, commitStatusUpdater, asyncTfExec, &runtime.LocalPlanStore{}) When(terraform.RunCommandWithVersion( Any[command.ProjectContext](), Any[string](), @@ -242,7 +242,7 @@ func TestRun_OutputOnErr(t *testing.T) { mockDownloader := mocks.NewMockDownloader() tfDistribution := tf.NewDistributionTerraformWithDownloader(mockDownloader) tfVersion, _ := version.NewVersion("0.10.0") - s := runtime.NewPlanStepRunner(terraform, tfDistribution, 
tfVersion, commitStatusUpdater, asyncTfExec) + s := runtime.NewPlanStepRunner(terraform, tfDistribution, tfVersion, commitStatusUpdater, asyncTfExec, &runtime.LocalPlanStore{}) expOutput := "expected output" expErrMsg := "error!" When(terraform.RunCommandWithVersion( @@ -318,7 +318,7 @@ func TestRun_NoOptionalVarsIn012(t *testing.T) { mockDownloader := mocks.NewMockDownloader() tfDistribution := tf.NewDistributionTerraformWithDownloader(mockDownloader) tfVersion, _ := version.NewVersion(c.tfVersion) - s := runtime.NewPlanStepRunner(terraform, tfDistribution, tfVersion, commitStatusUpdater, asyncTfExec) + s := runtime.NewPlanStepRunner(terraform, tfDistribution, tfVersion, commitStatusUpdater, asyncTfExec, &runtime.LocalPlanStore{}) ctx := command.ProjectContext{ Workspace: "default", RepoRelDir: ".", @@ -410,7 +410,7 @@ locally at this time. tfDistribution := tf.NewDistributionTerraformWithDownloader(mockDownloader) tfVersion, _ := version.NewVersion(c.tfVersion) asyncTf := &remotePlanMock{} - s := runtime.NewPlanStepRunner(terraform, tfDistribution, tfVersion, commitStatusUpdater, asyncTf) + s := runtime.NewPlanStepRunner(terraform, tfDistribution, tfVersion, commitStatusUpdater, asyncTf, &runtime.LocalPlanStore{}) absProjectPath := t.TempDir() // First, terraform workspace gets run. 
@@ -607,7 +607,7 @@ func TestPlanStepRunner_TestRun_UsesConfiguredDistribution(t *testing.T) { mockDownloader := mocks.NewMockDownloader() tfDistribution := tf.NewDistributionTerraformWithDownloader(mockDownloader) tfVersion, _ := version.NewVersion(c.tfVersion) - s := runtime.NewPlanStepRunner(terraform, tfDistribution, tfVersion, commitStatusUpdater, asyncTfExec) + s := runtime.NewPlanStepRunner(terraform, tfDistribution, tfVersion, commitStatusUpdater, asyncTfExec, &runtime.LocalPlanStore{}) ctx := command.ProjectContext{ Workspace: "default", RepoRelDir: ".", diff --git a/server/core/runtime/plan_store.go b/server/core/runtime/plan_store.go new file mode 100644 index 0000000000..c108bf8018 --- /dev/null +++ b/server/core/runtime/plan_store.go @@ -0,0 +1,37 @@ +// Copyright 2025 The Atlantis Authors +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "github.com/runatlantis/atlantis/server/events/command" + "github.com/runatlantis/atlantis/server/utils" +) + +// PlanStore abstracts plan file persistence. +// In Phase 1, LocalPlanStore wraps current filesystem behavior (Save/Load are no-ops). +// In Phase 2, S3PlanStore will upload after plan and download before apply. +type PlanStore interface { + // Save persists a plan file after terraform writes it to planPath. + Save(ctx command.ProjectContext, planPath string) error + // Load ensures a plan file exists at planPath before terraform reads it. + Load(ctx command.ProjectContext, planPath string) error + // Remove deletes a plan file (local + external) after apply/import/state-rm. + Remove(ctx command.ProjectContext, planPath string) error +} + +// LocalPlanStore implements PlanStore using the local filesystem. +// Save and Load are no-ops because terraform already reads/writes locally. 
+type LocalPlanStore struct{} + +func (s *LocalPlanStore) Save(_ command.ProjectContext, _ string) error { + return nil +} + +func (s *LocalPlanStore) Load(_ command.ProjectContext, _ string) error { + return nil +} + +func (s *LocalPlanStore) Remove(_ command.ProjectContext, planPath string) error { + return utils.RemoveIgnoreNonExistent(planPath) +} diff --git a/server/core/runtime/plan_store_test.go b/server/core/runtime/plan_store_test.go new file mode 100644 index 0000000000..8259a06ac0 --- /dev/null +++ b/server/core/runtime/plan_store_test.go @@ -0,0 +1,52 @@ +// Copyright 2025 The Atlantis Authors +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "os" + "path/filepath" + "testing" + + "github.com/runatlantis/atlantis/server/events/command" + "github.com/runatlantis/atlantis/server/logging" + . "github.com/runatlantis/atlantis/testing" +) + +func TestLocalPlanStore_Save_IsNoop(t *testing.T) { + store := &LocalPlanStore{} + ctx := command.ProjectContext{Log: logging.NewNoopLogger(t)} + err := store.Save(ctx, "/nonexistent/path/plan.tfplan") + Ok(t, err) +} + +func TestLocalPlanStore_Load_IsNoop(t *testing.T) { + store := &LocalPlanStore{} + ctx := command.ProjectContext{Log: logging.NewNoopLogger(t)} + err := store.Load(ctx, "/nonexistent/path/plan.tfplan") + Ok(t, err) +} + +func TestLocalPlanStore_Remove_DeletesFile(t *testing.T) { + store := &LocalPlanStore{} + ctx := command.ProjectContext{Log: logging.NewNoopLogger(t)} + + tmpDir := t.TempDir() + planPath := filepath.Join(tmpDir, "test.tfplan") + err := os.WriteFile(planPath, []byte("plan"), 0600) + Ok(t, err) + + err = store.Remove(ctx, planPath) + Ok(t, err) + + _, err = os.Stat(planPath) + Assert(t, os.IsNotExist(err), "plan file should be deleted") +} + +func TestLocalPlanStore_Remove_NonexistentFile(t *testing.T) { + store := &LocalPlanStore{} + ctx := command.ProjectContext{Log: logging.NewNoopLogger(t)} + + err := store.Remove(ctx, "/nonexistent/path/plan.tfplan") + Ok(t, 
err) +} diff --git a/server/core/runtime/state_rm_step_runner.go b/server/core/runtime/state_rm_step_runner.go index a140d54338..0115aace72 100644 --- a/server/core/runtime/state_rm_step_runner.go +++ b/server/core/runtime/state_rm_step_runner.go @@ -10,20 +10,21 @@ import ( version "github.com/hashicorp/go-version" "github.com/runatlantis/atlantis/server/core/terraform" "github.com/runatlantis/atlantis/server/events/command" - "github.com/runatlantis/atlantis/server/utils" ) type stateRmStepRunner struct { terraformExecutor TerraformExec defaultTFDistribution terraform.Distribution defaultTFVersion *version.Version + planStore PlanStore } -func NewStateRmStepRunner(terraformExecutor TerraformExec, defaultTfDistribution terraform.Distribution, defaultTfVersion *version.Version) Runner { +func NewStateRmStepRunner(terraformExecutor TerraformExec, defaultTfDistribution terraform.Distribution, defaultTfVersion *version.Version, planStore PlanStore) Runner { runner := &stateRmStepRunner{ terraformExecutor: terraformExecutor, defaultTFDistribution: defaultTfDistribution, defaultTFVersion: defaultTfVersion, + planStore: planStore, } return NewWorkspaceStepRunnerDelegate(terraformExecutor, defaultTfDistribution, defaultTfVersion, runner) } @@ -48,7 +49,7 @@ func (p *stateRmStepRunner) Run(ctx command.ProjectContext, extraArgs []string, if err == nil { if _, planPathErr := os.Stat(planPath); !os.IsNotExist(planPathErr) { ctx.Log.Info("state rm successful, deleting planfile") - if removeErr := utils.RemoveIgnoreNonExistent(planPath); removeErr != nil { + if removeErr := p.planStore.Remove(ctx, planPath); removeErr != nil { ctx.Log.Warn("failed to delete planfile after successful state rm: %s", removeErr) } } diff --git a/server/core/runtime/state_rm_step_runner_test.go b/server/core/runtime/state_rm_step_runner_test.go index 60e351c699..33a4a481a9 100644 --- a/server/core/runtime/state_rm_step_runner_test.go +++ b/server/core/runtime/state_rm_step_runner_test.go @@ -38,7 
+38,7 @@ func TestStateRmStepRunner_Run_Success(t *testing.T) { tfVersion, _ := version.NewVersion("0.15.0") mockDownloader := mocks.NewMockDownloader() tfDistribution := tf.NewDistributionTerraformWithDownloader(mockDownloader) - s := NewStateRmStepRunner(terraform, tfDistribution, tfVersion) + s := NewStateRmStepRunner(terraform, tfDistribution, tfVersion, &LocalPlanStore{}) When(terraform.RunCommandWithVersion(Any[command.ProjectContext](), Any[string](), Any[[]string](), Any[map[string]string](), Any[tf.Distribution](), Any[*version.Version](), Any[string]())). ThenReturn("output", nil) @@ -70,7 +70,7 @@ func TestStateRmStepRunner_Run_Workspace(t *testing.T) { tfVersion, _ := version.NewVersion("0.15.0") mockDownloader := mocks.NewMockDownloader() tfDistribution := tf.NewDistributionTerraformWithDownloader(mockDownloader) - s := NewStateRmStepRunner(terraform, tfDistribution, tfVersion) + s := NewStateRmStepRunner(terraform, tfDistribution, tfVersion, &LocalPlanStore{}) When(terraform.RunCommandWithVersion(Any[command.ProjectContext](), Any[string](), Any[[]string](), Any[map[string]string](), Any[tf.Distribution](), Any[*version.Version](), Any[string]())). ThenReturn("output", nil) @@ -112,7 +112,7 @@ func TestStateRmStepRunner_Run_UsesConfiguredDistribution(t *testing.T) { tfVersion, _ := version.NewVersion("0.15.0") mockDownloader := mocks.NewMockDownloader() tfDistribution := tf.NewDistributionTerraformWithDownloader(mockDownloader) - s := NewStateRmStepRunner(terraform, tfDistribution, tfVersion) + s := NewStateRmStepRunner(terraform, tfDistribution, tfVersion, &LocalPlanStore{}) When(terraform.RunCommandWithVersion(Any[command.ProjectContext](), Any[string](), Any[[]string](), Any[map[string]string](), Any[tf.Distribution](), Any[*version.Version](), Any[string]())). 
ThenReturn("output", nil) diff --git a/server/server.go b/server/server.go index 2e409e7262..bdc8485d5b 100644 --- a/server/server.go +++ b/server/server.go @@ -674,6 +674,8 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { terraformClient, ) + planStore := &runtime.LocalPlanStore{} + showStepRunner, err := runtime.NewShowStepRunner(terraformClient, defaultTfDistribution, defaultTfVersion) if err != nil { @@ -706,7 +708,7 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { DefaultTFDistribution: defaultTfDistribution, DefaultTFVersion: defaultTfVersion, }, - PlanStepRunner: runtime.NewPlanStepRunner(terraformClient, defaultTfDistribution, defaultTfVersion, commitStatusUpdater, terraformClient), + PlanStepRunner: runtime.NewPlanStepRunner(terraformClient, defaultTfDistribution, defaultTfVersion, commitStatusUpdater, terraformClient, planStore), ShowStepRunner: showStepRunner, PolicyCheckStepRunner: policyCheckStepRunner, ApplyStepRunner: &runtime.ApplyStepRunner{ @@ -715,6 +717,7 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { DefaultTFVersion: defaultTfVersion, CommitStatusUpdater: commitStatusUpdater, AsyncTFExec: terraformClient, + PlanStore: planStore, }, RunStepRunner: runStepRunner, EnvStepRunner: &runtime.EnvStepRunner{ @@ -728,8 +731,8 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { DefaultTFDistribution: defaultTfDistribution, DefaultTFVersion: defaultTfVersion, }, - ImportStepRunner: runtime.NewImportStepRunner(terraformClient, defaultTfDistribution, defaultTfVersion), - StateRmStepRunner: runtime.NewStateRmStepRunner(terraformClient, defaultTfDistribution, defaultTfVersion), + ImportStepRunner: runtime.NewImportStepRunner(terraformClient, defaultTfDistribution, defaultTfVersion, planStore), + StateRmStepRunner: runtime.NewStateRmStepRunner(terraformClient, defaultTfDistribution, defaultTfVersion, planStore), WorkingDir: workingDir, Webhooks: 
webhooksManager, WorkingDirLocker: workingDirLocker, From b6e1c3d3ce029678b09d2308f62ad2ef2d3a3b77 Mon Sep 17 00:00:00 2001 From: "daanvinken@tythus.com" Date: Fri, 13 Mar 2026 13:51:17 +0100 Subject: [PATCH 2/8] feat: add S3PlanStore for stateless plan persistence MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Plan files are uploaded to S3 after plan and downloaded before apply, allowing pods to restart without losing plans. On apply, if the working directory is missing (e.g. emptyDir wiped), the repo is re-cloned and plans are restored from S3 via prefix scan. LocalPlanStore behavior is unchanged — the re-clone and restore logic only activates when an external plan store is configured. Signed-off-by: Daan Vinken Signed-off-by: Daan Vinken --- cmd/server.go | 45 +++ go.mod | 19 ++ go.sum | 38 +++ .../events/events_controller_e2e_test.go | 1 + server/core/runtime/plan_store.go | 9 + server/core/runtime/s3_plan_store.go | 269 ++++++++++++++++ server/core/runtime/s3_plan_store_test.go | 304 ++++++++++++++++++ server/events/project_command_builder.go | 39 ++- .../project_command_builder_internal_test.go | 6 + server/events/project_command_builder_test.go | 14 + server/server.go | 22 +- server/user_config.go | 7 + 12 files changed, 769 insertions(+), 4 deletions(-) create mode 100644 server/core/runtime/s3_plan_store.go create mode 100644 server/core/runtime/s3_plan_store_test.go diff --git a/cmd/server.go b/cmd/server.go index c0d5db4f8f..12d78ee0bc 100644 --- a/cmd/server.go +++ b/cmd/server.go @@ -127,6 +127,13 @@ const ( PendingApplyStatusFlag = "pending-apply-status" StatsNamespace = "stats-namespace" AllowDraftPRs = "allow-draft-prs" + PlanStoreFlag = "plan-store" + PlanStoreS3BucketFlag = "plan-store-s3-bucket" + PlanStoreS3EndpointFlag = "plan-store-s3-endpoint" + PlanStoreS3ForcePathStyleFlag = "plan-store-s3-force-path-style" + PlanStoreS3PrefixFlag = "plan-store-s3-prefix" + PlanStoreS3ProfileFlag = 
"plan-store-s3-profile" + PlanStoreS3RegionFlag = "plan-store-s3-region" PortFlag = "port" RedisDB = "redis-db" RedisHost = "redis-host" @@ -188,6 +195,7 @@ const ( DefaultMaxCommentsPerCommand = 100 DefaultParallelPoolSize = 15 DefaultStatsNamespace = "atlantis" + DefaultPlanStore = "local" DefaultPort = 4141 DefaultRedisDB = 0 DefaultRedisPort = 6379 @@ -414,6 +422,25 @@ var stringFlags = map[string]stringFlag{ description: "Namespace for aggregating stats.", defaultValue: DefaultStatsNamespace, }, + PlanStoreFlag: { + description: "Plan file storage backend. Supports 'local' (default) or 's3'.", + defaultValue: DefaultPlanStore, + }, + PlanStoreS3BucketFlag: { + description: "S3 bucket name for storing plan files. Required when --plan-store=s3.", + }, + PlanStoreS3EndpointFlag: { + description: "Custom S3 endpoint URL (for S3-compatible stores like MinIO).", + }, + PlanStoreS3PrefixFlag: { + description: "Key prefix for plan files in S3 (e.g. 'atlantis/plans').", + }, + PlanStoreS3ProfileFlag: { + description: "AWS profile to use for S3 plan store authentication.", + }, + PlanStoreS3RegionFlag: { + description: "AWS region for the S3 plan store bucket. Required when --plan-store=s3.", + }, RedisHost: { description: "The Redis Hostname for when using a Locking DB type of 'redis'.", }, @@ -581,6 +608,10 @@ var boolFlags = map[string]boolFlag{ description: "Set apply job status as pending when there are planned changes that haven't been applied yet. Currently only supported for GitLab.", defaultValue: false, }, + PlanStoreS3ForcePathStyleFlag: { + description: "Use S3 path-style addressing (required for MinIO and some S3-compatible stores).", + defaultValue: false, + }, QuietPolicyChecks: { description: "Exclude policy check comments from pull requests unless there's an actual error from conftest. 
This also excludes warnings.", defaultValue: false, @@ -1090,6 +1121,20 @@ func (s *ServerCmd) validate(userConfig server.UserConfig) error { } } + switch userConfig.PlanStore { + case "local", "": + // ok + case "s3": + if userConfig.PlanStoreS3Bucket == "" { + return fmt.Errorf("--%s is required when --%s=s3", PlanStoreS3BucketFlag, PlanStoreFlag) + } + if userConfig.PlanStoreS3Region == "" { + return fmt.Errorf("--%s is required when --%s=s3", PlanStoreS3RegionFlag, PlanStoreFlag) + } + default: + return fmt.Errorf("invalid --%s value %q: must be 'local' or 's3'", PlanStoreFlag, userConfig.PlanStore) + } + if userConfig.TFEHostname != DefaultTFEHostname && userConfig.TFEToken == "" { return fmt.Errorf("if setting --%s, must set --%s", TFEHostnameFlag, TFETokenFlag) } diff --git a/go.mod b/go.mod index 0303a72aff..71717f1002 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,9 @@ require ( code.gitea.io/sdk/gitea v0.23.2 github.com/Masterminds/sprig/v3 v3.3.0 github.com/alicebob/miniredis/v2 v2.36.1 + github.com/aws/aws-sdk-go-v2 v1.41.3 + github.com/aws/aws-sdk-go-v2/config v1.32.11 + github.com/aws/aws-sdk-go-v2/service/s3 v1.96.4 github.com/bmatcuk/doublestar/v4 v4.10.0 github.com/bradleyfalzon/ghinstallation/v2 v2.17.0 github.com/briandowns/spinner v1.23.2 @@ -74,6 +77,22 @@ require ( github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect github.com/ProtonMail/gopenpgp/v2 v2.7.5 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.6 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.19.11 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.19 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.19 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.19 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.5 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.20 // indirect + 
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.6 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.11 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.19 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.19 // indirect + github.com/aws/aws-sdk-go-v2/service/signin v1.0.7 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.30.12 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.16 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.41.8 // indirect + github.com/aws/smithy-go v1.24.2 // indirect github.com/aymerick/douceur v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect diff --git a/go.sum b/go.sum index a31f4eb4f0..11bc9c0d8c 100644 --- a/go.sum +++ b/go.sum @@ -67,6 +67,44 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-sdk-go-v2 v1.41.3 h1:4kQ/fa22KjDt13QCy1+bYADvdgcxpfH18f0zP542kZA= +github.com/aws/aws-sdk-go-v2 v1.41.3/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.6 h1:N4lRUXZpZ1KVEUn6hxtco/1d2lgYhNn1fHkkl8WhlyQ= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.6/go.mod h1:lyw7GFp3qENLh7kwzf7iMzAxDn+NzjXEAGjKS2UOKqI= +github.com/aws/aws-sdk-go-v2/config v1.32.11 h1:ftxI5sgz8jZkckuUHXfC/wMUc8u3fG1vQS0plr2F2Zs= +github.com/aws/aws-sdk-go-v2/config v1.32.11/go.mod h1:twF11+6ps9aNRKEDimksp923o44w/Thk9+8YIlzWMmo= +github.com/aws/aws-sdk-go-v2/credentials v1.19.11 
h1:NdV8cwCcAXrCWyxArt58BrvZJ9pZ9Fhf9w6Uh5W3Uyc= +github.com/aws/aws-sdk-go-v2/credentials v1.19.11/go.mod h1:30yY2zqkMPdrvxBqzI9xQCM+WrlrZKSOpSJEsylVU+8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.19 h1:INUvJxmhdEbVulJYHI061k4TVuS3jzzthNvjqvVvTKM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.19/go.mod h1:FpZN2QISLdEBWkayloda+sZjVJL+e9Gl0k1SyTgcswU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.19 h1:/sECfyq2JTifMI2JPyZ4bdRN77zJmr6SrS1eL3augIA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.19/go.mod h1:dMf8A5oAqr9/oxOfLkC/c2LU/uMcALP0Rgn2BD5LWn0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.19 h1:AWeJMk33GTBf6J20XJe6qZoRSJo0WfUhsMdUKhoODXE= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.19/go.mod h1:+GWrYoaAsV7/4pNHpwh1kiNLXkKaSoppxQq9lbH8Ejw= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.5 h1:clHU5fm//kWS1C2HgtgWxfQbFbx4b6rx+5jzhgX9HrI= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.5/go.mod h1:O3h0IK87yXci+kg6flUKzJnWeziQUKciKrLjcatSNcY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.20 h1:qi3e/dmpdONhj1RyIZdi6DKKpDXS5Lb8ftr3p7cyHJc= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.20/go.mod h1:V1K+TeJVD5JOk3D9e5tsX2KUdL7BlB+FV6cBhdobN8c= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.6 h1:XAq62tBTJP/85lFD5oqOOe7YYgWxY9LvWq8plyDvDVg= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.6/go.mod h1:x0nZssQ3qZSnIcePWLvcoFisRXJzcTVvYpAAdYX8+GI= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.11 h1:BYf7XNsJMzl4mObARUBUib+j2tf0U//JAAtTnYqvqCw= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.11/go.mod h1:aEUS4WrNk/+FxkBZZa7tVgp4pGH+kFGW40Y8rCPqt5g= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.19 h1:X1Tow7suZk9UCJHE1Iw9GMZJJl0dAnKXXP1NaSDHwmw= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.19/go.mod h1:/rARO8psX+4sfjUQXp5LLifjUt8DuATZ31WptNJTyQA= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared 
v1.19.19 h1:JnQeStZvPHFHeyky/7LbMlyQjUa+jIBj36OlWm0pzIk= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.19/go.mod h1:HGyasyHvYdFQeJhvDHfH7HXkHh57htcJGKDZ+7z+I24= +github.com/aws/aws-sdk-go-v2/service/s3 v1.96.4 h1:4ExZyubQ6LQQVuF2Qp9OsfEvsTdAWh5Gfwf6PgIdLdk= +github.com/aws/aws-sdk-go-v2/service/s3 v1.96.4/go.mod h1:NF3JcMGOiARAss1ld3WGORCw71+4ExDD2cbbdKS5PpA= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.7 h1:Y2cAXlClHsXkkOvWZFXATr34b0hxxloeQu/pAZz2row= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.7/go.mod h1:idzZ7gmDeqeNrSPkdbtMp9qWMgcBwykA7P7Rzh5DXVU= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.12 h1:iSsvB9EtQ09YrsmIc44Heqlx5ByGErqhPK1ZQLppias= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.12/go.mod h1:fEWYKTRGoZNl8tZ77i61/ccwOMJdGxwOhWCkp6TXAr0= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.16 h1:EnUdUqRP1CNzt2DkV67tJx6XDN4xlfBFm+bzeNOQVb0= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.16/go.mod h1:Jic/xv0Rq/pFNCh3WwpH4BEqdbSAl+IyHro8LbibHD8= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.8 h1:XQTQTF75vnug2TXS8m7CVJfC2nniYPZnO1D4Np761Oo= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.8/go.mod h1:Xgx+PR1NUOjNmQY+tRMnouRp83JRM8pRMw/vCaVhPkI= +github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng= +github.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= diff --git a/server/controllers/events/events_controller_e2e_test.go b/server/controllers/events/events_controller_e2e_test.go index cc6aacbc21..49c57359fc 100644 --- a/server/controllers/events/events_controller_e2e_test.go +++ b/server/controllers/events/events_controller_e2e_test.go @@ -1453,6 +1453,7 @@ func setupE2E(t 
*testing.T, repoDir string, opt setupOption) (events_controllers "auto", statsScope, terraformClient, + &runtime.LocalPlanStore{}, ) showStepRunner, err := runtime.NewShowStepRunner(terraformClient, defaultTFDistribution, defaultTFVersion) diff --git a/server/core/runtime/plan_store.go b/server/core/runtime/plan_store.go index c108bf8018..a25e08ffe7 100644 --- a/server/core/runtime/plan_store.go +++ b/server/core/runtime/plan_store.go @@ -18,6 +18,11 @@ type PlanStore interface { Load(ctx command.ProjectContext, planPath string) error // Remove deletes a plan file (local + external) after apply/import/state-rm. Remove(ctx command.ProjectContext, planPath string) error + // RestorePlans discovers and downloads all plans for a pull request into + // pullDir. Only used by the "apply all" path (buildAllProjectCommandsByPlan) + // where the set of planned projects is unknown. The single-project apply + // path does not call this — it uses Load with an already-known key. + RestorePlans(pullDir, owner, repo string, pullNum int) error } // LocalPlanStore implements PlanStore using the local filesystem. 
@@ -35,3 +40,7 @@ func (s *LocalPlanStore) Load(_ command.ProjectContext, _ string) error { func (s *LocalPlanStore) Remove(_ command.ProjectContext, planPath string) error { return utils.RemoveIgnoreNonExistent(planPath) } + +func (s *LocalPlanStore) RestorePlans(_, _, _ string, _ int) error { + return nil // no-op: plans are already on the local filesystem +} diff --git a/server/core/runtime/s3_plan_store.go b/server/core/runtime/s3_plan_store.go new file mode 100644 index 0000000000..06361c6a1b --- /dev/null +++ b/server/core/runtime/s3_plan_store.go @@ -0,0 +1,269 @@ +// Copyright 2025 The Atlantis Authors +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + awsconfig "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/runatlantis/atlantis/server/events/command" + "github.com/runatlantis/atlantis/server/logging" + "github.com/runatlantis/atlantis/server/utils" +) + +// S3Client is the subset of the S3 API used by S3PlanStore, extracted for testability. +type S3Client interface { + PutObject(ctx context.Context, params *s3.PutObjectInput, optFns ...func(*s3.Options)) (*s3.PutObjectOutput, error) + GetObject(ctx context.Context, params *s3.GetObjectInput, optFns ...func(*s3.Options)) (*s3.GetObjectOutput, error) + DeleteObject(ctx context.Context, params *s3.DeleteObjectInput, optFns ...func(*s3.Options)) (*s3.DeleteObjectOutput, error) + ListObjectsV2(ctx context.Context, params *s3.ListObjectsV2Input, optFns ...func(*s3.Options)) (*s3.ListObjectsV2Output, error) +} + +// S3PlanStoreConfig holds configuration for connecting to S3. +type S3PlanStoreConfig struct { + Bucket string + Region string + Prefix string + Endpoint string + ForcePathStyle bool + Profile string +} + +// S3PlanStore implements PlanStore by persisting plan files to S3. 
+type S3PlanStore struct { + client S3Client + bucket string + prefix string + logger logging.SimpleLogging +} + +// NewS3PlanStore creates an S3PlanStore using the AWS SDK default credential chain. +func NewS3PlanStore(cfg S3PlanStoreConfig, logger logging.SimpleLogging) (*S3PlanStore, error) { + var opts []func(*awsconfig.LoadOptions) error + opts = append(opts, awsconfig.WithRegion(cfg.Region)) + + if cfg.Profile != "" { + opts = append(opts, awsconfig.WithSharedConfigProfile(cfg.Profile)) + } + + awsCfg, err := awsconfig.LoadDefaultConfig(context.Background(), opts...) + if err != nil { + return nil, fmt.Errorf("loading AWS config: %w", err) + } + + var s3Opts []func(*s3.Options) + if cfg.Endpoint != "" { + s3Opts = append(s3Opts, func(o *s3.Options) { + o.BaseEndpoint = aws.String(cfg.Endpoint) + if cfg.ForcePathStyle { + o.UsePathStyle = true + } + }) + } else if cfg.ForcePathStyle { + s3Opts = append(s3Opts, func(o *s3.Options) { + o.UsePathStyle = true + }) + } + + client := s3.NewFromConfig(awsCfg, s3Opts...) + return NewS3PlanStoreWithClient(client, cfg.Bucket, cfg.Prefix, logger), nil +} + +// NewS3PlanStoreWithClient creates an S3PlanStore with an injected S3Client (for testing). +func NewS3PlanStoreWithClient(client S3Client, bucket, prefix string, logger logging.SimpleLogging) *S3PlanStore { + return &S3PlanStore{ + client: client, + bucket: bucket, + prefix: strings.TrimSuffix(prefix, "/"), + logger: logger, + } +} + +// Save uploads the plan file at planPath to S3. 
+func (s *S3PlanStore) Save(ctx command.ProjectContext, planPath string) error { + key := s.s3Key(ctx, planPath) + + f, err := os.Open(planPath) + if err != nil { + return fmt.Errorf("opening plan file for S3 upload: %w", err) + } + defer f.Close() + + _, err = s.client.PutObject(context.Background(), &s3.PutObjectInput{ + Bucket: aws.String(s.bucket), + Key: aws.String(key), + Body: f, + }) + if err != nil { + return fmt.Errorf("uploading plan to S3 (key=%s): %w", key, err) + } + + s.logger.Info("uploaded plan to s3://%s/%s", s.bucket, key) + return nil +} + +// Load downloads the plan file from S3 and writes it to planPath. +func (s *S3PlanStore) Load(ctx command.ProjectContext, planPath string) error { + key := s.s3Key(ctx, planPath) + + resp, err := s.client.GetObject(context.Background(), &s3.GetObjectInput{ + Bucket: aws.String(s.bucket), + Key: aws.String(key), + }) + if err != nil { + return fmt.Errorf("downloading plan from S3 (key=%s): %w", key, err) + } + defer resp.Body.Close() + + if err := os.MkdirAll(filepath.Dir(planPath), 0o700); err != nil { + return fmt.Errorf("creating parent directories for plan file: %w", err) + } + + f, err := os.Create(planPath) + if err != nil { + return fmt.Errorf("creating local plan file: %w", err) + } + defer f.Close() + + if _, err := io.Copy(f, resp.Body); err != nil { + return fmt.Errorf("writing plan file from S3: %w", err) + } + + s.logger.Debug("downloaded plan from s3://%s/%s", s.bucket, key) + return nil +} + +// Remove deletes the plan file from S3 and locally. 
+func (s *S3PlanStore) Remove(ctx command.ProjectContext, planPath string) error { + key := s.s3Key(ctx, planPath) + + _, err := s.client.DeleteObject(context.Background(), &s3.DeleteObjectInput{ + Bucket: aws.String(s.bucket), + Key: aws.String(key), + }) + if err != nil { + return fmt.Errorf("deleting plan from S3 (key=%s): %w", key, err) + } + + s.logger.Debug("deleted plan from s3://%s/%s", s.bucket, key) + return utils.RemoveIgnoreNonExistent(planPath) +} + +// RestorePlans lists all plan files for a pull request in S3 (via prefix scan) +// and downloads them into pullDir so PendingPlanFinder can discover them. +// Only called from the "apply all" path where we don't know which projects +// were planned. The single-project path skips this and uses Load directly. +// +// Note: plans downloaded here will be re-downloaded by Load() in +// ApplyStepRunner, which also validates head-commit metadata. This means +// each plan is fetched from S3 twice in the "apply all" path. Acceptable +// since plan files are small; eliminating it would require shared state +// between RestorePlans and Load. +func (s *S3PlanStore) RestorePlans(pullDir, owner, repo string, pullNum int) error { + // Build the S3 prefix for all plans under this pull request. 
+ prefixParts := []string{} + if s.prefix != "" { + prefixParts = append(prefixParts, s.prefix) + } + prefixParts = append(prefixParts, owner, repo, strconv.Itoa(pullNum)) + listPrefix := strings.Join(prefixParts, "/") + "/" + + var restored int + var continuationToken *string + for { + resp, err := s.client.ListObjectsV2(context.Background(), &s3.ListObjectsV2Input{ + Bucket: aws.String(s.bucket), + Prefix: aws.String(listPrefix), + ContinuationToken: continuationToken, + }) + if err != nil { + return fmt.Errorf("listing plans from S3 (prefix=%s): %w", listPrefix, err) + } + + for _, obj := range resp.Contents { + key := aws.ToString(obj.Key) + if !strings.HasSuffix(key, ".tfplan") { + continue + } + + // Strip the prefix up to and including / to get the relative path. + relPath := strings.TrimPrefix(key, listPrefix) + localPath := filepath.Join(pullDir, relPath) + + if err := os.MkdirAll(filepath.Dir(localPath), 0o700); err != nil { + return fmt.Errorf("creating directory for restored plan: %w", err) + } + + getResp, err := s.client.GetObject(context.Background(), &s3.GetObjectInput{ + Bucket: aws.String(s.bucket), + Key: aws.String(key), + }) + if err != nil { + return fmt.Errorf("downloading plan from S3 (key=%s): %w", key, err) + } + + f, err := os.Create(localPath) + if err != nil { + getResp.Body.Close() + return fmt.Errorf("creating local plan file %s: %w", localPath, err) + } + + _, copyErr := io.Copy(f, getResp.Body) + f.Close() + getResp.Body.Close() + if copyErr != nil { + return fmt.Errorf("writing restored plan file %s: %w", localPath, copyErr) + } + + restored++ + s.logger.Info("restored plan from s3://%s/%s to %s", s.bucket, key, localPath) + } + + if !aws.ToBool(resp.IsTruncated) { + break + } + continuationToken = resp.NextContinuationToken + } + + s.logger.Info("restored %d plan(s) from S3 for %s/%s#%d", restored, owner, repo, pullNum) + return nil +} + +// s3Key builds a deterministic S3 object key from the ProjectContext and plan filename. 
+// Format: ////// +func (s *S3PlanStore) s3Key(ctx command.ProjectContext, planPath string) string { + parts := []string{} + if s.prefix != "" { + parts = append(parts, s.prefix) + } + parts = append(parts, + ctx.BaseRepo.Owner, + ctx.BaseRepo.Name, + strconv.Itoa(ctx.Pull.Num), + ctx.Workspace, + ctx.RepoRelDir, + filepath.Base(planPath), + ) + return strings.Join(parts, "/") +} + +// TestS3Key is exported for testing only. +func (s *S3PlanStore) TestS3Key(ctx command.ProjectContext, planPath string) string { + return s.s3Key(ctx, planPath) +} + +// Ensure S3PlanStore satisfies PlanStore at compile time. +var _ PlanStore = (*S3PlanStore)(nil) + +// Ensure the real S3 client satisfies our interface at compile time. +var _ S3Client = (*s3.Client)(nil) + diff --git a/server/core/runtime/s3_plan_store_test.go b/server/core/runtime/s3_plan_store_test.go new file mode 100644 index 0000000000..5d98506cd5 --- /dev/null +++ b/server/core/runtime/s3_plan_store_test.go @@ -0,0 +1,304 @@ +// Copyright 2025 The Atlantis Authors +// SPDX-License-Identifier: Apache-2.0 + +package runtime_test + +import ( + "bytes" + "context" + "errors" + "io" + "os" + "path/filepath" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/runatlantis/atlantis/server/core/runtime" + "github.com/runatlantis/atlantis/server/events/command" + "github.com/runatlantis/atlantis/server/events/models" + "github.com/runatlantis/atlantis/server/logging" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// mockS3Client records calls and returns configured responses. 
+type mockS3Client struct { + putInput *s3.PutObjectInput + putBody []byte + putErr error + getBody []byte + getErr error + deleteInput *s3.DeleteObjectInput + deleteErr error + + // For ListObjectsV2 / RestorePlans testing + listOutput *s3.ListObjectsV2Output + listErr error + // getObjects maps S3 key to body content for multi-key GetObject calls + getObjects map[string][]byte +} + +func (m *mockS3Client) PutObject(_ context.Context, input *s3.PutObjectInput, _ ...func(*s3.Options)) (*s3.PutObjectOutput, error) { + m.putInput = input + if input.Body != nil { + b, _ := io.ReadAll(input.Body) + m.putBody = b + } + return &s3.PutObjectOutput{}, m.putErr +} + +func (m *mockS3Client) GetObject(_ context.Context, input *s3.GetObjectInput, _ ...func(*s3.Options)) (*s3.GetObjectOutput, error) { + if m.getErr != nil { + return nil, m.getErr + } + // Support per-key bodies for RestorePlans testing. + if m.getObjects != nil { + key := *input.Key + if body, ok := m.getObjects[key]; ok { + return &s3.GetObjectOutput{ + Body: io.NopCloser(bytes.NewReader(body)), + }, nil + } + return nil, errors.New("no such key: " + key) + } + return &s3.GetObjectOutput{ + Body: io.NopCloser(bytes.NewReader(m.getBody)), + }, nil +} + +func (m *mockS3Client) DeleteObject(_ context.Context, input *s3.DeleteObjectInput, _ ...func(*s3.Options)) (*s3.DeleteObjectOutput, error) { + m.deleteInput = input + return &s3.DeleteObjectOutput{}, m.deleteErr +} + +func (m *mockS3Client) ListObjectsV2(_ context.Context, _ *s3.ListObjectsV2Input, _ ...func(*s3.Options)) (*s3.ListObjectsV2Output, error) { + if m.listErr != nil { + return nil, m.listErr + } + if m.listOutput != nil { + return m.listOutput, nil + } + return &s3.ListObjectsV2Output{}, nil +} + +func testProjectContext() command.ProjectContext { + return command.ProjectContext{ + BaseRepo: models.Repo{ + Owner: "acme", + Name: "infra", + }, + Pull: models.PullRequest{ + Num: 42, + }, + Workspace: "default", + RepoRelDir: "modules/vpc", + } +} + 
+func TestS3Key_WithPrefix(t *testing.T) { + store := runtime.NewS3PlanStoreWithClient(&mockS3Client{}, "bucket", "atlantis/plans", logging.NewNoopLogger(t)) + ctx := testProjectContext() + + key := store.TestS3Key(ctx, "/tmp/plans/myproject-default.tfplan") + assert.Equal(t, "atlantis/plans/acme/infra/42/default/modules/vpc/myproject-default.tfplan", key) +} + +func TestS3Key_WithoutPrefix(t *testing.T) { + store := runtime.NewS3PlanStoreWithClient(&mockS3Client{}, "bucket", "", logging.NewNoopLogger(t)) + ctx := testProjectContext() + + key := store.TestS3Key(ctx, "/tmp/plans/myproject-default.tfplan") + assert.Equal(t, "acme/infra/42/default/modules/vpc/myproject-default.tfplan", key) +} + +func TestS3Key_NestedRepoRelDir(t *testing.T) { + store := runtime.NewS3PlanStoreWithClient(&mockS3Client{}, "bucket", "pfx", logging.NewNoopLogger(t)) + ctx := testProjectContext() + ctx.RepoRelDir = "envs/prod/us-east-1" + + key := store.TestS3Key(ctx, "/tmp/plan.tfplan") + assert.Equal(t, "pfx/acme/infra/42/default/envs/prod/us-east-1/plan.tfplan", key) +} + +func TestS3Key_TrailingSlashPrefix(t *testing.T) { + store := runtime.NewS3PlanStoreWithClient(&mockS3Client{}, "bucket", "prefix/", logging.NewNoopLogger(t)) + ctx := testProjectContext() + + key := store.TestS3Key(ctx, "/tmp/plan.tfplan") + assert.Equal(t, "prefix/acme/infra/42/default/modules/vpc/plan.tfplan", key) +} + +func TestSave_Success(t *testing.T) { + mock := &mockS3Client{} + store := runtime.NewS3PlanStoreWithClient(mock, "my-bucket", "pfx", logging.NewNoopLogger(t)) + ctx := testProjectContext() + + planDir := t.TempDir() + planPath := filepath.Join(planDir, "test.tfplan") + require.NoError(t, os.WriteFile(planPath, []byte("plan-content"), 0o644)) + + err := store.Save(ctx, planPath) + require.NoError(t, err) + + assert.Equal(t, "my-bucket", *mock.putInput.Bucket) + assert.Equal(t, "pfx/acme/infra/42/default/modules/vpc/test.tfplan", *mock.putInput.Key) + assert.Equal(t, []byte("plan-content"), 
mock.putBody) +} + +func TestSave_S3Error(t *testing.T) { + mock := &mockS3Client{putErr: errors.New("access denied")} + store := runtime.NewS3PlanStoreWithClient(mock, "bucket", "", logging.NewNoopLogger(t)) + ctx := testProjectContext() + + planDir := t.TempDir() + planPath := filepath.Join(planDir, "test.tfplan") + require.NoError(t, os.WriteFile(planPath, []byte("data"), 0o644)) + + err := store.Save(ctx, planPath) + assert.ErrorContains(t, err, "access denied") +} + +func TestSave_FileOpenError(t *testing.T) { + store := runtime.NewS3PlanStoreWithClient(&mockS3Client{}, "bucket", "", logging.NewNoopLogger(t)) + ctx := testProjectContext() + + err := store.Save(ctx, "/nonexistent/path/plan.tfplan") + assert.ErrorContains(t, err, "opening plan file") +} + +func TestLoad_Success(t *testing.T) { + planContent := []byte("downloaded-plan-data") + mock := &mockS3Client{getBody: planContent} + store := runtime.NewS3PlanStoreWithClient(mock, "bucket", "pfx", logging.NewNoopLogger(t)) + ctx := testProjectContext() + + planDir := t.TempDir() + planPath := filepath.Join(planDir, "subdir", "test.tfplan") + + err := store.Load(ctx, planPath) + require.NoError(t, err) + + got, err := os.ReadFile(planPath) + require.NoError(t, err) + assert.Equal(t, planContent, got) +} + +func TestLoad_S3Error(t *testing.T) { + mock := &mockS3Client{getErr: errors.New("no such key")} + store := runtime.NewS3PlanStoreWithClient(mock, "bucket", "", logging.NewNoopLogger(t)) + ctx := testProjectContext() + + err := store.Load(ctx, "/tmp/nonexistent/plan.tfplan") + assert.ErrorContains(t, err, "no such key") +} + +func TestRemove_Success(t *testing.T) { + mock := &mockS3Client{} + store := runtime.NewS3PlanStoreWithClient(mock, "my-bucket", "pfx", logging.NewNoopLogger(t)) + ctx := testProjectContext() + + planDir := t.TempDir() + planPath := filepath.Join(planDir, "test.tfplan") + require.NoError(t, os.WriteFile(planPath, []byte("data"), 0o644)) + + err := store.Remove(ctx, planPath) + 
require.NoError(t, err) + + assert.Equal(t, "my-bucket", *mock.deleteInput.Bucket) + assert.Equal(t, "pfx/acme/infra/42/default/modules/vpc/test.tfplan", *mock.deleteInput.Key) + + _, statErr := os.Stat(planPath) + assert.True(t, os.IsNotExist(statErr)) +} + +func TestRemove_S3Error(t *testing.T) { + mock := &mockS3Client{deleteErr: errors.New("forbidden")} + store := runtime.NewS3PlanStoreWithClient(mock, "bucket", "", logging.NewNoopLogger(t)) + ctx := testProjectContext() + + err := store.Remove(ctx, "/tmp/whatever.tfplan") + assert.ErrorContains(t, err, "forbidden") +} + +func TestRemove_LocalFileAlreadyGone(t *testing.T) { + mock := &mockS3Client{} + store := runtime.NewS3PlanStoreWithClient(mock, "bucket", "", logging.NewNoopLogger(t)) + ctx := testProjectContext() + + err := store.Remove(ctx, "/tmp/nonexistent-plan-file.tfplan") + require.NoError(t, err) +} + +func TestRestorePlans_Success(t *testing.T) { + mock := &mockS3Client{ + listOutput: &s3.ListObjectsV2Output{ + Contents: []s3types.Object{ + {Key: aws.String("pfx/acme/infra/42/default/modules/vpc/plan.tfplan")}, + {Key: aws.String("pfx/acme/infra/42/staging/modules/rds/plan.tfplan")}, + {Key: aws.String("pfx/acme/infra/42/default/some-other-file.txt")}, // not a .tfplan — skipped + }, + }, + getObjects: map[string][]byte{ + "pfx/acme/infra/42/default/modules/vpc/plan.tfplan": []byte("plan-vpc"), + "pfx/acme/infra/42/staging/modules/rds/plan.tfplan": []byte("plan-rds"), + }, + } + store := runtime.NewS3PlanStoreWithClient(mock, "bucket", "pfx", logging.NewNoopLogger(t)) + pullDir := t.TempDir() + + err := store.RestorePlans(pullDir, "acme", "infra", 42) + require.NoError(t, err) + + // Verify files were written to the correct paths. 
+ got1, err := os.ReadFile(filepath.Join(pullDir, "default", "modules", "vpc", "plan.tfplan")) + require.NoError(t, err) + assert.Equal(t, []byte("plan-vpc"), got1) + + got2, err := os.ReadFile(filepath.Join(pullDir, "staging", "modules", "rds", "plan.tfplan")) + require.NoError(t, err) + assert.Equal(t, []byte("plan-rds"), got2) +} + +func TestRestorePlans_NoPlansFound(t *testing.T) { + mock := &mockS3Client{ + listOutput: &s3.ListObjectsV2Output{ + Contents: []s3types.Object{}, + }, + } + store := runtime.NewS3PlanStoreWithClient(mock, "bucket", "pfx", logging.NewNoopLogger(t)) + + err := store.RestorePlans(t.TempDir(), "acme", "infra", 42) + require.NoError(t, err) +} + +func TestRestorePlans_ListError(t *testing.T) { + mock := &mockS3Client{listErr: errors.New("access denied")} + store := runtime.NewS3PlanStoreWithClient(mock, "bucket", "pfx", logging.NewNoopLogger(t)) + + err := store.RestorePlans(t.TempDir(), "acme", "infra", 42) + assert.ErrorContains(t, err, "access denied") +} + +func TestRestorePlans_WithoutPrefix(t *testing.T) { + mock := &mockS3Client{ + listOutput: &s3.ListObjectsV2Output{ + Contents: []s3types.Object{ + {Key: aws.String("acme/infra/42/default/plan.tfplan")}, + }, + }, + getObjects: map[string][]byte{ + "acme/infra/42/default/plan.tfplan": []byte("plan-data"), + }, + } + store := runtime.NewS3PlanStoreWithClient(mock, "bucket", "", logging.NewNoopLogger(t)) + pullDir := t.TempDir() + + err := store.RestorePlans(pullDir, "acme", "infra", 42) + require.NoError(t, err) + + got, err := os.ReadFile(filepath.Join(pullDir, "default", "plan.tfplan")) + require.NoError(t, err) + assert.Equal(t, []byte("plan-data"), got) +} diff --git a/server/events/project_command_builder.go b/server/events/project_command_builder.go index a2a06b13bc..8952f719f2 100644 --- a/server/events/project_command_builder.go +++ b/server/events/project_command_builder.go @@ -15,6 +15,7 @@ import ( tally "github.com/uber-go/tally/v4" 
"github.com/runatlantis/atlantis/server/core/config/valid" + "github.com/runatlantis/atlantis/server/core/runtime" "github.com/runatlantis/atlantis/server/core/terraform/tfclient" "github.com/runatlantis/atlantis/server/logging" "github.com/runatlantis/atlantis/server/metrics" @@ -62,6 +63,7 @@ func NewInstrumentedProjectCommandBuilder( AutoDiscoverMode string, scope tally.Scope, terraformClient tfclient.Client, + planStore runtime.PlanStore, ) *InstrumentedProjectCommandBuilder { scope = scope.SubScope("builder") @@ -93,6 +95,7 @@ func NewInstrumentedProjectCommandBuilder( AutoDiscoverMode, scope, terraformClient, + planStore, ), Logger: logger, scope: scope, @@ -122,6 +125,7 @@ func NewProjectCommandBuilder( AutoDiscoverMode string, scope tally.Scope, terraformClient tfclient.Client, + planStore runtime.PlanStore, ) *DefaultProjectCommandBuilder { return &DefaultProjectCommandBuilder{ ParserValidator: parserValidator, @@ -131,6 +135,7 @@ func NewProjectCommandBuilder( WorkingDirLocker: workingDirLocker, GlobalCfg: globalCfg, PendingPlanFinder: pendingPlanFinder, + PlanStore: planStore, SkipCloneNoChanges: skipCloneNoChanges, EnableRegExpCmd: EnableRegExpCmd, EnableAutoMerge: EnableAutoMerge, @@ -224,6 +229,8 @@ type DefaultProjectCommandBuilder struct { GlobalCfg valid.GlobalCfg // Finds unapplied plans. PendingPlanFinder *DefaultPendingPlanFinder + // Persists plan files to external storage (S3) so they survive pod restarts. + PlanStore runtime.PlanStore // Builds project command contexts for Atlantis commands. ProjectCommandContextBuilder ProjectCommandContextBuilder // User config option: Skip cloning the repo during autoplan if there are no changes to Terraform projects. 
@@ -797,7 +804,25 @@ func (p *DefaultProjectCommandBuilder) getCfg(ctx *command.Context, projectName func (p *DefaultProjectCommandBuilder) buildAllProjectCommandsByPlan(ctx *command.Context, commentCmd *CommentCommand) ([]command.ProjectContext, error) { pullDir, err := p.WorkingDir.GetPullDir(ctx.Pull.BaseRepo, ctx.Pull) if err != nil { - return nil, err + if !os.IsNotExist(err) { + return nil, err + } + if _, isLocal := p.PlanStore.(*runtime.LocalPlanStore); isLocal { + return nil, err + } + // External plan store: working directory lost (e.g. pod restart with + // emptyDir). Re-clone and restore plan files from the external store. + ctx.Log.Info("pull directory missing, re-cloning repo for apply") + if _, cloneErr := p.WorkingDir.Clone(ctx.Log, ctx.HeadRepo, ctx.Pull, DefaultWorkspace); cloneErr != nil { + return nil, fmt.Errorf("re-cloning repo for apply: %w", cloneErr) + } + pullDir, err = p.WorkingDir.GetPullDir(ctx.Pull.BaseRepo, ctx.Pull) + if err != nil { + return nil, err + } + if restoreErr := p.PlanStore.RestorePlans(pullDir, ctx.Pull.BaseRepo.Owner, ctx.Pull.BaseRepo.Name, ctx.Pull.Num); restoreErr != nil { + return nil, fmt.Errorf("restoring plans from external store: %w", restoreErr) + } } plans, err := p.PendingPlanFinder.Find(pullDir) @@ -853,7 +878,17 @@ func (p *DefaultProjectCommandBuilder) buildProjectCommand(ctx *command.Context, // other workspaces will not have the file if they are using pre_workflow_hooks to generate it dynamically repoDir, err := p.WorkingDir.GetWorkingDir(ctx.Pull.BaseRepo, ctx.Pull, DefaultWorkspace) if errors.Is(err, os.ErrNotExist) { - return projCtx, errors.New("no working directory found–did you run plan?") + if _, isLocal := p.PlanStore.(*runtime.LocalPlanStore); isLocal { + return projCtx, errors.New("no working directory found–did you run plan?") + } + // External plan store: working directory lost (e.g. pod restart with + // emptyDir). 
Re-clone; the plan file will be loaded from the external + // store during the apply step. + ctx.Log.Info("working directory missing, re-cloning repo for apply") + repoDir, err = p.WorkingDir.Clone(ctx.Log, ctx.HeadRepo, ctx.Pull, workspace) + if err != nil { + return projCtx, fmt.Errorf("re-cloning repo for apply: %w", err) + } } else if err != nil { return projCtx, err } diff --git a/server/events/project_command_builder_internal_test.go b/server/events/project_command_builder_internal_test.go index ceebf53063..6d9572d0dd 100644 --- a/server/events/project_command_builder_internal_test.go +++ b/server/events/project_command_builder_internal_test.go @@ -12,6 +12,7 @@ import ( . "github.com/petergtz/pegomock/v4" "github.com/runatlantis/atlantis/server/core/config" "github.com/runatlantis/atlantis/server/core/config/valid" + "github.com/runatlantis/atlantis/server/core/runtime" tfclientmocks "github.com/runatlantis/atlantis/server/core/terraform/tfclient/mocks" "github.com/runatlantis/atlantis/server/events/command" "github.com/runatlantis/atlantis/server/events/models" @@ -676,6 +677,7 @@ projects: "auto", statsScope, terraformClient, + &runtime.LocalPlanStore{}, ) // We run a test for each type of command. 
@@ -893,6 +895,7 @@ projects: "auto", statsScope, terraformClient, + &runtime.LocalPlanStore{}, ) // We run a test for each type of command, again specific projects @@ -1140,6 +1143,7 @@ workflows: "auto", statsScope, terraformClient, + &runtime.LocalPlanStore{}, ) cmd := command.PolicyCheck @@ -1292,6 +1296,7 @@ projects: "auto", statsScope, terraformClient, + &runtime.LocalPlanStore{}, ) for _, cmd := range []command.Name{command.Plan, command.Apply} { @@ -1514,6 +1519,7 @@ autodiscover: "auto", statsScope, terraformClient, + &runtime.LocalPlanStore{}, ) ctxs, err := builder.BuildPlanCommands( diff --git a/server/events/project_command_builder_test.go b/server/events/project_command_builder_test.go index ad80a99c4e..ae036fc84c 100644 --- a/server/events/project_command_builder_test.go +++ b/server/events/project_command_builder_test.go @@ -17,6 +17,7 @@ import ( "github.com/runatlantis/atlantis/server/core/config" "github.com/runatlantis/atlantis/server/core/config/valid" + "github.com/runatlantis/atlantis/server/core/runtime" "github.com/runatlantis/atlantis/server/events" "github.com/runatlantis/atlantis/server/events/command" "github.com/runatlantis/atlantis/server/events/mocks" @@ -278,6 +279,7 @@ terraform { userConfig.AutoDiscoverMode, scope, terraformClient, + &runtime.LocalPlanStore{}, ) ctxs, err := builder.BuildAutoplanCommands(&command.Context{ @@ -644,6 +646,7 @@ projects: c.AutoDiscoverModeUserCfg, scope, terraformClient, + &runtime.LocalPlanStore{}, ) var actCtxs []command.ProjectContext @@ -832,6 +835,7 @@ projects: userConfig.AutoDiscoverMode, scope, terraformClient, + &runtime.LocalPlanStore{}, ) var actCtxs []command.ProjectContext @@ -1197,6 +1201,7 @@ projects: userConfig.AutoDiscoverMode, scope, terraformClient, + &runtime.LocalPlanStore{}, ) ctxs, err := builder.BuildPlanCommands( @@ -1295,6 +1300,7 @@ func TestDefaultProjectCommandBuilder_BuildMultiApply(t *testing.T) { userConfig.AutoDiscoverMode, scope, terraformClient, + 
&runtime.LocalPlanStore{}, ) ctxs, err := builder.BuildApplyCommands( @@ -1381,6 +1387,7 @@ projects: userConfig.AutoDiscoverMode, scope, terraformClient, + &runtime.LocalPlanStore{}, ) ctx := &command.Context{ @@ -1469,6 +1476,7 @@ func TestDefaultProjectCommandBuilder_EscapeArgs(t *testing.T) { userConfig.AutoDiscoverMode, scope, terraformClient, + &runtime.LocalPlanStore{}, ) var actCtxs []command.ProjectContext @@ -1631,6 +1639,7 @@ projects: userConfig.AutoDiscoverMode, scope, terraformClient, + &runtime.LocalPlanStore{}, ) actCtxs, err := builder.BuildPlanCommands( @@ -1772,6 +1781,7 @@ projects: userConfig.AutoDiscoverMode, scope, terraformClient, + &runtime.LocalPlanStore{}, ) var actCtxs []command.ProjectContext @@ -1856,6 +1866,7 @@ func TestDefaultProjectCommandBuilder_WithPolicyCheckEnabled_BuildAutoplanComman userConfig.AutoDiscoverMode, scope, terraformClient, + &runtime.LocalPlanStore{}, ) ctxs, err := builder.BuildAutoplanCommands(&command.Context{ @@ -1944,6 +1955,7 @@ func TestDefaultProjectCommandBuilder_BuildVersionCommand(t *testing.T) { userConfig.AutoDiscoverMode, scope, terraformClient, + &runtime.LocalPlanStore{}, ) ctxs, err := builder.BuildVersionCommands( @@ -2074,6 +2086,7 @@ func TestDefaultProjectCommandBuilder_BuildPlanCommands_Single_With_RestrictFile userConfig.AutoDiscoverMode, scope, terraformClient, + &runtime.LocalPlanStore{}, ) var actCtxs []command.ProjectContext @@ -2185,6 +2198,7 @@ func TestDefaultProjectCommandBuilder_BuildPlanCommands_with_IncludeGitUntracked userConfig.AutoDiscoverMode, scope, terraformClient, + &runtime.LocalPlanStore{}, ) var actCtxs []command.ProjectContext diff --git a/server/server.go b/server/server.go index bdc8485d5b..f18a99f5f6 100644 --- a/server/server.go +++ b/server/server.go @@ -648,6 +648,25 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { CommitStatusUpdater: commitStatusUpdater, Router: router, } + var planStore runtime.PlanStore + switch userConfig.PlanStore { 
+ case "s3": + logger.Info("initializing S3 plan store (bucket=%s, region=%s)", userConfig.PlanStoreS3Bucket, userConfig.PlanStoreS3Region) + planStore, err = runtime.NewS3PlanStore(runtime.S3PlanStoreConfig{ + Bucket: userConfig.PlanStoreS3Bucket, + Region: userConfig.PlanStoreS3Region, + Prefix: userConfig.PlanStoreS3Prefix, + Endpoint: userConfig.PlanStoreS3Endpoint, + ForcePathStyle: userConfig.PlanStoreS3ForcePathStyle, + Profile: userConfig.PlanStoreS3Profile, + }, logger) + if err != nil { + return nil, fmt.Errorf("initializing S3 plan store: %w", err) + } + default: + planStore = &runtime.LocalPlanStore{} + } + projectCommandBuilder := events.NewInstrumentedProjectCommandBuilder( logger, policyChecksEnabled, @@ -672,10 +691,9 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { userConfig.AutoDiscoverModeFlag, statsScope, terraformClient, + planStore, ) - planStore := &runtime.LocalPlanStore{} - showStepRunner, err := runtime.NewShowStepRunner(terraformClient, defaultTfDistribution, defaultTfVersion) if err != nil { diff --git a/server/user_config.go b/server/user_config.go index fcf25a535e..51f9b8ba17 100644 --- a/server/user_config.go +++ b/server/user_config.go @@ -92,6 +92,13 @@ type UserConfig struct { PendingApplyStatus bool `mapstructure:"pending-apply-status"` StatsNamespace string `mapstructure:"stats-namespace"` PlanDrafts bool `mapstructure:"allow-draft-prs"` + PlanStore string `mapstructure:"plan-store"` + PlanStoreS3Bucket string `mapstructure:"plan-store-s3-bucket"` + PlanStoreS3Endpoint string `mapstructure:"plan-store-s3-endpoint"` + PlanStoreS3ForcePathStyle bool `mapstructure:"plan-store-s3-force-path-style"` + PlanStoreS3Prefix string `mapstructure:"plan-store-s3-prefix"` + PlanStoreS3Profile string `mapstructure:"plan-store-s3-profile"` + PlanStoreS3Region string `mapstructure:"plan-store-s3-region"` Port int `mapstructure:"port"` QuietPolicyChecks bool `mapstructure:"quiet-policy-checks"` RedisDB int 
`mapstructure:"redis-db"` From 73be72d2e3f94385742c4d1fd2c26a639b179010 Mon Sep 17 00:00:00 2001 From: "daanvinken@tythus.com" Date: Fri, 13 Mar 2026 14:05:25 +0100 Subject: [PATCH 3/8] feat: validate S3 bucket access at startup with HeadBucket Call HeadBucket during NewS3PlanStore to fail fast on misconfigured bucket or credentials instead of silently failing on first plan. Signed-off-by: Daan Vinken Signed-off-by: Daan Vinken --- server/core/runtime/s3_plan_store.go | 8 ++++++++ server/core/runtime/s3_plan_store_test.go | 7 +++++++ 2 files changed, 15 insertions(+) diff --git a/server/core/runtime/s3_plan_store.go b/server/core/runtime/s3_plan_store.go index 06361c6a1b..c799e5121b 100644 --- a/server/core/runtime/s3_plan_store.go +++ b/server/core/runtime/s3_plan_store.go @@ -22,6 +22,7 @@ import ( // S3Client is the subset of the S3 API used by S3PlanStore, extracted for testability. type S3Client interface { + HeadBucket(ctx context.Context, params *s3.HeadBucketInput, optFns ...func(*s3.Options)) (*s3.HeadBucketOutput, error) PutObject(ctx context.Context, params *s3.PutObjectInput, optFns ...func(*s3.Options)) (*s3.PutObjectOutput, error) GetObject(ctx context.Context, params *s3.GetObjectInput, optFns ...func(*s3.Options)) (*s3.GetObjectOutput, error) DeleteObject(ctx context.Context, params *s3.DeleteObjectInput, optFns ...func(*s3.Options)) (*s3.DeleteObjectOutput, error) @@ -75,6 +76,13 @@ func NewS3PlanStore(cfg S3PlanStoreConfig, logger logging.SimpleLogging) (*S3Pla } client := s3.NewFromConfig(awsCfg, s3Opts...) 
+ + if _, err := client.HeadBucket(context.Background(), &s3.HeadBucketInput{ + Bucket: aws.String(cfg.Bucket), + }); err != nil { + return nil, fmt.Errorf("validating S3 plan store bucket %q: %w", cfg.Bucket, err) + } + return NewS3PlanStoreWithClient(client, cfg.Bucket, cfg.Prefix, logger), nil } diff --git a/server/core/runtime/s3_plan_store_test.go b/server/core/runtime/s3_plan_store_test.go index 5d98506cd5..4a0435629a 100644 --- a/server/core/runtime/s3_plan_store_test.go +++ b/server/core/runtime/s3_plan_store_test.go @@ -33,6 +33,9 @@ type mockS3Client struct { deleteInput *s3.DeleteObjectInput deleteErr error + // For HeadBucket startup validation + headBucketErr error + // For ListObjectsV2 / RestorePlans testing listOutput *s3.ListObjectsV2Output listErr error @@ -40,6 +43,10 @@ type mockS3Client struct { getObjects map[string][]byte } +func (m *mockS3Client) HeadBucket(_ context.Context, _ *s3.HeadBucketInput, _ ...func(*s3.Options)) (*s3.HeadBucketOutput, error) { + return &s3.HeadBucketOutput{}, m.headBucketErr +} + func (m *mockS3Client) PutObject(_ context.Context, input *s3.PutObjectInput, _ ...func(*s3.Options)) (*s3.PutObjectOutput, error) { m.putInput = input if input.Body != nil { From 269dfa090cb1437870db875e6bd44e709fd90b38 Mon Sep 17 00:00:00 2001 From: "daanvinken@tythus.com" Date: Fri, 13 Mar 2026 14:13:07 +0100 Subject: [PATCH 4/8] feat: clean up S3 plan files on PR close via DeleteForPull Add DeleteForPull to PlanStore interface and implement in S3PlanStore using ListObjectsV2 + DeleteObject per key. Hook into PullClosedExecutor.CleanUpPull() to prevent plan accumulation in S3. Failures are logged as warnings to avoid blocking local cleanup. 
Signed-off-by: Daan Vinken Signed-off-by: Daan Vinken --- server/core/runtime/plan_store.go | 7 +++ server/core/runtime/s3_plan_store.go | 44 +++++++++++++++++ server/core/runtime/s3_plan_store_test.go | 59 +++++++++++++++++++++++ server/events/pull_closed_executor.go | 8 +++ server/server.go | 27 ++++++----- 5 files changed, 132 insertions(+), 13 deletions(-) diff --git a/server/core/runtime/plan_store.go b/server/core/runtime/plan_store.go index a25e08ffe7..c38a55d880 100644 --- a/server/core/runtime/plan_store.go +++ b/server/core/runtime/plan_store.go @@ -23,6 +23,9 @@ type PlanStore interface { // where the set of planned projects is unknown. The single-project apply // path does not call this — it uses Load with an already-known key. RestorePlans(pullDir, owner, repo string, pullNum int) error + // DeleteForPull removes all stored plan files for a pull request. + // Called during PR close/merge cleanup. + DeleteForPull(owner, repo string, pullNum int) error } // LocalPlanStore implements PlanStore using the local filesystem. @@ -44,3 +47,7 @@ func (s *LocalPlanStore) Remove(_ command.ProjectContext, planPath string) error func (s *LocalPlanStore) RestorePlans(_, _, _ string, _ int) error { return nil // no-op: plans are already on the local filesystem } + +func (s *LocalPlanStore) DeleteForPull(_, _ string, _ int) error { + return nil // no-op: working dir deletion handles local files +} diff --git a/server/core/runtime/s3_plan_store.go b/server/core/runtime/s3_plan_store.go index c799e5121b..bfc0aafad0 100644 --- a/server/core/runtime/s3_plan_store.go +++ b/server/core/runtime/s3_plan_store.go @@ -246,6 +246,50 @@ func (s *S3PlanStore) RestorePlans(pullDir, owner, repo string, pullNum int) err return nil } +// DeleteForPull removes all plan objects stored under the pull request prefix in S3. 
+func (s *S3PlanStore) DeleteForPull(owner, repo string, pullNum int) error { + prefixParts := []string{} + if s.prefix != "" { + prefixParts = append(prefixParts, s.prefix) + } + prefixParts = append(prefixParts, owner, repo, strconv.Itoa(pullNum)) + listPrefix := strings.Join(prefixParts, "/") + "/" + + var deleted int + var continuationToken *string + for { + resp, err := s.client.ListObjectsV2(context.Background(), &s3.ListObjectsV2Input{ + Bucket: aws.String(s.bucket), + Prefix: aws.String(listPrefix), + ContinuationToken: continuationToken, + }) + if err != nil { + return fmt.Errorf("listing plans for deletion (prefix=%s): %w", listPrefix, err) + } + + for _, obj := range resp.Contents { + key := aws.ToString(obj.Key) + if _, err := s.client.DeleteObject(context.Background(), &s3.DeleteObjectInput{ + Bucket: aws.String(s.bucket), + Key: aws.String(key), + }); err != nil { + return fmt.Errorf("deleting plan from S3 (key=%s): %w", key, err) + } + deleted++ + } + + if !aws.ToBool(resp.IsTruncated) { + break + } + continuationToken = resp.NextContinuationToken + } + + if deleted > 0 { + s.logger.Info("deleted %d plan(s) from S3 for %s/%s#%d", deleted, owner, repo, pullNum) + } + return nil +} + // s3Key builds a deterministic S3 object key from the ProjectContext and plan filename. 
// Format: &lt;prefix&gt;/&lt;owner&gt;/&lt;repo&gt;/&lt;pullNum&gt;/&lt;workspace&gt;/&lt;repoRelDir&gt;/&lt;planFilename&gt; func (s *S3PlanStore) s3Key(ctx command.ProjectContext, planPath string) string { diff --git a/server/core/runtime/s3_plan_store_test.go b/server/core/runtime/s3_plan_store_test.go index 4a0435629a..5a69699034 100644 --- a/server/core/runtime/s3_plan_store_test.go +++ b/server/core/runtime/s3_plan_store_test.go @@ -32,6 +32,8 @@ type mockS3Client struct { getErr error deleteInput *s3.DeleteObjectInput deleteErr error + // deletedKeys tracks all keys passed to DeleteObject + deletedKeys []string // For HeadBucket startup validation headBucketErr error @@ -77,6 +79,7 @@ func (m *mockS3Client) GetObject(_ context.Context, input *s3.GetObjectInput, _ ...func(*s3.Options)) (*s3.GetObjectOutput, error) { func (m *mockS3Client) DeleteObject(_ context.Context, input *s3.DeleteObjectInput, _ ...func(*s3.Options)) (*s3.DeleteObjectOutput, error) { m.deleteInput = input + m.deletedKeys = append(m.deletedKeys, aws.ToString(input.Key)) return &s3.DeleteObjectOutput{}, m.deleteErr } @@ -309,3 +312,59 @@ func TestRestorePlans_WithoutPrefix(t *testing.T) { require.NoError(t, err) assert.Equal(t, []byte("plan-data"), got) } + +func TestDeleteForPull_Success(t *testing.T) { + mock := &mockS3Client{ + listOutput: &s3.ListObjectsV2Output{ + Contents: []s3types.Object{ + {Key: aws.String("pfx/acme/infra/42/default/modules/vpc/plan.tfplan")}, + {Key: aws.String("pfx/acme/infra/42/staging/modules/rds/plan.tfplan")}, + }, + }, + } + store := runtime.NewS3PlanStoreWithClient(mock, "bucket", "pfx", logging.NewNoopLogger(t)) + + err := store.DeleteForPull("acme", "infra", 42) + require.NoError(t, err) + + assert.Equal(t, []string{ + "pfx/acme/infra/42/default/modules/vpc/plan.tfplan", + "pfx/acme/infra/42/staging/modules/rds/plan.tfplan", + }, mock.deletedKeys) +} + +func TestDeleteForPull_NoObjects(t *testing.T) { + mock := &mockS3Client{ + listOutput: &s3.ListObjectsV2Output{ + Contents: []s3types.Object{}, + }, + } + store := runtime.NewS3PlanStoreWithClient(mock, "bucket", "pfx", logging.NewNoopLogger(t)) + + err := 
store.DeleteForPull("acme", "infra", 42) + require.NoError(t, err) + assert.Empty(t, mock.deletedKeys) +} + +func TestDeleteForPull_ListError(t *testing.T) { + mock := &mockS3Client{listErr: errors.New("access denied")} + store := runtime.NewS3PlanStoreWithClient(mock, "bucket", "pfx", logging.NewNoopLogger(t)) + + err := store.DeleteForPull("acme", "infra", 42) + assert.ErrorContains(t, err, "access denied") +} + +func TestDeleteForPull_DeleteError(t *testing.T) { + mock := &mockS3Client{ + listOutput: &s3.ListObjectsV2Output{ + Contents: []s3types.Object{ + {Key: aws.String("pfx/acme/infra/42/default/plan.tfplan")}, + }, + }, + deleteErr: errors.New("forbidden"), + } + store := runtime.NewS3PlanStoreWithClient(mock, "bucket", "pfx", logging.NewNoopLogger(t)) + + err := store.DeleteForPull("acme", "infra", 42) + assert.ErrorContains(t, err, "forbidden") +} diff --git a/server/events/pull_closed_executor.go b/server/events/pull_closed_executor.go index c1f3a38697..f0bf5e436b 100644 --- a/server/events/pull_closed_executor.go +++ b/server/events/pull_closed_executor.go @@ -26,6 +26,7 @@ import ( "github.com/runatlantis/atlantis/server/core/db" "github.com/runatlantis/atlantis/server/core/locking" + "github.com/runatlantis/atlantis/server/core/runtime" "github.com/runatlantis/atlantis/server/events/models" "github.com/runatlantis/atlantis/server/events/vcs" "github.com/runatlantis/atlantis/server/jobs" @@ -56,6 +57,7 @@ type PullClosedExecutor struct { PullClosedTemplate PullCleanupTemplate LogStreamResourceCleaner ResourceCleaner CancellationTracker CancellationTracker + PlanStore runtime.PlanStore } type templatedProject struct { @@ -104,6 +106,12 @@ func (p *PullClosedExecutor) CleanUpPull(logger logging.SimpleLogging, repo mode return fmt.Errorf("cleaning workspace: %w", err) } + if p.PlanStore != nil { + if err := p.PlanStore.DeleteForPull(repo.Owner, repo.Name, pull.Num); err != nil { + logger.Warn("failed to delete plans from external store: %s", err) + } + } 
+ // Finally, delete locks. We do this last because when someone // unlocks a project, right now we don't actually delete the plan // so we might have plans laying around but no locks. diff --git a/server/server.go b/server/server.go index f18a99f5f6..fd9c6cf70f 100644 --- a/server/server.go +++ b/server/server.go @@ -573,19 +573,6 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { Database: database, } - pullClosedExecutor := events.NewInstrumentedPullClosedExecutor( - statsScope, - logger, - &events.PullClosedExecutor{ - Locker: lockingClient, - WorkingDir: workingDir, - Database: database, - PullClosedTemplate: &events.PullClosedEventTemplate{}, - LogStreamResourceCleaner: projectCmdOutputHandler, - VCSClient: vcsClient, - }, - ) - eventParser := &events.EventParser{ GithubUser: userConfig.GithubUser, GithubToken: userConfig.GithubToken, @@ -667,6 +654,20 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { planStore = &runtime.LocalPlanStore{} } + pullClosedExecutor := events.NewInstrumentedPullClosedExecutor( + statsScope, + logger, + &events.PullClosedExecutor{ + Locker: lockingClient, + WorkingDir: workingDir, + Database: database, + PullClosedTemplate: &events.PullClosedEventTemplate{}, + LogStreamResourceCleaner: projectCmdOutputHandler, + VCSClient: vcsClient, + PlanStore: planStore, + }, + ) + projectCommandBuilder := events.NewInstrumentedProjectCommandBuilder( logger, policyChecksEnabled, From bfcd3c8e0ff4ff78192af9002455ba77f3ee472f Mon Sep 17 00:00:00 2001 From: "daanvinken@tythus.com" Date: Fri, 13 Mar 2026 14:45:44 +0100 Subject: [PATCH 5/8] feat: reject stale plans via head-commit metadata check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Store the PR head commit SHA as S3 object metadata on Save. On Load, reject the plan if the stored commit differs from the current PR head. Plans without metadata are also rejected — forces re-plan after upgrade. 
Signed-off-by: Daan Vinken Signed-off-by: Daan Vinken --- go.mod | 2 +- go.sum | 4 +-- server/core/runtime/s3_plan_store.go | 28 +++++++++++++++-- server/core/runtime/s3_plan_store_test.go | 38 +++++++++++++++++++++-- 4 files changed, 64 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 71717f1002..cef3145f6b 100644 --- a/go.mod +++ b/go.mod @@ -113,7 +113,7 @@ require ( github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/go-github/v75 v75.0.0 // indirect - github.com/google/go-querystring v1.2.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect github.com/gorilla/css v1.0.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect diff --git a/go.sum b/go.sum index 11bc9c0d8c..898ed556b7 100644 --- a/go.sum +++ b/go.sum @@ -261,10 +261,10 @@ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-github/v71 v71.0.0 h1:Zi16OymGKZZMm8ZliffVVJ/Q9YZreDKONCr+WUd0Z30= +github.com/google/go-github/v71 v71.0.0/go.mod h1:URZXObp2BLlMjwu0O8g4y6VBneUj2bCHgnI8FfgZ51M= github.com/google/go-github/v75 v75.0.0 h1:k7q8Bvg+W5KxRl9Tjq16a9XEgVY1pwuiG5sIL7435Ic= github.com/google/go-github/v75 v75.0.0/go.mod h1:H3LUJEA1TCrzuUqtdAQniBNwuKiQIqdGKgBo1/M/uqI= -github.com/google/go-github/v83 v83.0.0 h1:Ydy4gAfqxrnFUwXAuKl/OMhhGa0KtMtnJ3EozIIuHT0= -github.com/google/go-github/v83 v83.0.0/go.mod h1:gbqarhK37mpSu8Xy7sz21ITtznvzouyHSAajSaYCHe8= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.2.0 h1:yhqkPbu2/OH+V9BfpCVPZkNmUXhb2gBxJArfhIxNtP0= github.com/google/go-querystring 
v1.2.0/go.mod h1:8IFJqpSRITyJ8QhQ13bmbeMBDfmeEJZD5A0egEOmkqU= diff --git a/server/core/runtime/s3_plan_store.go b/server/core/runtime/s3_plan_store.go index bfc0aafad0..8348657345 100644 --- a/server/core/runtime/s3_plan_store.go +++ b/server/core/runtime/s3_plan_store.go @@ -106,10 +106,19 @@ func (s *S3PlanStore) Save(ctx command.ProjectContext, planPath string) error { } defer f.Close() + metadata := map[string]string{} + if ctx.Pull.HeadCommit != "" { + metadata["head-commit"] = ctx.Pull.HeadCommit + } + if ctx.User.Username != "" { + metadata["planned-by"] = ctx.User.Username + } + _, err = s.client.PutObject(context.Background(), &s3.PutObjectInput{ - Bucket: aws.String(s.bucket), - Key: aws.String(key), - Body: f, + Bucket: aws.String(s.bucket), + Key: aws.String(key), + Body: f, + Metadata: metadata, }) if err != nil { return fmt.Errorf("uploading plan to S3 (key=%s): %w", key, err) @@ -132,6 +141,19 @@ func (s *S3PlanStore) Load(ctx command.ProjectContext, planPath string) error { } defer resp.Body.Close() + // Reject stale plans: the plan must have been created at the same commit + // the PR currently points to. This prevents applying outdated plans after + // new commits are pushed (e.g. across pod restarts). + // Note: S3 normalizes user-defined metadata keys to title case in responses, + // so "head-commit" (as written in Save) becomes "Head-Commit" here. 
+ planCommit := resp.Metadata["Head-Commit"] + if planCommit == "" { + return fmt.Errorf("plan in S3 has no head-commit metadata (key=%s) — run plan again", key) + } + if ctx.Pull.HeadCommit != "" && planCommit != ctx.Pull.HeadCommit { + return fmt.Errorf("plan was created at commit %.8s but PR is now at %.8s — run plan again", planCommit, ctx.Pull.HeadCommit) + } + if err := os.MkdirAll(filepath.Dir(planPath), 0o700); err != nil { return fmt.Errorf("creating parent directories for plan file: %w", err) } diff --git a/server/core/runtime/s3_plan_store_test.go b/server/core/runtime/s3_plan_store_test.go index 5a69699034..2aefe64729 100644 --- a/server/core/runtime/s3_plan_store_test.go +++ b/server/core/runtime/s3_plan_store_test.go @@ -29,6 +29,7 @@ type mockS3Client struct { putBody []byte putErr error getBody []byte + getMetadata map[string]string getErr error deleteInput *s3.DeleteObjectInput deleteErr error @@ -73,7 +74,8 @@ func (m *mockS3Client) GetObject(_ context.Context, input *s3.GetObjectInput, _ return nil, errors.New("no such key: " + key) } return &s3.GetObjectOutput{ - Body: io.NopCloser(bytes.NewReader(m.getBody)), + Body: io.NopCloser(bytes.NewReader(m.getBody)), + Metadata: m.getMetadata, }, nil } @@ -144,6 +146,7 @@ func TestSave_Success(t *testing.T) { mock := &mockS3Client{} store := runtime.NewS3PlanStoreWithClient(mock, "my-bucket", "pfx", logging.NewNoopLogger(t)) ctx := testProjectContext() + ctx.Pull.HeadCommit = "abc123def456" planDir := t.TempDir() planPath := filepath.Join(planDir, "test.tfplan") @@ -155,6 +158,7 @@ func TestSave_Success(t *testing.T) { assert.Equal(t, "my-bucket", *mock.putInput.Bucket) assert.Equal(t, "pfx/acme/infra/42/default/modules/vpc/test.tfplan", *mock.putInput.Key) assert.Equal(t, []byte("plan-content"), mock.putBody) + assert.Equal(t, "abc123def456", mock.putInput.Metadata["head-commit"]) } func TestSave_S3Error(t *testing.T) { @@ -180,9 +184,13 @@ func TestSave_FileOpenError(t *testing.T) { func 
TestLoad_Success(t *testing.T) { planContent := []byte("downloaded-plan-data") - mock := &mockS3Client{getBody: planContent} + mock := &mockS3Client{ + getBody: planContent, + getMetadata: map[string]string{"Head-Commit": "abc123"}, + } store := runtime.NewS3PlanStoreWithClient(mock, "bucket", "pfx", logging.NewNoopLogger(t)) ctx := testProjectContext() + ctx.Pull.HeadCommit = "abc123" planDir := t.TempDir() planPath := filepath.Join(planDir, "subdir", "test.tfplan") @@ -195,6 +203,32 @@ func TestLoad_Success(t *testing.T) { assert.Equal(t, planContent, got) } +func TestLoad_StalePlanRejected(t *testing.T) { + mock := &mockS3Client{ + getBody: []byte("old-plan"), + getMetadata: map[string]string{"Head-Commit": "oldcommit"}, + } + store := runtime.NewS3PlanStoreWithClient(mock, "bucket", "", logging.NewNoopLogger(t)) + ctx := testProjectContext() + ctx.Pull.HeadCommit = "newcommit" + + err := store.Load(ctx, filepath.Join(t.TempDir(), "plan.tfplan")) + assert.ErrorContains(t, err, "plan was created at commit oldcommi but PR is now at newcommi") +} + +func TestLoad_MissingMetadataRejected(t *testing.T) { + mock := &mockS3Client{ + getBody: []byte("plan-data"), + getMetadata: map[string]string{}, + } + store := runtime.NewS3PlanStoreWithClient(mock, "bucket", "", logging.NewNoopLogger(t)) + ctx := testProjectContext() + ctx.Pull.HeadCommit = "abc123" + + err := store.Load(ctx, filepath.Join(t.TempDir(), "plan.tfplan")) + assert.ErrorContains(t, err, "no head-commit metadata") +} + func TestLoad_S3Error(t *testing.T) { mock := &mockS3Client{getErr: errors.New("no such key")} store := runtime.NewS3PlanStoreWithClient(mock, "bucket", "", logging.NewNoopLogger(t)) From 4340faac73ab2b7dcd861dccfc9766fdde7f19c4 Mon Sep 17 00:00:00 2001 From: Daan Vinken Date: Mon, 16 Mar 2026 11:14:07 +0100 Subject: [PATCH 6/8] fix: pull request finding and S3 plan store hardening Signed-off-by: Daan Vinken --- server/core/runtime/plan_store.go | 4 +-- server/core/runtime/s3_plan_store.go | 
42 ++++++++++++++++++++-------- server/user_config.go | 2 +- 3 files changed, 34 insertions(+), 14 deletions(-) diff --git a/server/core/runtime/plan_store.go b/server/core/runtime/plan_store.go index c38a55d880..623c3d092b 100644 --- a/server/core/runtime/plan_store.go +++ b/server/core/runtime/plan_store.go @@ -9,8 +9,8 @@ import ( ) // PlanStore abstracts plan file persistence. -// In Phase 1, LocalPlanStore wraps current filesystem behavior (Save/Load are no-ops). -// In Phase 2, S3PlanStore will upload after plan and download before apply. +// LocalPlanStore wraps current filesystem behavior (Save/Load are no-ops). +// S3PlanStore uploads after plan and downloads before apply. type PlanStore interface { // Save persists a plan file after terraform writes it to planPath. Save(ctx command.ProjectContext, planPath string) error diff --git a/server/core/runtime/s3_plan_store.go b/server/core/runtime/s3_plan_store.go index 8348657345..8110c8e8f7 100644 --- a/server/core/runtime/s3_plan_store.go +++ b/server/core/runtime/s3_plan_store.go @@ -11,6 +11,7 @@ import ( "path/filepath" "strconv" "strings" + "time" "github.com/aws/aws-sdk-go-v2/aws" awsconfig "github.com/aws/aws-sdk-go-v2/config" @@ -56,7 +57,10 @@ func NewS3PlanStore(cfg S3PlanStoreConfig, logger logging.SimpleLogging) (*S3Pla opts = append(opts, awsconfig.WithSharedConfigProfile(cfg.Profile)) } - awsCfg, err := awsconfig.LoadDefaultConfig(context.Background(), opts...) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + awsCfg, err := awsconfig.LoadDefaultConfig(ctx, opts...) if err != nil { return nil, fmt.Errorf("loading AWS config: %w", err) } @@ -77,7 +81,7 @@ func NewS3PlanStore(cfg S3PlanStoreConfig, logger logging.SimpleLogging) (*S3Pla client := s3.NewFromConfig(awsCfg, s3Opts...) 
- if _, err := client.HeadBucket(context.Background(), &s3.HeadBucketInput{ + if _, err := client.HeadBucket(ctx, &s3.HeadBucketInput{ Bucket: aws.String(cfg.Bucket), }); err != nil { return nil, fmt.Errorf("validating S3 plan store bucket %q: %w", cfg.Bucket, err) @@ -144,9 +148,16 @@ func (s *S3PlanStore) Load(ctx command.ProjectContext, planPath string) error { // Reject stale plans: the plan must have been created at the same commit // the PR currently points to. This prevents applying outdated plans after // new commits are pushed (e.g. across pod restarts). - // Note: S3 normalizes user-defined metadata keys to title case in responses, - // so "head-commit" (as written in Save) becomes "Head-Commit" here. - planCommit := resp.Metadata["Head-Commit"] + // Note: different S3/S3-compatible implementations may return user-defined + // metadata keys with different casing, so we look up "head-commit" + // case-insensitively. + var planCommit string + for k, v := range resp.Metadata { + if strings.EqualFold(k, "head-commit") { + planCommit = v + break + } + } if planCommit == "" { return fmt.Errorf("plan in S3 has no head-commit metadata (key=%s) — run plan again", key) } @@ -176,15 +187,15 @@ func (s *S3PlanStore) Load(ctx command.ProjectContext, planPath string) error { func (s *S3PlanStore) Remove(ctx command.ProjectContext, planPath string) error { key := s.s3Key(ctx, planPath) - _, err := s.client.DeleteObject(context.Background(), &s3.DeleteObjectInput{ + if _, err := s.client.DeleteObject(context.Background(), &s3.DeleteObjectInput{ Bucket: aws.String(s.bucket), Key: aws.String(key), - }) - if err != nil { - return fmt.Errorf("deleting plan from S3 (key=%s): %w", key, err) + }); err != nil { + s.logger.Warn("failed to delete plan from S3 (key=%s): %v", key, err) + } else { + s.logger.Debug("deleted plan from s3://%s/%s", s.bucket, key) } - s.logger.Debug("deleted plan from s3://%s/%s", s.bucket, key) return utils.RemoveIgnoreNonExistent(planPath) } @@ -227,6 
+238,14 @@ func (s *S3PlanStore) RestorePlans(pullDir, owner, repo string, pullNum int) err // Strip the prefix up to and including / to get the relative path. relPath := strings.TrimPrefix(key, listPrefix) + relPath = filepath.Clean(relPath) + if relPath == "." || relPath == string(os.PathSeparator) { + s.logger.Info("skipping S3 object with empty relative path (key=%s, prefix=%s)", key, listPrefix) + continue + } + if filepath.IsAbs(relPath) || relPath == ".." || strings.HasPrefix(relPath, ".."+string(os.PathSeparator)) { + return fmt.Errorf("refusing to restore plan outside pull dir (key=%s, relPath=%s)", key, relPath) + } localPath := filepath.Join(pullDir, relPath) if err := os.MkdirAll(filepath.Dir(localPath), 0o700); err != nil { @@ -295,7 +314,8 @@ func (s *S3PlanStore) DeleteForPull(owner, repo string, pullNum int) error { Bucket: aws.String(s.bucket), Key: aws.String(key), }); err != nil { - return fmt.Errorf("deleting plan from S3 (key=%s): %w", key, err) + s.logger.Warn("failed to delete plan from S3 (key=%s): %v", key, err) + continue } deleted++ } diff --git a/server/user_config.go b/server/user_config.go index 51f9b8ba17..69829e7cf3 100644 --- a/server/user_config.go +++ b/server/user_config.go @@ -92,7 +92,7 @@ type UserConfig struct { PendingApplyStatus bool `mapstructure:"pending-apply-status"` StatsNamespace string `mapstructure:"stats-namespace"` PlanDrafts bool `mapstructure:"allow-draft-prs"` - PlanStore string `mapstructure:"plan-store"` + PlanStore string `mapstructure:"plan-store"` PlanStoreS3Bucket string `mapstructure:"plan-store-s3-bucket"` PlanStoreS3Endpoint string `mapstructure:"plan-store-s3-endpoint"` PlanStoreS3ForcePathStyle bool `mapstructure:"plan-store-s3-force-path-style"` From 8562509b6d9fe13755f87201b79ea323e3040320 Mon Sep 17 00:00:00 2001 From: Daan Vinken Date: Mon, 16 Mar 2026 11:43:31 +0100 Subject: [PATCH 7/8] feat: add test for external plan store recovery path Signed-off-by: Daan Vinken --- go.mod | 34 ++--- go.sum | 
68 +++++----- server/events/project_command_builder_test.go | 121 ++++++++++++++++++ 3 files changed, 172 insertions(+), 51 deletions(-) diff --git a/go.mod b/go.mod index cef3145f6b..26031ee093 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( code.gitea.io/sdk/gitea v0.23.2 github.com/Masterminds/sprig/v3 v3.3.0 github.com/alicebob/miniredis/v2 v2.36.1 - github.com/aws/aws-sdk-go-v2 v1.41.3 + github.com/aws/aws-sdk-go-v2 v1.41.4 github.com/aws/aws-sdk-go-v2/config v1.32.11 github.com/aws/aws-sdk-go-v2/service/s3 v1.96.4 github.com/bmatcuk/doublestar/v4 v4.10.0 @@ -77,21 +77,21 @@ require ( github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect github.com/ProtonMail/gopenpgp/v2 v2.7.5 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.6 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.19.11 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.19 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.19 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.19 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.5 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.20 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.6 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.11 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.19 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.19 // indirect - github.com/aws/aws-sdk-go-v2/service/signin v1.0.7 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.30.12 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.16 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.41.8 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.19.12 // 
indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.20 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.21 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.12 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.20 // indirect + github.com/aws/aws-sdk-go-v2/service/signin v1.0.8 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.30.13 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.17 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.41.9 // indirect github.com/aws/smithy-go v1.24.2 // indirect github.com/aymerick/douceur v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -113,7 +113,7 @@ require ( github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/go-github/v75 v75.0.0 // indirect - github.com/google/go-querystring v1.1.0 // indirect + github.com/google/go-querystring v1.2.0 // indirect github.com/gorilla/css v1.0.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect diff --git a/go.sum b/go.sum index 898ed556b7..254c621d59 100644 --- a/go.sum +++ b/go.sum @@ -67,42 +67,42 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod 
h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go-v2 v1.41.3 h1:4kQ/fa22KjDt13QCy1+bYADvdgcxpfH18f0zP542kZA= -github.com/aws/aws-sdk-go-v2 v1.41.3/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.6 h1:N4lRUXZpZ1KVEUn6hxtco/1d2lgYhNn1fHkkl8WhlyQ= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.6/go.mod h1:lyw7GFp3qENLh7kwzf7iMzAxDn+NzjXEAGjKS2UOKqI= +github.com/aws/aws-sdk-go-v2 v1.41.4 h1:10f50G7WyU02T56ox1wWXq+zTX9I1zxG46HYuG1hH/k= +github.com/aws/aws-sdk-go-v2 v1.41.4/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 h1:3kGOqnh1pPeddVa/E37XNTaWJ8W6vrbYV9lJEkCnhuY= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7/go.mod h1:lyw7GFp3qENLh7kwzf7iMzAxDn+NzjXEAGjKS2UOKqI= github.com/aws/aws-sdk-go-v2/config v1.32.11 h1:ftxI5sgz8jZkckuUHXfC/wMUc8u3fG1vQS0plr2F2Zs= github.com/aws/aws-sdk-go-v2/config v1.32.11/go.mod h1:twF11+6ps9aNRKEDimksp923o44w/Thk9+8YIlzWMmo= -github.com/aws/aws-sdk-go-v2/credentials v1.19.11 h1:NdV8cwCcAXrCWyxArt58BrvZJ9pZ9Fhf9w6Uh5W3Uyc= -github.com/aws/aws-sdk-go-v2/credentials v1.19.11/go.mod h1:30yY2zqkMPdrvxBqzI9xQCM+WrlrZKSOpSJEsylVU+8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.19 h1:INUvJxmhdEbVulJYHI061k4TVuS3jzzthNvjqvVvTKM= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.19/go.mod h1:FpZN2QISLdEBWkayloda+sZjVJL+e9Gl0k1SyTgcswU= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.19 h1:/sECfyq2JTifMI2JPyZ4bdRN77zJmr6SrS1eL3augIA= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.19/go.mod h1:dMf8A5oAqr9/oxOfLkC/c2LU/uMcALP0Rgn2BD5LWn0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.19 h1:AWeJMk33GTBf6J20XJe6qZoRSJo0WfUhsMdUKhoODXE= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.19/go.mod h1:+GWrYoaAsV7/4pNHpwh1kiNLXkKaSoppxQq9lbH8Ejw= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.5 
h1:clHU5fm//kWS1C2HgtgWxfQbFbx4b6rx+5jzhgX9HrI= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.5/go.mod h1:O3h0IK87yXci+kg6flUKzJnWeziQUKciKrLjcatSNcY= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.20 h1:qi3e/dmpdONhj1RyIZdi6DKKpDXS5Lb8ftr3p7cyHJc= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.20/go.mod h1:V1K+TeJVD5JOk3D9e5tsX2KUdL7BlB+FV6cBhdobN8c= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.6 h1:XAq62tBTJP/85lFD5oqOOe7YYgWxY9LvWq8plyDvDVg= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.6/go.mod h1:x0nZssQ3qZSnIcePWLvcoFisRXJzcTVvYpAAdYX8+GI= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.11 h1:BYf7XNsJMzl4mObARUBUib+j2tf0U//JAAtTnYqvqCw= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.11/go.mod h1:aEUS4WrNk/+FxkBZZa7tVgp4pGH+kFGW40Y8rCPqt5g= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.19 h1:X1Tow7suZk9UCJHE1Iw9GMZJJl0dAnKXXP1NaSDHwmw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.19/go.mod h1:/rARO8psX+4sfjUQXp5LLifjUt8DuATZ31WptNJTyQA= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.19 h1:JnQeStZvPHFHeyky/7LbMlyQjUa+jIBj36OlWm0pzIk= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.19/go.mod h1:HGyasyHvYdFQeJhvDHfH7HXkHh57htcJGKDZ+7z+I24= +github.com/aws/aws-sdk-go-v2/credentials v1.19.12 h1:oqtA6v+y5fZg//tcTWahyN9PEn5eDU/Wpvc2+kJ4aY8= +github.com/aws/aws-sdk-go-v2/credentials v1.19.12/go.mod h1:U3R1RtSHx6NB0DvEQFGyf/0sbrpJrluENHdPy1j/3TE= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.20 h1:zOgq3uezl5nznfoK3ODuqbhVg1JzAGDUhXOsU0IDCAo= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.20/go.mod h1:z/MVwUARehy6GAg/yQ1GO2IMl0k++cu1ohP9zo887wE= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20 h1:CNXO7mvgThFGqOFgbNAP2nol2qAWBOGfqR/7tQlvLmc= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20/go.mod h1:oydPDJKcfMhgfcgBUZaG+toBbwy8yPWubJXBVERtI4o= 
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20 h1:tN6W/hg+pkM+tf9XDkWUbDEjGLb+raoBMFsTodcoYKw= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20/go.mod h1:YJ898MhD067hSHA6xYCx5ts/jEd8BSOLtQDL3iZsvbc= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6 h1:qYQ4pzQ2Oz6WpQ8T3HvGHnZydA72MnLuFK9tJwmrbHw= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6/go.mod h1:O3h0IK87yXci+kg6flUKzJnWeziQUKciKrLjcatSNcY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.21 h1:SwGMTMLIlvDNyhMteQ6r8IJSBPlRdXX5d4idhIGbkXA= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.21/go.mod h1:UUxgWxofmOdAMuqEsSppbDtGKLfR04HGsD0HXzvhI1k= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 h1:5EniKhLZe4xzL7a+fU3C2tfUN4nWIqlLesfrjkuPFTY= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7/go.mod h1:x0nZssQ3qZSnIcePWLvcoFisRXJzcTVvYpAAdYX8+GI= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.12 h1:qtJZ70afD3ISKWnoX3xB0J2otEqu3LqicRcDBqsj0hQ= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.12/go.mod h1:v2pNpJbRNl4vEUWEh5ytQok0zACAKfdmKS51Hotc3pQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20 h1:2HvVAIq+YqgGotK6EkMf+KIEqTISmTYh5zLpYyeTo1Y= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20/go.mod h1:V4X406Y666khGa8ghKmphma/7C0DAtEQYhkq9z4vpbk= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.20 h1:siU1A6xjUZ2N8zjTHSXFhB9L/2OY8Dqs0xXiLjF30jA= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.20/go.mod h1:4TLZCmVJDM3FOu5P5TJP0zOlu9zWgDWU7aUxWbr+rcw= github.com/aws/aws-sdk-go-v2/service/s3 v1.96.4 h1:4ExZyubQ6LQQVuF2Qp9OsfEvsTdAWh5Gfwf6PgIdLdk= github.com/aws/aws-sdk-go-v2/service/s3 v1.96.4/go.mod h1:NF3JcMGOiARAss1ld3WGORCw71+4ExDD2cbbdKS5PpA= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.7 h1:Y2cAXlClHsXkkOvWZFXATr34b0hxxloeQu/pAZz2row= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.7/go.mod h1:idzZ7gmDeqeNrSPkdbtMp9qWMgcBwykA7P7Rzh5DXVU= 
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.12 h1:iSsvB9EtQ09YrsmIc44Heqlx5ByGErqhPK1ZQLppias= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.12/go.mod h1:fEWYKTRGoZNl8tZ77i61/ccwOMJdGxwOhWCkp6TXAr0= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.16 h1:EnUdUqRP1CNzt2DkV67tJx6XDN4xlfBFm+bzeNOQVb0= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.16/go.mod h1:Jic/xv0Rq/pFNCh3WwpH4BEqdbSAl+IyHro8LbibHD8= -github.com/aws/aws-sdk-go-v2/service/sts v1.41.8 h1:XQTQTF75vnug2TXS8m7CVJfC2nniYPZnO1D4Np761Oo= -github.com/aws/aws-sdk-go-v2/service/sts v1.41.8/go.mod h1:Xgx+PR1NUOjNmQY+tRMnouRp83JRM8pRMw/vCaVhPkI= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.8 h1:0GFOLzEbOyZABS3PhYfBIx2rNBACYcKty+XGkTgw1ow= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.8/go.mod h1:LXypKvk85AROkKhOG6/YEcHFPoX+prKTowKnVdcaIxE= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.13 h1:kiIDLZ005EcKomYYITtfsjn7dtOwHDOFy7IbPXKek2o= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.13/go.mod h1:2h/xGEowcW/g38g06g3KpRWDlT+OTfxxI0o1KqayAB8= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.17 h1:jzKAXIlhZhJbnYwHbvUQZEB8KfgAEuG0dc08Bkda7NU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.17/go.mod h1:Al9fFsXjv4KfbzQHGe6V4NZSZQXecFcvaIF4e70FoRA= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.9 h1:Cng+OOwCHmFljXIxpEVXAGMnBia8MSU6Ch5i9PgBkcU= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.9/go.mod h1:LrlIndBDdjA/EeXeyNBle+gyCwTlizzW5ycgWnvIxkk= github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng= github.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= @@ -261,10 +261,10 @@ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp 
v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/go-github/v71 v71.0.0 h1:Zi16OymGKZZMm8ZliffVVJ/Q9YZreDKONCr+WUd0Z30= -github.com/google/go-github/v71 v71.0.0/go.mod h1:URZXObp2BLlMjwu0O8g4y6VBneUj2bCHgnI8FfgZ51M= github.com/google/go-github/v75 v75.0.0 h1:k7q8Bvg+W5KxRl9Tjq16a9XEgVY1pwuiG5sIL7435Ic= github.com/google/go-github/v75 v75.0.0/go.mod h1:H3LUJEA1TCrzuUqtdAQniBNwuKiQIqdGKgBo1/M/uqI= +github.com/google/go-github/v83 v83.0.0 h1:Ydy4gAfqxrnFUwXAuKl/OMhhGa0KtMtnJ3EozIIuHT0= +github.com/google/go-github/v83 v83.0.0/go.mod h1:gbqarhK37mpSu8Xy7sz21ITtznvzouyHSAajSaYCHe8= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.2.0 h1:yhqkPbu2/OH+V9BfpCVPZkNmUXhb2gBxJArfhIxNtP0= github.com/google/go-querystring v1.2.0/go.mod h1:8IFJqpSRITyJ8QhQ13bmbeMBDfmeEJZD5A0egEOmkqU= diff --git a/server/events/project_command_builder_test.go b/server/events/project_command_builder_test.go index ae036fc84c..78db108b33 100644 --- a/server/events/project_command_builder_test.go +++ b/server/events/project_command_builder_test.go @@ -1328,6 +1328,127 @@ func TestDefaultProjectCommandBuilder_BuildMultiApply(t *testing.T) { Equals(t, "workspace2", ctxs[3].Workspace) } +// Test that when GetPullDir returns os.ErrNotExist with an external PlanStore, +// the builder re-clones and calls RestorePlans before discovering plans. +func TestDefaultProjectCommandBuilder_ExternalPlanStoreRecovery(t *testing.T) { + RegisterMockTestingT(t) + + // The directory that will exist after "re-clone". + tmpDir := DirStructure(t, map[string]any{ + "default": map[string]any{ + "project1": map[string]any{ + "main.tf": nil, + "default.tfplan": nil, + }, + }, + }) + runCmd(t, filepath.Join(tmpDir, "default"), "git", "init") + + workingDir := mocks.NewMockWorkingDir() + // First GetPullDir call: directory missing (pod restart). + // Second GetPullDir call: directory exists after re-clone. 
+ When(workingDir.GetPullDir( + Any[models.Repo](), + Any[models.PullRequest]())). + ThenReturn("", os.ErrNotExist). + ThenReturn(tmpDir, nil) + + When(workingDir.Clone( + Any[logging.SimpleLogging](), + Any[models.Repo](), + Any[models.PullRequest](), + Any[string]())). + ThenReturn(tmpDir, nil) + + When(workingDir.GetWorkingDir( + Any[models.Repo](), + Any[models.PullRequest](), + Any[string]())). + ThenReturn(tmpDir, nil) + + logger := logging.NewNoopLogger(t) + userConfig := defaultUserConfig + globalCfgArgs := valid.GlobalCfgArgs{} + scope := metricstest.NewLoggingScope(t, logger, "atlantis") + terraformClient := tfclientmocks.NewMockClient() + + restoreCalled := false + planStore := &mockExternalPlanStore{ + restoreFn: func(pullDir, owner, repo string, pullNum int) error { + restoreCalled = true + return nil + }, + } + + builder := events.NewProjectCommandBuilder( + false, + &config.ParserValidator{}, + &events.DefaultProjectFinder{}, + nil, + workingDir, + events.NewDefaultWorkingDirLocker(), + valid.NewGlobalCfgFromArgs(globalCfgArgs), + &events.DefaultPendingPlanFinder{}, + &events.CommentParser{ExecutableName: "atlantis"}, + userConfig.SkipCloneNoChanges, + userConfig.EnableRegExpCmd, + userConfig.EnableAutoMerge, + userConfig.EnableParallelPlan, + userConfig.EnableParallelApply, + userConfig.AutoDetectModuleFiles, + userConfig.AutoplanFileList, + userConfig.RestrictFileList, + userConfig.SilenceNoProjects, + userConfig.IncludeGitUntrackedFiles, + userConfig.AutoDiscoverMode, + scope, + terraformClient, + planStore, + ) + + ctxs, err := builder.BuildApplyCommands( + &command.Context{ + Log: logger, + Scope: scope, + }, + &events.CommentCommand{ + RepoRelDir: "", + Flags: nil, + Name: command.Apply, + Verbose: false, + Workspace: "", + ProjectName: "", + }) + Ok(t, err) + Assert(t, restoreCalled, "expected RestorePlans to be called") + Equals(t, 1, len(ctxs)) + Equals(t, "project1", ctxs[0].RepoRelDir) + + // Verify Clone was called (re-clone after missing 
pull dir). + workingDir.VerifyWasCalledOnce().Clone( + Any[logging.SimpleLogging](), + Any[models.Repo](), + Any[models.PullRequest](), + Any[string]()) +} + +// mockExternalPlanStore is a non-LocalPlanStore implementation for testing the +// external plan store recovery path. +type mockExternalPlanStore struct { + restoreFn func(pullDir, owner, repo string, pullNum int) error +} + +func (m *mockExternalPlanStore) Save(_ command.ProjectContext, _ string) error { return nil } +func (m *mockExternalPlanStore) Load(_ command.ProjectContext, _ string) error { return nil } +func (m *mockExternalPlanStore) Remove(_ command.ProjectContext, _ string) error { return nil } +func (m *mockExternalPlanStore) RestorePlans(pullDir, owner, repo string, pullNum int) error { + if m.restoreFn != nil { + return m.restoreFn(pullDir, owner, repo, pullNum) + } + return nil +} +func (m *mockExternalPlanStore) DeleteForPull(_, _ string, _ int) error { return nil } + // Test that if a directory has a list of workspaces configured then we don't // allow plans for other workspace names. func TestDefaultProjectCommandBuilder_WrongWorkspaceName(t *testing.T) { From 2e8756a5a4184345b9848ab333de6d66864febfa Mon Sep 17 00:00:00 2001 From: Daan Vinken Date: Wed, 25 Mar 2026 23:04:54 +0100 Subject: [PATCH 8/8] TESRE-8967: consolidate plan store config into server-side repo config Move 7 CLI flags (--plan-store, --plan-store-s3-*) into a single --enable-external-stores flag plus an external_stores block in the server-side repo config YAML. This keeps S3 backend details out of CLI args and alongside the rest of the repo-level configuration. 
Signed-off-by: Daan Vinken Signed-off-by: Daan Vinken --- cmd/server.go | 46 +--------- cmd/server_test.go | 1 + runatlantis.io/docs/server-configuration.md | 10 +++ server/core/config/raw/global_cfg.go | 83 ++++++++++++++++--- server/core/config/valid/global_cfg.go | 32 +++++-- server/core/runtime/s3_plan_store.go | 2 +- server/core/runtime/s3_plan_store_test.go | 6 +- server/events/project_command_builder.go | 10 +-- server/events/project_command_builder_test.go | 2 +- server/server.go | 36 ++++---- server/user_config.go | 8 +- 11 files changed, 148 insertions(+), 88 deletions(-) diff --git a/cmd/server.go b/cmd/server.go index 12d78ee0bc..64fdb1ef8f 100644 --- a/cmd/server.go +++ b/cmd/server.go @@ -127,13 +127,7 @@ const ( PendingApplyStatusFlag = "pending-apply-status" StatsNamespace = "stats-namespace" AllowDraftPRs = "allow-draft-prs" - PlanStoreFlag = "plan-store" - PlanStoreS3BucketFlag = "plan-store-s3-bucket" - PlanStoreS3EndpointFlag = "plan-store-s3-endpoint" - PlanStoreS3ForcePathStyleFlag = "plan-store-s3-force-path-style" - PlanStoreS3PrefixFlag = "plan-store-s3-prefix" - PlanStoreS3ProfileFlag = "plan-store-s3-profile" - PlanStoreS3RegionFlag = "plan-store-s3-region" + EnableExternalStoresFlag = "enable-external-stores" PortFlag = "port" RedisDB = "redis-db" RedisHost = "redis-host" @@ -195,7 +189,6 @@ const ( DefaultMaxCommentsPerCommand = 100 DefaultParallelPoolSize = 15 DefaultStatsNamespace = "atlantis" - DefaultPlanStore = "local" DefaultPort = 4141 DefaultRedisDB = 0 DefaultRedisPort = 6379 @@ -422,25 +415,6 @@ var stringFlags = map[string]stringFlag{ description: "Namespace for aggregating stats.", defaultValue: DefaultStatsNamespace, }, - PlanStoreFlag: { - description: "Plan file storage backend. Supports 'local' (default) or 's3'.", - defaultValue: DefaultPlanStore, - }, - PlanStoreS3BucketFlag: { - description: "S3 bucket name for storing plan files. 
Required when --plan-store=s3.", - }, - PlanStoreS3EndpointFlag: { - description: "Custom S3 endpoint URL (for S3-compatible stores like MinIO).", - }, - PlanStoreS3PrefixFlag: { - description: "Key prefix for plan files in S3 (e.g. 'atlantis/plans').", - }, - PlanStoreS3ProfileFlag: { - description: "AWS profile to use for S3 plan store authentication.", - }, - PlanStoreS3RegionFlag: { - description: "AWS region for the S3 plan store bucket. Required when --plan-store=s3.", - }, RedisHost: { description: "The Redis Hostname for when using a Locking DB type of 'redis'.", }, @@ -608,8 +582,8 @@ var boolFlags = map[string]boolFlag{ description: "Set apply job status as pending when there are planned changes that haven't been applied yet. Currently only supported for GitLab.", defaultValue: false, }, - PlanStoreS3ForcePathStyleFlag: { - description: "Use S3 path-style addressing (required for MinIO and some S3-compatible stores).", + EnableExternalStoresFlag: { + description: "Enable external storage backends configured in the server-side repo config (external_stores block).", defaultValue: false, }, QuietPolicyChecks: { @@ -1121,20 +1095,6 @@ func (s *ServerCmd) validate(userConfig server.UserConfig) error { } } - switch userConfig.PlanStore { - case "local", "": - // ok - case "s3": - if userConfig.PlanStoreS3Bucket == "" { - return fmt.Errorf("--%s is required when --%s=s3", PlanStoreS3BucketFlag, PlanStoreFlag) - } - if userConfig.PlanStoreS3Region == "" { - return fmt.Errorf("--%s is required when --%s=s3", PlanStoreS3RegionFlag, PlanStoreFlag) - } - default: - return fmt.Errorf("invalid --%s value %q: must be 'local' or 's3'", PlanStoreFlag, userConfig.PlanStore) - } - if userConfig.TFEHostname != DefaultTFEHostname && userConfig.TFEToken == "" { return fmt.Errorf("if setting --%s, must set --%s", TFEHostnameFlag, TFETokenFlag) } diff --git a/cmd/server_test.go b/cmd/server_test.go index d8c2f6e5d9..3e95ba4d61 100644 --- a/cmd/server_test.go +++ 
b/cmd/server_test.go @@ -120,6 +120,7 @@ var testFlags = map[string]any{ MaxCommentsPerCommand: 10, StatsNamespace: "atlantis", AllowDraftPRs: true, + EnableExternalStoresFlag: false, PortFlag: 8181, ParallelPoolSize: 100, ParallelPlanFlag: true, diff --git a/runatlantis.io/docs/server-configuration.md b/runatlantis.io/docs/server-configuration.md index 32468a9207..753a2e5776 100644 --- a/runatlantis.io/docs/server-configuration.md +++ b/runatlantis.io/docs/server-configuration.md @@ -561,6 +561,16 @@ Enable Atlantis to format Terraform plan output into a markdown-diff friendly fo Useful to enable for use with GitHub. +### `--enable-external-stores` + +```bash +atlantis server --enable-external-stores +# or +ATLANTIS_ENABLE_EXTERNAL_STORES=true +``` + +Enable external storage backends configured in the server-side repo config (`external_stores` block). When set, Atlantis reads the `external_stores` section from the repo config YAML to initialize backends such as S3 for plan file persistence. + ### `--enable-policy-checks` ```bash diff --git a/server/core/config/raw/global_cfg.go b/server/core/config/raw/global_cfg.go index 133b894a31..ac4ad4251d 100644 --- a/server/core/config/raw/global_cfg.go +++ b/server/core/config/raw/global_cfg.go @@ -16,11 +16,69 @@ import ( // GlobalCfg is the raw schema for server-side repo config. 
type GlobalCfg struct { - Repos []Repo `yaml:"repos" json:"repos"` - Workflows map[string]Workflow `yaml:"workflows" json:"workflows"` - PolicySets PolicySets `yaml:"policies" json:"policies"` - Metrics Metrics `yaml:"metrics" json:"metrics"` - TeamAuthz TeamAuthz `yaml:"team_authz" json:"team_authz"` + Repos []Repo `yaml:"repos" json:"repos"` + Workflows map[string]Workflow `yaml:"workflows" json:"workflows"` + PolicySets PolicySets `yaml:"policies" json:"policies"` + Metrics Metrics `yaml:"metrics" json:"metrics"` + TeamAuthz TeamAuthz `yaml:"team_authz" json:"team_authz"` + ExternalStores ExternalStores `yaml:"external_stores" json:"external_stores"` +} + +// ExternalStores is the raw schema for external storage backends. +type ExternalStores struct { + PlanStore PlanStoreConfig `yaml:"plan_store" json:"plan_store"` +} + +// PlanStoreConfig is the raw schema for plan storage configuration. +type PlanStoreConfig struct { + Type string `yaml:"type" json:"type"` + S3 S3StoreConfig `yaml:"s3" json:"s3"` +} + +// S3StoreConfig is the raw schema for S3 plan store configuration. 
+type S3StoreConfig struct { + Bucket string `yaml:"bucket" json:"bucket"` + Region string `yaml:"region" json:"region"` + Prefix string `yaml:"prefix" json:"prefix"` + Endpoint string `yaml:"endpoint" json:"endpoint"` + ForcePathStyle bool `yaml:"force_path_style" json:"force_path_style"` + Profile string `yaml:"profile" json:"profile"` +} + +func (e ExternalStores) Validate() error { + return e.PlanStore.Validate() +} + +func (p PlanStoreConfig) Validate() error { + if p.Type == "" { + return nil + } + if p.Type != "s3" { + return fmt.Errorf("unsupported plan store type %q: only 's3' is supported", p.Type) + } + if p.S3.Bucket == "" { + return fmt.Errorf("external_stores.plan_store.s3.bucket is required when type is 's3'") + } + if p.S3.Region == "" { + return fmt.Errorf("external_stores.plan_store.s3.region is required when type is 's3'") + } + return nil +} + +func (e ExternalStores) ToValid() valid.ExternalStores { + return valid.ExternalStores{ + PlanStore: valid.PlanStoreConfig{ + Type: e.PlanStore.Type, + S3: valid.S3StoreConfig{ + Bucket: e.PlanStore.S3.Bucket, + Region: e.PlanStore.S3.Region, + Prefix: e.PlanStore.S3.Prefix, + Endpoint: e.PlanStore.S3.Endpoint, + ForcePathStyle: e.PlanStore.S3.ForcePathStyle, + Profile: e.PlanStore.S3.Profile, + }, + }, + } } // Repo is the raw schema for repos in the server-side repo config. @@ -56,6 +114,10 @@ func (g GlobalCfg) Validate() error { return err } + if err := g.ExternalStores.Validate(); err != nil { + return err + } + // Check that all workflows referenced by repos are actually defined. for _, repo := range g.Repos { if repo.Workflow == nil { @@ -161,11 +223,12 @@ func (g GlobalCfg) ToValid(defaultCfg valid.GlobalCfg) valid.GlobalCfg { repos = append(defaultCfg.Repos, repos...) 
return valid.GlobalCfg{ - Repos: repos, - Workflows: workflows, - PolicySets: g.PolicySets.ToValid(), - Metrics: g.Metrics.ToValid(), - TeamAuthz: g.TeamAuthz.ToValid(), + Repos: repos, + Workflows: workflows, + PolicySets: g.PolicySets.ToValid(), + Metrics: g.Metrics.ToValid(), + TeamAuthz: g.TeamAuthz.ToValid(), + ExternalStores: g.ExternalStores.ToValid(), } } diff --git a/server/core/config/valid/global_cfg.go b/server/core/config/valid/global_cfg.go index 384cf503ca..47d0a28acf 100644 --- a/server/core/config/valid/global_cfg.go +++ b/server/core/config/valid/global_cfg.go @@ -46,11 +46,33 @@ var NonOverridableApplyReqs = []string{PoliciesPassedCommandReq} // GlobalCfg is the final parsed version of server-side repo config. type GlobalCfg struct { - Repos []Repo - Workflows map[string]Workflow - PolicySets PolicySets - Metrics Metrics - TeamAuthz TeamAuthz + Repos []Repo + Workflows map[string]Workflow + PolicySets PolicySets + Metrics Metrics + TeamAuthz TeamAuthz + ExternalStores ExternalStores +} + +// ExternalStores holds configuration for external storage backends. +type ExternalStores struct { + PlanStore PlanStoreConfig +} + +// PlanStoreConfig holds the type and backend-specific config for plan storage. +type PlanStoreConfig struct { + Type string + S3 S3StoreConfig +} + +// S3StoreConfig holds S3-specific configuration for the plan store. +type S3StoreConfig struct { + Bucket string + Region string + Prefix string + Endpoint string + ForcePathStyle bool + Profile string } type Metrics struct { diff --git a/server/core/runtime/s3_plan_store.go b/server/core/runtime/s3_plan_store.go index 8110c8e8f7..10f210293c 100644 --- a/server/core/runtime/s3_plan_store.go +++ b/server/core/runtime/s3_plan_store.go @@ -147,7 +147,7 @@ func (s *S3PlanStore) Load(ctx command.ProjectContext, planPath string) error { // Reject stale plans: the plan must have been created at the same commit // the PR currently points to. 
This prevents applying outdated plans after - // new commits are pushed (e.g. across pod restarts). + // new commits are pushed (e.g. across container restarts). // Note: different S3/S3-compatible implementations may return user-defined // metadata keys with different casing, so we look up "head-commit" // case-insensitively. diff --git a/server/core/runtime/s3_plan_store_test.go b/server/core/runtime/s3_plan_store_test.go index 2aefe64729..f686a79c0b 100644 --- a/server/core/runtime/s3_plan_store_test.go +++ b/server/core/runtime/s3_plan_store_test.go @@ -262,8 +262,9 @@ func TestRemove_S3Error(t *testing.T) { store := runtime.NewS3PlanStoreWithClient(mock, "bucket", "", logging.NewNoopLogger(t)) ctx := testProjectContext() + // S3 delete errors are logged but not returned (soft-fail). err := store.Remove(ctx, "/tmp/whatever.tfplan") - assert.ErrorContains(t, err, "forbidden") + assert.NoError(t, err) } func TestRemove_LocalFileAlreadyGone(t *testing.T) { @@ -399,6 +400,7 @@ func TestDeleteForPull_DeleteError(t *testing.T) { } store := runtime.NewS3PlanStoreWithClient(mock, "bucket", "pfx", logging.NewNoopLogger(t)) + // S3 delete errors during cleanup are logged but not returned (soft-fail). err := store.DeleteForPull("acme", "infra", 42) - assert.ErrorContains(t, err, "forbidden") + assert.NoError(t, err) } diff --git a/server/events/project_command_builder.go b/server/events/project_command_builder.go index 8952f719f2..d9b392f0a6 100644 --- a/server/events/project_command_builder.go +++ b/server/events/project_command_builder.go @@ -229,7 +229,7 @@ type DefaultProjectCommandBuilder struct { GlobalCfg valid.GlobalCfg // Finds unapplied plans. PendingPlanFinder *DefaultPendingPlanFinder - // Persists plan files to external storage (S3) so they survive pod restarts. + // Persists plan files to external storage (S3) so they survive container restarts. PlanStore runtime.PlanStore // Builds project command contexts for Atlantis commands. 
ProjectCommandContextBuilder ProjectCommandContextBuilder @@ -810,8 +810,8 @@ func (p *DefaultProjectCommandBuilder) buildAllProjectCommandsByPlan(ctx *comman if _, isLocal := p.PlanStore.(*runtime.LocalPlanStore); isLocal { return nil, err } - // External plan store: working directory lost (e.g. pod restart with - // emptyDir). Re-clone and restore plan files from the external store. + // External plan store: working directory lost (e.g. container restart + // with emptyDir). Re-clone and restore plan files from the external store. ctx.Log.Info("pull directory missing, re-cloning repo for apply") if _, cloneErr := p.WorkingDir.Clone(ctx.Log, ctx.HeadRepo, ctx.Pull, DefaultWorkspace); cloneErr != nil { return nil, fmt.Errorf("re-cloning repo for apply: %w", cloneErr) @@ -881,8 +881,8 @@ func (p *DefaultProjectCommandBuilder) buildProjectCommand(ctx *command.Context, if _, isLocal := p.PlanStore.(*runtime.LocalPlanStore); isLocal { return projCtx, errors.New("no working directory found–did you run plan?") } - // External plan store: working directory lost (e.g. pod restart with - // emptyDir). Re-clone; the plan file will be loaded from the external + // External plan store: working directory lost (e.g. container restart + // with emptyDir). Re-clone; the plan file will be loaded from the external // store during the apply step. 
ctx.Log.Info("working directory missing, re-cloning repo for apply") repoDir, err = p.WorkingDir.Clone(ctx.Log, ctx.HeadRepo, ctx.Pull, workspace) diff --git a/server/events/project_command_builder_test.go b/server/events/project_command_builder_test.go index 78db108b33..64b1d7fe0f 100644 --- a/server/events/project_command_builder_test.go +++ b/server/events/project_command_builder_test.go @@ -1345,7 +1345,7 @@ func TestDefaultProjectCommandBuilder_ExternalPlanStoreRecovery(t *testing.T) { runCmd(t, filepath.Join(tmpDir, "default"), "git", "init") workingDir := mocks.NewMockWorkingDir() - // First GetPullDir call: directory missing (pod restart). + // First GetPullDir call: directory missing (container restart). // Second GetPullDir call: directory exists after re-clone. When(workingDir.GetPullDir( Any[models.Repo](), diff --git a/server/server.go b/server/server.go index fd9c6cf70f..475d94ae75 100644 --- a/server/server.go +++ b/server/server.go @@ -636,21 +636,29 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { Router: router, } var planStore runtime.PlanStore - switch userConfig.PlanStore { - case "s3": - logger.Info("initializing S3 plan store (bucket=%s, region=%s)", userConfig.PlanStoreS3Bucket, userConfig.PlanStoreS3Region) - planStore, err = runtime.NewS3PlanStore(runtime.S3PlanStoreConfig{ - Bucket: userConfig.PlanStoreS3Bucket, - Region: userConfig.PlanStoreS3Region, - Prefix: userConfig.PlanStoreS3Prefix, - Endpoint: userConfig.PlanStoreS3Endpoint, - ForcePathStyle: userConfig.PlanStoreS3ForcePathStyle, - Profile: userConfig.PlanStoreS3Profile, - }, logger) - if err != nil { - return nil, fmt.Errorf("initializing S3 plan store: %w", err) + if userConfig.EnableExternalStores { + psCfg := globalCfg.ExternalStores.PlanStore + if psCfg.Type == "" { + return nil, fmt.Errorf("--enable-external-stores is set but no external_stores.plan_store.type is configured in the server-side repo config") } - default: + switch psCfg.Type { + case 
"s3": + logger.Info("initializing S3 plan store (bucket=%s, region=%s)", psCfg.S3.Bucket, psCfg.S3.Region) + planStore, err = runtime.NewS3PlanStore(runtime.S3PlanStoreConfig{ + Bucket: psCfg.S3.Bucket, + Region: psCfg.S3.Region, + Prefix: psCfg.S3.Prefix, + Endpoint: psCfg.S3.Endpoint, + ForcePathStyle: psCfg.S3.ForcePathStyle, + Profile: psCfg.S3.Profile, + }, logger) + if err != nil { + return nil, fmt.Errorf("initializing S3 plan store: %w", err) + } + default: + return nil, fmt.Errorf("unsupported plan store type %q", psCfg.Type) + } + } else { planStore = &runtime.LocalPlanStore{} } diff --git a/server/user_config.go b/server/user_config.go index 69829e7cf3..f0a33d1b8b 100644 --- a/server/user_config.go +++ b/server/user_config.go @@ -92,13 +92,7 @@ type UserConfig struct { PendingApplyStatus bool `mapstructure:"pending-apply-status"` StatsNamespace string `mapstructure:"stats-namespace"` PlanDrafts bool `mapstructure:"allow-draft-prs"` - PlanStore string `mapstructure:"plan-store"` - PlanStoreS3Bucket string `mapstructure:"plan-store-s3-bucket"` - PlanStoreS3Endpoint string `mapstructure:"plan-store-s3-endpoint"` - PlanStoreS3ForcePathStyle bool `mapstructure:"plan-store-s3-force-path-style"` - PlanStoreS3Prefix string `mapstructure:"plan-store-s3-prefix"` - PlanStoreS3Profile string `mapstructure:"plan-store-s3-profile"` - PlanStoreS3Region string `mapstructure:"plan-store-s3-region"` + EnableExternalStores bool `mapstructure:"enable-external-stores"` Port int `mapstructure:"port"` QuietPolicyChecks bool `mapstructure:"quiet-policy-checks"` RedisDB int `mapstructure:"redis-db"`