diff --git a/atc/api/builds_test.go b/atc/api/builds_test.go index bf62c196c..5ad60c266 100644 --- a/atc/api/builds_test.go +++ b/atc/api/builds_test.go @@ -1339,8 +1339,21 @@ var _ = Describe("Builds API", func() { BeforeEach(func() { fakeJob.PublicReturns(true) }) - It("returns 200", func() { - Expect(response.StatusCode).To(Equal(http.StatusOK)) + Context("and the build has a plan", func() { + BeforeEach(func() { + build.HasPlanReturns(true) + }) + It("returns 200", func() { + Expect(response.StatusCode).To(Equal(http.StatusOK)) + }) + }) + Context("and the build has no plan", func() { + BeforeEach(func() { + build.HasPlanReturns(false) + }) + It("returns 404", func() { + Expect(response.StatusCode).To(Equal(http.StatusNotFound)) + }) }) }) @@ -1364,6 +1377,7 @@ var _ = Describe("Builds API", func() { Context("when the build returns a plan", func() { BeforeEach(func() { + build.HasPlanReturns(true) build.PublicPlanReturns(plan) build.SchemaReturns("some-schema") }) @@ -1386,6 +1400,20 @@ var _ = Describe("Builds API", func() { }`)) }) }) + + Context("when the build has no plan", func() { + BeforeEach(func() { + build.HasPlanReturns(false) + }) + + It("returns no Content-Type header", func() { + Expect(response.Header.Get("Content-Type")).To(Equal("")) + }) + + It("returns not found", func() { + Expect(response.StatusCode).To(Equal(http.StatusNotFound)) + }) + }) }) }) diff --git a/atc/api/buildserver/plan.go b/atc/api/buildserver/plan.go index 3c80652fa..7acdb6257 100644 --- a/atc/api/buildserver/plan.go +++ b/atc/api/buildserver/plan.go @@ -12,6 +12,10 @@ func (s *Server) GetBuildPlan(build db.Build) http.Handler { hLog := s.logger.Session("get-build-plan") return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !build.HasPlan() { + w.WriteHeader(http.StatusNotFound) + return + } w.Header().Set("Content-Type", "application/json") err := json.NewEncoder(w).Encode(atc.PublicBuildPlan{ Schema: build.Schema(), diff --git 
a/atc/api/containers_test.go b/atc/api/containers_test.go index 44dd5dab8..414152856 100644 --- a/atc/api/containers_test.go +++ b/atc/api/containers_test.go @@ -362,7 +362,7 @@ var _ = Describe("Containers API", func() { _, err := client.Do(req) Expect(err).NotTo(HaveOccurred()) - _, pipelineName, resourceName, secretManager := dbTeam.FindCheckContainersArgsForCall(0) + pipelineName, resourceName, secretManager := dbTeam.FindCheckContainersArgsForCall(0) Expect(pipelineName).To(Equal("some-pipeline")) Expect(resourceName).To(Equal("some-resource")) Expect(secretManager).To(Equal(fakeSecretManager)) diff --git a/atc/api/containerserver/list.go b/atc/api/containerserver/list.go index f8868ca71..52d7ebc88 100644 --- a/atc/api/containerserver/list.go +++ b/atc/api/containerserver/list.go @@ -32,7 +32,7 @@ func (s *Server) ListContainers(team db.Team) http.Handler { hLog.Debug("listing-containers") - containers, checkContainersExpiresAt, err := containerLocator.Locate(hLog) + containers, checkContainersExpiresAt, err := containerLocator.Locate() if err != nil { hLog.Error("failed-to-find-containers", err) w.WriteHeader(http.StatusInternalServerError) @@ -56,7 +56,7 @@ func (s *Server) ListContainers(team db.Team) http.Handler { } type containerLocator interface { - Locate(logger lager.Logger) ([]db.Container, map[int]time.Time, error) + Locate() ([]db.Container, map[int]time.Time, error) } func createContainerLocatorFromRequest(team db.Team, r *http.Request, secretManager creds.Secrets) (containerLocator, error) { @@ -126,8 +126,8 @@ type allContainersLocator struct { team db.Team } -func (l *allContainersLocator) Locate(logger lager.Logger) ([]db.Container, map[int]time.Time, error) { - containers, err := l.team.Containers(logger) +func (l *allContainersLocator) Locate() ([]db.Container, map[int]time.Time, error) { + containers, err := l.team.Containers() return containers, nil, err } @@ -138,8 +138,8 @@ type checkContainerLocator struct { secretManager creds.Secrets 
} -func (l *checkContainerLocator) Locate(logger lager.Logger) ([]db.Container, map[int]time.Time, error) { - return l.team.FindCheckContainers(logger, l.pipelineName, l.resourceName, l.secretManager) +func (l *checkContainerLocator) Locate() ([]db.Container, map[int]time.Time, error) { + return l.team.FindCheckContainers(l.pipelineName, l.resourceName, l.secretManager) } type stepContainerLocator struct { @@ -147,7 +147,7 @@ type stepContainerLocator struct { metadata db.ContainerMetadata } -func (l *stepContainerLocator) Locate(logger lager.Logger) ([]db.Container, map[int]time.Time, error) { +func (l *stepContainerLocator) Locate() ([]db.Container, map[int]time.Time, error) { containers, err := l.team.FindContainersByMetadata(l.metadata) return containers, nil, err } diff --git a/atc/atccmd/command.go b/atc/atccmd/command.go index a72ac762c..0fabc8755 100644 --- a/atc/atccmd/command.go +++ b/atc/atccmd/command.go @@ -48,7 +48,6 @@ import ( "github.com/concourse/concourse/skymarshal/skycmd" "github.com/concourse/concourse/skymarshal/storage" "github.com/concourse/concourse/web" - "github.com/concourse/concourse/web/indexhandler" "github.com/concourse/flag" "github.com/concourse/retryhttp" "github.com/cppforlife/go-semi-semantic/version" @@ -562,7 +561,7 @@ func (cmd *RunCommand) constructAPIMembers( cmd.BaggageclaimResponseHeaderTimeout, ) - pool := worker.NewPool(workerProvider) + pool := worker.NewPool(clock.NewClock(), lockFactory, workerProvider) workerClient := worker.NewClient(pool, workerProvider) checkContainerStrategy := worker.NewRandomPlacementStrategy() @@ -611,7 +610,6 @@ func (cmd *RunCommand) constructAPIMembers( return nil, err } - indexhandler.ClusterName = cmd.Server.ClusterName webHandler, err := webHandler(logger) if err != nil { return nil, err @@ -741,7 +739,7 @@ func (cmd *RunCommand) constructBackendMembers( cmd.BaggageclaimResponseHeaderTimeout, ) - pool := worker.NewPool(workerProvider) + pool := worker.NewPool(clock.NewClock(), 
lockFactory, workerProvider) workerClient := worker.NewClient(pool, workerProvider) defaultLimits, err := cmd.parseDefaultLimits() diff --git a/atc/builds/tracker.go b/atc/builds/tracker.go index 41e178303..106c0f015 100644 --- a/atc/builds/tracker.go +++ b/atc/builds/tracker.go @@ -43,8 +43,8 @@ func (bt *Tracker) Track() { "job": build.JobName(), }) - engineBuild := bt.engine.LookupBuild(btLog, build) - go engineBuild.Resume(btLog) + engineBuild := bt.engine.NewBuild(build) + go engineBuild.Run(btLog) } } diff --git a/atc/builds/tracker_test.go b/atc/builds/tracker_test.go index 55f851867..03dbdf4f9 100644 --- a/atc/builds/tracker_test.go +++ b/atc/builds/tracker_test.go @@ -1,7 +1,6 @@ package builds_test import ( - "code.cloudfoundry.org/lager" "code.cloudfoundry.org/lager/lagertest" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -37,7 +36,7 @@ var _ = Describe("Tracker", func() { Describe("Track", func() { var inFlightBuilds []*dbfakes.FakeBuild - var engineBuilds []*enginefakes.FakeBuild + var engineBuilds []*enginefakes.FakeRunnable BeforeEach(func() { inFlightBuilds = []*dbfakes.FakeBuild{ @@ -53,9 +52,9 @@ var _ = Describe("Tracker", func() { fakeBuildFactory.GetAllStartedBuildsReturns(returnedBuilds, nil) - engineBuilds = []*enginefakes.FakeBuild{} - fakeEngine.LookupBuildStub = func(logger lager.Logger, build db.Build) engine.Build { - engineBuild := new(enginefakes.FakeBuild) + engineBuilds = []*enginefakes.FakeRunnable{} + fakeEngine.NewBuildStub = func(build db.Build) engine.Runnable { + engineBuild := new(enginefakes.FakeRunnable) engineBuilds = append(engineBuilds, engineBuild) return engineBuild } @@ -64,9 +63,9 @@ var _ = Describe("Tracker", func() { It("resumes all currently in-flight builds", func() { tracker.Track() - Eventually(engineBuilds[0].ResumeCallCount).Should(Equal(1)) - Eventually(engineBuilds[1].ResumeCallCount).Should(Equal(1)) - Eventually(engineBuilds[2].ResumeCallCount).Should(Equal(1)) + 
Eventually(engineBuilds[0].RunCallCount).Should(Equal(1)) + Eventually(engineBuilds[1].RunCallCount).Should(Equal(1)) + Eventually(engineBuilds[2].RunCallCount).Should(Equal(1)) }) }) diff --git a/atc/creds/versioned_resource_type.go b/atc/creds/versioned_resource_type.go index 496932077..067d97082 100644 --- a/atc/creds/versioned_resource_type.go +++ b/atc/creds/versioned_resource_type.go @@ -43,3 +43,24 @@ func (types VersionedResourceTypes) Without(name string) VersionedResourceTypes return newTypes } + +func (types VersionedResourceTypes) Evaluate() (atc.VersionedResourceTypes, error) { + + var rawTypes atc.VersionedResourceTypes + for _, t := range types { + source, err := t.Source.Evaluate() + if err != nil { + return nil, err + } + + resourceType := t.ResourceType + resourceType.Source = source + + rawTypes = append(rawTypes, atc.VersionedResourceType{ + ResourceType: resourceType, + Version: t.Version, + }) + } + + return rawTypes, nil +} diff --git a/atc/db/build.go b/atc/db/build.go index 8399d6369..cf05b75dc 100644 --- a/atc/db/build.go +++ b/atc/db/build.go @@ -11,7 +11,6 @@ import ( "code.cloudfoundry.org/lager" sq "github.com/Masterminds/squirrel" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db/encryption" "github.com/concourse/concourse/atc/db/lock" "github.com/concourse/concourse/atc/event" @@ -67,6 +66,7 @@ type Build interface { Schema() string PrivatePlan() atc.Plan PublicPlan() *json.RawMessage + HasPlan() bool Status() BuildStatus StartTime() time.Time CreateTime() time.Time @@ -95,7 +95,7 @@ type Build interface { Artifacts() ([]WorkerArtifact, error) Artifact(artifactID int) (WorkerArtifact, error) - SaveOutput(lager.Logger, string, atc.Source, creds.VersionedResourceTypes, atc.Version, ResourceConfigMetadataFields, string, string) error + SaveOutput(string, atc.Source, atc.VersionedResourceTypes, atc.Version, ResourceConfigMetadataFields, string, string) error 
UseInputs(inputs []BuildInput) error Resources() ([]BuildInput, []BuildOutput, error) @@ -170,6 +170,7 @@ func (b *build) IsManuallyTriggered() bool { return b.isManuallyTriggered } func (b *build) Schema() string { return b.schema } func (b *build) PrivatePlan() atc.Plan { return b.privatePlan } func (b *build) PublicPlan() *json.RawMessage { return b.publicPlan } +func (b *build) HasPlan() bool { return string(*b.publicPlan) != "{}" } func (b *build) CreateTime() time.Time { return b.createTime } func (b *build) StartTime() time.Time { return b.startTime } func (b *build) EndTime() time.Time { return b.endTime } @@ -634,9 +635,29 @@ func (b *build) Preparation() (BuildPreparation, bool, error) { missingInputReasons := MissingInputReasons{} if found { + inputsSatisfiedStatus = BuildPreparationStatusNotBlocking - for _, buildInput := range nextBuildInputs { - inputs[buildInput.Name] = BuildPreparationStatusNotBlocking + + if b.IsManuallyTriggered() { + for _, buildInput := range nextBuildInputs { + resource, _, err := pipeline.ResourceByID(buildInput.ResourceID) + if err != nil { + return BuildPreparation{}, false, err + } + + // input is blocking if its last check time is before build create time + if resource.LastCheckEndTime().Before(b.CreateTime()) { + inputs[buildInput.Name] = BuildPreparationStatusBlocking + missingInputReasons.RegisterNoResourceCheckFinished(buildInput.Name) + inputsSatisfiedStatus = BuildPreparationStatusBlocking + } else { + inputs[buildInput.Name] = BuildPreparationStatusNotBlocking + } + } + } else { + for _, buildInput := range nextBuildInputs { + inputs[buildInput.Name] = BuildPreparationStatusNotBlocking + } } } else { buildInputs, err := job.GetIndependentBuildInputs() @@ -808,10 +829,9 @@ func (b *build) Artifacts() ([]WorkerArtifact, error) { } func (b *build) SaveOutput( - logger lager.Logger, resourceType string, source atc.Source, - resourceTypes creds.VersionedResourceTypes, + resourceTypes atc.VersionedResourceTypes, version 
atc.Version, metadata ResourceConfigMetadataFields, outputName string, @@ -854,7 +874,7 @@ func (b *build) SaveOutput( return err } - resourceConfig, err := resourceConfigDescriptor.findOrCreate(logger, tx, b.lockFactory, b.conn) + resourceConfig, err := resourceConfigDescriptor.findOrCreate(tx, b.lockFactory, b.conn) if err != nil { return err } diff --git a/atc/db/build_preparation.go b/atc/db/build_preparation.go index 771156324..139f1c7c6 100644 --- a/atc/db/build_preparation.go +++ b/atc/db/build_preparation.go @@ -15,6 +15,7 @@ type MissingInputReasons map[string]string const ( NoVersionsSatisfiedPassedConstraints string = "no versions satisfy passed constraints" NoVersionsAvailable string = "no versions available" + NoResourceCheckFinished string = "checking for latest available versions" PinnedVersionUnavailable string = "pinned version %s is not available" ) @@ -26,6 +27,10 @@ func (mir MissingInputReasons) RegisterNoVersions(inputName string) { mir[inputName] = NoVersionsAvailable } +func (mir MissingInputReasons) RegisterNoResourceCheckFinished(inputName string) { + mir[inputName] = NoResourceCheckFinished +} + func (mir MissingInputReasons) RegisterPinnedVersionUnavailable(inputName string, version string) { mir[inputName] = fmt.Sprintf(PinnedVersionUnavailable, version) } diff --git a/atc/db/build_test.go b/atc/db/build_test.go index 4627f6756..29f513009 100644 --- a/atc/db/build_test.go +++ b/atc/db/build_test.go @@ -5,7 +5,6 @@ import ( "fmt" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/db/algorithm" "github.com/concourse/concourse/atc/event" @@ -24,6 +23,13 @@ var _ = Describe("Build", func() { Expect(err).ToNot(HaveOccurred()) }) + It("has no plan on creation", func() { + var err error + build, err := team.CreateOneOffBuild() + Expect(err).ToNot(HaveOccurred()) + Expect(build.HasPlan()).To(BeFalse()) + }) + Describe("Reload", func() { 
It("updates the model", func() { build, err := team.CreateOneOffBuild() @@ -157,6 +163,7 @@ var _ = Describe("Build", func() { found, err := build.Reload() Expect(err).NotTo(HaveOccurred()) Expect(found).To(BeTrue()) + Expect(build.HasPlan()).To(BeTrue()) Expect(build.PublicPlan()).To(Equal(plan.Public())) }) }) @@ -407,7 +414,7 @@ var _ = Describe("Build", func() { Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) - resourceConfigScope, err = resource.SetResourceConfig(logger, atc.Source{"some": "explicit-source"}, creds.VersionedResourceTypes{}) + resourceConfigScope, err = resource.SetResourceConfig(atc.Source{"some": "explicit-source"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) }) @@ -416,7 +423,7 @@ var _ = Describe("Build", func() { build, err := job.CreateBuild() Expect(err).ToNot(HaveOccurred()) - err = build.SaveOutput(logger, "some-type", atc.Source{"some": "explicit-source"}, creds.VersionedResourceTypes{}, atc.Version{"some": "version"}, []db.ResourceConfigMetadataField{ + err = build.SaveOutput("some-type", atc.Source{"some": "explicit-source"}, atc.VersionedResourceTypes{}, atc.Version{"some": "version"}, []db.ResourceConfigMetadataField{ { Name: "meta1", Value: "data1", @@ -457,7 +464,7 @@ var _ = Describe("Build", func() { build, err := job.CreateBuild() Expect(err).ToNot(HaveOccurred()) - err = build.SaveOutput(logger, "some-type", atc.Source{"some": "explicit-source"}, creds.VersionedResourceTypes{}, atc.Version{"some": "version"}, []db.ResourceConfigMetadataField{ + err = build.SaveOutput("some-type", atc.Source{"some": "explicit-source"}, atc.VersionedResourceTypes{}, atc.Version{"some": "version"}, []db.ResourceConfigMetadataField{ { Name: "meta1", Value: "data1", @@ -539,10 +546,10 @@ var _ = Describe("Build", func() { Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) - resourceConfigScope1, err = resource1.SetResourceConfig(logger, atc.Source{"some": "source-1"}, creds.VersionedResourceTypes{}) + 
resourceConfigScope1, err = resource1.SetResourceConfig(atc.Source{"some": "source-1"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) - _, err = resource2.SetResourceConfig(logger, atc.Source{"some": "source-2"}, creds.VersionedResourceTypes{}) + _, err = resource2.SetResourceConfig(atc.Source{"some": "source-2"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) err = resourceConfigScope1.SaveVersions([]atc.Version{ @@ -552,7 +559,7 @@ var _ = Describe("Build", func() { Expect(err).ToNot(HaveOccurred()) // This version should not be returned by the Resources method because it has a check order of 0 - created, err := resource1.SaveUncheckedVersion(atc.Version{"ver": "not-returned"}, nil, resourceConfigScope1.ResourceConfig(), creds.VersionedResourceTypes{}) + created, err := resource1.SaveUncheckedVersion(atc.Version{"ver": "not-returned"}, nil, resourceConfigScope1.ResourceConfig(), atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) Expect(created).To(BeTrue()) }) @@ -572,7 +579,7 @@ var _ = Describe("Build", func() { Expect(err).NotTo(HaveOccurred()) // save explicit output from 'put' - err = build.SaveOutput(logger, "some-type", atc.Source{"some": "source-2"}, creds.VersionedResourceTypes{}, atc.Version{"ver": "2"}, nil, "some-output-name", "some-other-resource") + err = build.SaveOutput("some-type", atc.Source{"some": "source-2"}, atc.VersionedResourceTypes{}, atc.Version{"ver": "2"}, nil, "some-output-name", "some-other-resource") Expect(err).NotTo(HaveOccurred()) inputs, outputs, err := build.Resources() @@ -751,6 +758,14 @@ var _ = Describe("Build", func() { }) Context("when inputs are satisfied", func() { + var ( + resourceConfigScope db.ResourceConfigScope + resource db.Resource + found bool + rcv db.ResourceConfigVersion + err error + ) + BeforeEach(func() { setupTx, err := dbConn.Begin() Expect(err).ToNot(HaveOccurred()) @@ -763,109 +778,148 @@ var _ = Describe("Build", func() { 
Expect(err).NotTo(HaveOccurred()) Expect(setupTx.Commit()).To(Succeed()) - resource, found, err := pipeline.Resource("some-resource") + resource, found, err = pipeline.Resource("some-resource") Expect(err).NotTo(HaveOccurred()) Expect(found).To(BeTrue()) - resourceConfigScope, err := resource.SetResourceConfig(logger, atc.Source{"some": "source"}, creds.VersionedResourceTypes{}) + resourceConfigScope, err = resource.SetResourceConfig(atc.Source{"some": "source"}, atc.VersionedResourceTypes{}) Expect(err).NotTo(HaveOccurred()) err = resourceConfigScope.SaveVersions([]atc.Version{{"version": "v5"}}) Expect(err).NotTo(HaveOccurred()) - rcv, found, err := resourceConfigScope.FindVersion(atc.Version{"version": "v5"}) + rcv, found, err = resourceConfigScope.FindVersion(atc.Version{"version": "v5"}) Expect(found).To(BeTrue()) Expect(err).NotTo(HaveOccurred()) - - err = job.SaveNextInputMapping(algorithm.InputMapping{ - "some-input": {VersionID: rcv.ID(), ResourceID: resource.ID(), FirstOccurrence: true}, - }) - Expect(err).NotTo(HaveOccurred()) - - expectedBuildPrep.Inputs = map[string]db.BuildPreparationStatus{ - "some-input": db.BuildPreparationStatusNotBlocking, - } }) - Context("when the build is started", func() { + Context("when resource check finished after build created", func() { BeforeEach(func() { - started, err := build.Start(atc.Plan{}) - Expect(started).To(BeTrue()) + updated, err := resourceConfigScope.UpdateLastCheckEndTime() + Expect(err).NotTo(HaveOccurred()) + Expect(updated).To(BeTrue()) + + reloaded, err := resource.Reload() + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue()) + + lastCheckEndTime := resource.LastCheckEndTime() + Expect(lastCheckEndTime.IsZero()).To(BeFalse()) + + err = job.SaveNextInputMapping(algorithm.InputMapping{ + "some-input": {VersionID: rcv.ID(), ResourceID: resource.ID(), FirstOccurrence: true}, + }) Expect(err).NotTo(HaveOccurred()) - stillExists, err := build.Reload() - Expect(stillExists).To(BeTrue()) - 
Expect(err).NotTo(HaveOccurred()) - - expectedBuildPrep.Inputs = map[string]db.BuildPreparationStatus{} + expectedBuildPrep.Inputs = map[string]db.BuildPreparationStatus{ + "some-input": db.BuildPreparationStatusNotBlocking, + } }) - It("returns build preparation", func() { - buildPrep, found, err := build.Preparation() - Expect(err).NotTo(HaveOccurred()) - Expect(found).To(BeTrue()) - Expect(buildPrep).To(Equal(expectedBuildPrep)) + Context("when the build is started", func() { + BeforeEach(func() { + started, err := build.Start(atc.Plan{}) + Expect(started).To(BeTrue()) + Expect(err).NotTo(HaveOccurred()) + + stillExists, err := build.Reload() + Expect(stillExists).To(BeTrue()) + Expect(err).NotTo(HaveOccurred()) + + expectedBuildPrep.Inputs = map[string]db.BuildPreparationStatus{} + }) + + It("returns build preparation", func() { + buildPrep, found, err := build.Preparation() + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + Expect(buildPrep).To(Equal(expectedBuildPrep)) + }) + }) + + Context("when pipeline is paused", func() { + BeforeEach(func() { + err := pipeline.Pause() + Expect(err).NotTo(HaveOccurred()) + + expectedBuildPrep.PausedPipeline = db.BuildPreparationStatusBlocking + }) + + It("returns build preparation with paused pipeline", func() { + buildPrep, found, err := build.Preparation() + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + Expect(buildPrep).To(Equal(expectedBuildPrep)) + }) + }) + + Context("when job is paused", func() { + BeforeEach(func() { + err := job.Pause() + Expect(err).NotTo(HaveOccurred()) + + expectedBuildPrep.PausedJob = db.BuildPreparationStatusBlocking + }) + + It("returns build preparation with paused pipeline", func() { + buildPrep, found, err := build.Preparation() + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + Expect(buildPrep).To(Equal(expectedBuildPrep)) + }) + }) + + Context("when max running builds is reached", func() { + BeforeEach(func() { + err := 
job.SetMaxInFlightReached(true) + Expect(err).NotTo(HaveOccurred()) + + expectedBuildPrep.MaxRunningBuilds = db.BuildPreparationStatusBlocking + }) + + It("returns build preparation with max in flight reached", func() { + buildPrep, found, err := build.Preparation() + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + Expect(buildPrep).To(Equal(expectedBuildPrep)) + }) + }) + + Context("when max running builds is de-reached", func() { + BeforeEach(func() { + err := job.SetMaxInFlightReached(true) + Expect(err).NotTo(HaveOccurred()) + + err = job.SetMaxInFlightReached(false) + Expect(err).NotTo(HaveOccurred()) + }) + + It("returns build preparation with max in flight not reached", func() { + buildPrep, found, err := build.Preparation() + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + Expect(buildPrep).To(Equal(expectedBuildPrep)) + }) }) }) - Context("when pipeline is paused", func() { + Context("when no resource check finished after build created", func() { BeforeEach(func() { - err := pipeline.Pause() + err = job.SaveNextInputMapping(algorithm.InputMapping{ + "some-input": {VersionID: rcv.ID(), ResourceID: resource.ID(), FirstOccurrence: true}, + }) Expect(err).NotTo(HaveOccurred()) - expectedBuildPrep.PausedPipeline = db.BuildPreparationStatusBlocking + expectedBuildPrep.Inputs = map[string]db.BuildPreparationStatus{ + "some-input": db.BuildPreparationStatusBlocking, + } + expectedBuildPrep.InputsSatisfied = db.BuildPreparationStatusBlocking + expectedBuildPrep.MissingInputReasons = db.MissingInputReasons{ + "some-input": db.NoResourceCheckFinished, + } }) - It("returns build preparation with paused pipeline", func() { - buildPrep, found, err := build.Preparation() - Expect(err).NotTo(HaveOccurred()) - Expect(found).To(BeTrue()) - Expect(buildPrep).To(Equal(expectedBuildPrep)) - }) - }) - - Context("when job is paused", func() { - BeforeEach(func() { - err := job.Pause() - Expect(err).NotTo(HaveOccurred()) - - 
expectedBuildPrep.PausedJob = db.BuildPreparationStatusBlocking - }) - - It("returns build preparation with paused pipeline", func() { - buildPrep, found, err := build.Preparation() - Expect(err).NotTo(HaveOccurred()) - Expect(found).To(BeTrue()) - Expect(buildPrep).To(Equal(expectedBuildPrep)) - }) - }) - - Context("when max running builds is reached", func() { - BeforeEach(func() { - err := job.SetMaxInFlightReached(true) - Expect(err).NotTo(HaveOccurred()) - - expectedBuildPrep.MaxRunningBuilds = db.BuildPreparationStatusBlocking - }) - - It("returns build preparation with max in flight reached", func() { - buildPrep, found, err := build.Preparation() - Expect(err).NotTo(HaveOccurred()) - Expect(found).To(BeTrue()) - Expect(buildPrep).To(Equal(expectedBuildPrep)) - }) - }) - - Context("when max running builds is de-reached", func() { - BeforeEach(func() { - err := job.SetMaxInFlightReached(true) - Expect(err).NotTo(HaveOccurred()) - - err = job.SetMaxInFlightReached(false) - Expect(err).NotTo(HaveOccurred()) - }) - - It("returns build preparation with max in flight not reached", func() { + It("returns build preparation with missing input reason", func() { buildPrep, found, err := build.Preparation() Expect(err).NotTo(HaveOccurred()) Expect(found).To(BeTrue()) @@ -876,6 +930,23 @@ var _ = Describe("Build", func() { Context("when inputs are not satisfied", func() { BeforeEach(func() { + pipeline, _, err = team.SavePipeline("some-pipeline", atc.Config{ + Resources: atc.ResourceConfigs{ + { + Name: "some-resource", + Type: "some-type", + Source: atc.Source{ + "source-config": "some-value", + }, + }, + }, + Jobs: atc.JobConfigs{ + { + Name: "some-job", + }, + }, + }, db.ConfigVersion(2), db.PipelineUnpaused) + Expect(err).ToNot(HaveOccurred()) expectedBuildPrep.InputsSatisfied = db.BuildPreparationStatusBlocking }) @@ -945,7 +1016,7 @@ var _ = Describe("Build", func() { Expect(found).To(BeTrue()) Expect(err).NotTo(HaveOccurred()) - resourceConfig6, err := 
resource6.SetResourceConfig(logger, atc.Source{"some": "source-6"}, creds.VersionedResourceTypes{}) + resourceConfig6, err := resource6.SetResourceConfig(atc.Source{"some": "source-6"}, atc.VersionedResourceTypes{}) Expect(err).NotTo(HaveOccurred()) err = resourceConfig6.SaveVersions([]atc.Version{{"version": "v6"}}) @@ -959,7 +1030,7 @@ var _ = Describe("Build", func() { Expect(err).NotTo(HaveOccurred()) Expect(found).To(BeTrue()) - resourceConfig1, err := resource1.SetResourceConfig(logger, atc.Source{"some": "source-1"}, creds.VersionedResourceTypes{}) + resourceConfig1, err := resource1.SetResourceConfig(atc.Source{"some": "source-1"}, atc.VersionedResourceTypes{}) Expect(err).NotTo(HaveOccurred()) err = resourceConfig1.SaveVersions([]atc.Version{{"version": "v1"}}) @@ -1117,7 +1188,7 @@ var _ = Describe("Build", func() { Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) - resourceConfig, err := resource.SetResourceConfig(logger, atc.Source{"some": "source"}, creds.VersionedResourceTypes{}) + resourceConfig, err := resource.SetResourceConfig(atc.Source{"some": "source"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) err = resourceConfig.SaveVersions([]atc.Version{atc.Version{"some": "version"}}) @@ -1149,7 +1220,7 @@ var _ = Describe("Build", func() { Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) - resourceConfig, err := resource.SetResourceConfig(logger, atc.Source{"some": "source"}, creds.VersionedResourceTypes{}) + resourceConfig, err := resource.SetResourceConfig(atc.Source{"some": "source"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) err = resourceConfig.SaveVersions([]atc.Version{atc.Version{"some": "weird-version"}}) @@ -1170,7 +1241,7 @@ var _ = Describe("Build", func() { Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) - weirdRC, err := weirdResource.SetResourceConfig(logger, atc.Source{"some": "source"}, creds.VersionedResourceTypes{}) + weirdRC, err := 
weirdResource.SetResourceConfig(atc.Source{"some": "source"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) err = weirdRC.SaveVersions([]atc.Version{atc.Version{"weird": "version"}}) diff --git a/atc/db/container_owner_test.go b/atc/db/container_owner_test.go index 626b68094..e4ff146b0 100644 --- a/atc/db/container_owner_test.go +++ b/atc/db/container_owner_test.go @@ -5,7 +5,6 @@ import ( sq "github.com/Masterminds/squirrel" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" . "github.com/onsi/ginkgo" @@ -25,8 +24,8 @@ var _ = Describe("ContainerOwner", func() { ) ownerExpiries = db.ContainerOwnerExpiries{ - Min: 5 * time.Minute, - Max: 5 * time.Minute, + Min: 5 * time.Minute, + Max: 5 * time.Minute, } BeforeEach(func() { @@ -41,12 +40,12 @@ var _ = Describe("ContainerOwner", func() { worker, err = workerFactory.SaveWorker(workerPayload, 0) Expect(err).NotTo(HaveOccurred()) - resourceConfig, err = resourceConfigFactory.FindOrCreateResourceConfig(logger, + resourceConfig, err = resourceConfigFactory.FindOrCreateResourceConfig( defaultWorkerResourceType.Type, atc.Source{ "some-type": "source", }, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).ToNot(HaveOccurred()) }) diff --git a/atc/db/container_repository_test.go b/atc/db/container_repository_test.go index da2677ccd..c6c73da1a 100644 --- a/atc/db/container_repository_test.go +++ b/atc/db/container_repository_test.go @@ -5,7 +5,6 @@ import ( sq "github.com/Masterminds/squirrel" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" "github.com/lib/pq" @@ -22,17 +21,16 @@ var _ = Describe("ContainerRepository", func() { ) expiries := db.ContainerOwnerExpiries{ - Min: 5 * time.Minute, - Max: 1 * time.Hour, + Min: 5 * time.Minute, + Max: 1 * time.Hour, } BeforeEach(func() { var err error resourceConfig, err = 
resourceConfigFactory.FindOrCreateResourceConfig( - logger, "some-base-resource-type", atc.Source{"some": "source"}, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).NotTo(HaveOccurred()) diff --git a/atc/db/dbfakes/fake_build.go b/atc/db/dbfakes/fake_build.go index 8bdb103e9..9115bb22a 100644 --- a/atc/db/dbfakes/fake_build.go +++ b/atc/db/dbfakes/fake_build.go @@ -8,7 +8,6 @@ import ( "code.cloudfoundry.org/lager" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/db/lock" ) @@ -123,6 +122,16 @@ type FakeBuild struct { finishReturnsOnCall map[int]struct { result1 error } + HasPlanStub func() bool + hasPlanMutex sync.RWMutex + hasPlanArgsForCall []struct { + } + hasPlanReturns struct { + result1 bool + } + hasPlanReturnsOnCall map[int]struct { + result1 bool + } IDStub func() int iDMutex sync.RWMutex iDArgsForCall []struct { @@ -371,17 +380,16 @@ type FakeBuild struct { saveImageResourceVersionReturnsOnCall map[int]struct { result1 error } - SaveOutputStub func(lager.Logger, string, atc.Source, creds.VersionedResourceTypes, atc.Version, db.ResourceConfigMetadataFields, string, string) error + SaveOutputStub func(string, atc.Source, atc.VersionedResourceTypes, atc.Version, db.ResourceConfigMetadataFields, string, string) error saveOutputMutex sync.RWMutex saveOutputArgsForCall []struct { - arg1 lager.Logger - arg2 string - arg3 atc.Source - arg4 creds.VersionedResourceTypes - arg5 atc.Version - arg6 db.ResourceConfigMetadataFields + arg1 string + arg2 atc.Source + arg3 atc.VersionedResourceTypes + arg4 atc.Version + arg5 db.ResourceConfigMetadataFields + arg6 string arg7 string - arg8 string } saveOutputReturns struct { result1 error @@ -1023,6 +1031,58 @@ func (fake *FakeBuild) FinishReturnsOnCall(i int, result1 error) { }{result1} } +func (fake *FakeBuild) HasPlan() bool { + fake.hasPlanMutex.Lock() + ret, specificReturn := 
fake.hasPlanReturnsOnCall[len(fake.hasPlanArgsForCall)] + fake.hasPlanArgsForCall = append(fake.hasPlanArgsForCall, struct { + }{}) + fake.recordInvocation("HasPlan", []interface{}{}) + fake.hasPlanMutex.Unlock() + if fake.HasPlanStub != nil { + return fake.HasPlanStub() + } + if specificReturn { + return ret.result1 + } + fakeReturns := fake.hasPlanReturns + return fakeReturns.result1 +} + +func (fake *FakeBuild) HasPlanCallCount() int { + fake.hasPlanMutex.RLock() + defer fake.hasPlanMutex.RUnlock() + return len(fake.hasPlanArgsForCall) +} + +func (fake *FakeBuild) HasPlanCalls(stub func() bool) { + fake.hasPlanMutex.Lock() + defer fake.hasPlanMutex.Unlock() + fake.HasPlanStub = stub +} + +func (fake *FakeBuild) HasPlanReturns(result1 bool) { + fake.hasPlanMutex.Lock() + defer fake.hasPlanMutex.Unlock() + fake.HasPlanStub = nil + fake.hasPlanReturns = struct { + result1 bool + }{result1} +} + +func (fake *FakeBuild) HasPlanReturnsOnCall(i int, result1 bool) { + fake.hasPlanMutex.Lock() + defer fake.hasPlanMutex.Unlock() + fake.HasPlanStub = nil + if fake.hasPlanReturnsOnCall == nil { + fake.hasPlanReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.hasPlanReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + func (fake *FakeBuild) ID() int { fake.iDMutex.Lock() ret, specificReturn := fake.iDReturnsOnCall[len(fake.iDArgsForCall)] @@ -2259,23 +2319,22 @@ func (fake *FakeBuild) SaveImageResourceVersionReturnsOnCall(i int, result1 erro }{result1} } -func (fake *FakeBuild) SaveOutput(arg1 lager.Logger, arg2 string, arg3 atc.Source, arg4 creds.VersionedResourceTypes, arg5 atc.Version, arg6 db.ResourceConfigMetadataFields, arg7 string, arg8 string) error { +func (fake *FakeBuild) SaveOutput(arg1 string, arg2 atc.Source, arg3 atc.VersionedResourceTypes, arg4 atc.Version, arg5 db.ResourceConfigMetadataFields, arg6 string, arg7 string) error { fake.saveOutputMutex.Lock() ret, specificReturn := 
fake.saveOutputReturnsOnCall[len(fake.saveOutputArgsForCall)] fake.saveOutputArgsForCall = append(fake.saveOutputArgsForCall, struct { - arg1 lager.Logger - arg2 string - arg3 atc.Source - arg4 creds.VersionedResourceTypes - arg5 atc.Version - arg6 db.ResourceConfigMetadataFields + arg1 string + arg2 atc.Source + arg3 atc.VersionedResourceTypes + arg4 atc.Version + arg5 db.ResourceConfigMetadataFields + arg6 string arg7 string - arg8 string - }{arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8}) - fake.recordInvocation("SaveOutput", []interface{}{arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8}) + }{arg1, arg2, arg3, arg4, arg5, arg6, arg7}) + fake.recordInvocation("SaveOutput", []interface{}{arg1, arg2, arg3, arg4, arg5, arg6, arg7}) fake.saveOutputMutex.Unlock() if fake.SaveOutputStub != nil { - return fake.SaveOutputStub(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) + return fake.SaveOutputStub(arg1, arg2, arg3, arg4, arg5, arg6, arg7) } if specificReturn { return ret.result1 @@ -2290,17 +2349,17 @@ func (fake *FakeBuild) SaveOutputCallCount() int { return len(fake.saveOutputArgsForCall) } -func (fake *FakeBuild) SaveOutputCalls(stub func(lager.Logger, string, atc.Source, creds.VersionedResourceTypes, atc.Version, db.ResourceConfigMetadataFields, string, string) error) { +func (fake *FakeBuild) SaveOutputCalls(stub func(string, atc.Source, atc.VersionedResourceTypes, atc.Version, db.ResourceConfigMetadataFields, string, string) error) { fake.saveOutputMutex.Lock() defer fake.saveOutputMutex.Unlock() fake.SaveOutputStub = stub } -func (fake *FakeBuild) SaveOutputArgsForCall(i int) (lager.Logger, string, atc.Source, creds.VersionedResourceTypes, atc.Version, db.ResourceConfigMetadataFields, string, string) { +func (fake *FakeBuild) SaveOutputArgsForCall(i int) (string, atc.Source, atc.VersionedResourceTypes, atc.Version, db.ResourceConfigMetadataFields, string, string) { fake.saveOutputMutex.RLock() defer fake.saveOutputMutex.RUnlock() argsForCall := 
fake.saveOutputArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6, argsForCall.arg7, argsForCall.arg8 + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6, argsForCall.arg7 } func (fake *FakeBuild) SaveOutputReturns(result1 error) { @@ -2910,6 +2969,8 @@ func (fake *FakeBuild) Invocations() map[string][][]interface{} { defer fake.eventsMutex.RUnlock() fake.finishMutex.RLock() defer fake.finishMutex.RUnlock() + fake.hasPlanMutex.RLock() + defer fake.hasPlanMutex.RUnlock() fake.iDMutex.RLock() defer fake.iDMutex.RUnlock() fake.interceptibleMutex.RLock() diff --git a/atc/db/dbfakes/fake_resource.go b/atc/db/dbfakes/fake_resource.go index b59304076..bd5e37d98 100644 --- a/atc/db/dbfakes/fake_resource.go +++ b/atc/db/dbfakes/fake_resource.go @@ -5,9 +5,7 @@ import ( "sync" "time" - "code.cloudfoundry.org/lager" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" ) @@ -262,13 +260,13 @@ type FakeResource struct { result2 bool result3 error } - SaveUncheckedVersionStub func(atc.Version, db.ResourceConfigMetadataFields, db.ResourceConfig, creds.VersionedResourceTypes) (bool, error) + SaveUncheckedVersionStub func(atc.Version, db.ResourceConfigMetadataFields, db.ResourceConfig, atc.VersionedResourceTypes) (bool, error) saveUncheckedVersionMutex sync.RWMutex saveUncheckedVersionArgsForCall []struct { arg1 atc.Version arg2 db.ResourceConfigMetadataFields arg3 db.ResourceConfig - arg4 creds.VersionedResourceTypes + arg4 atc.VersionedResourceTypes } saveUncheckedVersionReturns struct { result1 bool @@ -300,12 +298,11 @@ type FakeResource struct { setPinCommentReturnsOnCall map[int]struct { result1 error } - SetResourceConfigStub func(lager.Logger, atc.Source, creds.VersionedResourceTypes) (db.ResourceConfigScope, error) + SetResourceConfigStub func(atc.Source, 
atc.VersionedResourceTypes) (db.ResourceConfigScope, error) setResourceConfigMutex sync.RWMutex setResourceConfigArgsForCall []struct { - arg1 lager.Logger - arg2 atc.Source - arg3 creds.VersionedResourceTypes + arg1 atc.Source + arg2 atc.VersionedResourceTypes } setResourceConfigReturns struct { result1 db.ResourceConfigScope @@ -365,6 +362,20 @@ type FakeResource struct { unpinVersionReturnsOnCall map[int]struct { result1 error } + UpdateMetadataStub func(atc.Version, db.ResourceConfigMetadataFields) (bool, error) + updateMetadataMutex sync.RWMutex + updateMetadataArgsForCall []struct { + arg1 atc.Version + arg2 db.ResourceConfigMetadataFields + } + updateMetadataReturns struct { + result1 bool + result2 error + } + updateMetadataReturnsOnCall map[int]struct { + result1 bool + result2 error + } VersionsStub func(db.Page) ([]atc.ResourceVersion, db.Pagination, bool, error) versionsMutex sync.RWMutex versionsArgsForCall []struct { @@ -1685,14 +1696,14 @@ func (fake *FakeResource) ResourceConfigVersionIDReturnsOnCall(i int, result1 in }{result1, result2, result3} } -func (fake *FakeResource) SaveUncheckedVersion(arg1 atc.Version, arg2 db.ResourceConfigMetadataFields, arg3 db.ResourceConfig, arg4 creds.VersionedResourceTypes) (bool, error) { +func (fake *FakeResource) SaveUncheckedVersion(arg1 atc.Version, arg2 db.ResourceConfigMetadataFields, arg3 db.ResourceConfig, arg4 atc.VersionedResourceTypes) (bool, error) { fake.saveUncheckedVersionMutex.Lock() ret, specificReturn := fake.saveUncheckedVersionReturnsOnCall[len(fake.saveUncheckedVersionArgsForCall)] fake.saveUncheckedVersionArgsForCall = append(fake.saveUncheckedVersionArgsForCall, struct { arg1 atc.Version arg2 db.ResourceConfigMetadataFields arg3 db.ResourceConfig - arg4 creds.VersionedResourceTypes + arg4 atc.VersionedResourceTypes }{arg1, arg2, arg3, arg4}) fake.recordInvocation("SaveUncheckedVersion", []interface{}{arg1, arg2, arg3, arg4}) fake.saveUncheckedVersionMutex.Unlock() @@ -1712,13 +1723,13 @@ 
func (fake *FakeResource) SaveUncheckedVersionCallCount() int { return len(fake.saveUncheckedVersionArgsForCall) } -func (fake *FakeResource) SaveUncheckedVersionCalls(stub func(atc.Version, db.ResourceConfigMetadataFields, db.ResourceConfig, creds.VersionedResourceTypes) (bool, error)) { +func (fake *FakeResource) SaveUncheckedVersionCalls(stub func(atc.Version, db.ResourceConfigMetadataFields, db.ResourceConfig, atc.VersionedResourceTypes) (bool, error)) { fake.saveUncheckedVersionMutex.Lock() defer fake.saveUncheckedVersionMutex.Unlock() fake.SaveUncheckedVersionStub = stub } -func (fake *FakeResource) SaveUncheckedVersionArgsForCall(i int) (atc.Version, db.ResourceConfigMetadataFields, db.ResourceConfig, creds.VersionedResourceTypes) { +func (fake *FakeResource) SaveUncheckedVersionArgsForCall(i int) (atc.Version, db.ResourceConfigMetadataFields, db.ResourceConfig, atc.VersionedResourceTypes) { fake.saveUncheckedVersionMutex.RLock() defer fake.saveUncheckedVersionMutex.RUnlock() argsForCall := fake.saveUncheckedVersionArgsForCall[i] @@ -1871,18 +1882,17 @@ func (fake *FakeResource) SetPinCommentReturnsOnCall(i int, result1 error) { }{result1} } -func (fake *FakeResource) SetResourceConfig(arg1 lager.Logger, arg2 atc.Source, arg3 creds.VersionedResourceTypes) (db.ResourceConfigScope, error) { +func (fake *FakeResource) SetResourceConfig(arg1 atc.Source, arg2 atc.VersionedResourceTypes) (db.ResourceConfigScope, error) { fake.setResourceConfigMutex.Lock() ret, specificReturn := fake.setResourceConfigReturnsOnCall[len(fake.setResourceConfigArgsForCall)] fake.setResourceConfigArgsForCall = append(fake.setResourceConfigArgsForCall, struct { - arg1 lager.Logger - arg2 atc.Source - arg3 creds.VersionedResourceTypes - }{arg1, arg2, arg3}) - fake.recordInvocation("SetResourceConfig", []interface{}{arg1, arg2, arg3}) + arg1 atc.Source + arg2 atc.VersionedResourceTypes + }{arg1, arg2}) + fake.recordInvocation("SetResourceConfig", []interface{}{arg1, arg2}) 
fake.setResourceConfigMutex.Unlock() if fake.SetResourceConfigStub != nil { - return fake.SetResourceConfigStub(arg1, arg2, arg3) + return fake.SetResourceConfigStub(arg1, arg2) } if specificReturn { return ret.result1, ret.result2 @@ -1897,17 +1907,17 @@ func (fake *FakeResource) SetResourceConfigCallCount() int { return len(fake.setResourceConfigArgsForCall) } -func (fake *FakeResource) SetResourceConfigCalls(stub func(lager.Logger, atc.Source, creds.VersionedResourceTypes) (db.ResourceConfigScope, error)) { +func (fake *FakeResource) SetResourceConfigCalls(stub func(atc.Source, atc.VersionedResourceTypes) (db.ResourceConfigScope, error)) { fake.setResourceConfigMutex.Lock() defer fake.setResourceConfigMutex.Unlock() fake.SetResourceConfigStub = stub } -func (fake *FakeResource) SetResourceConfigArgsForCall(i int) (lager.Logger, atc.Source, creds.VersionedResourceTypes) { +func (fake *FakeResource) SetResourceConfigArgsForCall(i int) (atc.Source, atc.VersionedResourceTypes) { fake.setResourceConfigMutex.RLock() defer fake.setResourceConfigMutex.RUnlock() argsForCall := fake.setResourceConfigArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 + return argsForCall.arg1, argsForCall.arg2 } func (fake *FakeResource) SetResourceConfigReturns(result1 db.ResourceConfigScope, result2 error) { @@ -2196,6 +2206,70 @@ func (fake *FakeResource) UnpinVersionReturnsOnCall(i int, result1 error) { }{result1} } +func (fake *FakeResource) UpdateMetadata(arg1 atc.Version, arg2 db.ResourceConfigMetadataFields) (bool, error) { + fake.updateMetadataMutex.Lock() + ret, specificReturn := fake.updateMetadataReturnsOnCall[len(fake.updateMetadataArgsForCall)] + fake.updateMetadataArgsForCall = append(fake.updateMetadataArgsForCall, struct { + arg1 atc.Version + arg2 db.ResourceConfigMetadataFields + }{arg1, arg2}) + fake.recordInvocation("UpdateMetadata", []interface{}{arg1, arg2}) + fake.updateMetadataMutex.Unlock() + if fake.UpdateMetadataStub != nil { + return 
fake.UpdateMetadataStub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2 + } + fakeReturns := fake.updateMetadataReturns + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeResource) UpdateMetadataCallCount() int { + fake.updateMetadataMutex.RLock() + defer fake.updateMetadataMutex.RUnlock() + return len(fake.updateMetadataArgsForCall) +} + +func (fake *FakeResource) UpdateMetadataCalls(stub func(atc.Version, db.ResourceConfigMetadataFields) (bool, error)) { + fake.updateMetadataMutex.Lock() + defer fake.updateMetadataMutex.Unlock() + fake.UpdateMetadataStub = stub +} + +func (fake *FakeResource) UpdateMetadataArgsForCall(i int) (atc.Version, db.ResourceConfigMetadataFields) { + fake.updateMetadataMutex.RLock() + defer fake.updateMetadataMutex.RUnlock() + argsForCall := fake.updateMetadataArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeResource) UpdateMetadataReturns(result1 bool, result2 error) { + fake.updateMetadataMutex.Lock() + defer fake.updateMetadataMutex.Unlock() + fake.UpdateMetadataStub = nil + fake.updateMetadataReturns = struct { + result1 bool + result2 error + }{result1, result2} +} + +func (fake *FakeResource) UpdateMetadataReturnsOnCall(i int, result1 bool, result2 error) { + fake.updateMetadataMutex.Lock() + defer fake.updateMetadataMutex.Unlock() + fake.UpdateMetadataStub = nil + if fake.updateMetadataReturnsOnCall == nil { + fake.updateMetadataReturnsOnCall = make(map[int]struct { + result1 bool + result2 error + }) + } + fake.updateMetadataReturnsOnCall[i] = struct { + result1 bool + result2 error + }{result1, result2} +} + func (fake *FakeResource) Versions(arg1 db.Page) ([]atc.ResourceVersion, db.Pagination, bool, error) { fake.versionsMutex.Lock() ret, specificReturn := fake.versionsReturnsOnCall[len(fake.versionsArgsForCall)] @@ -2386,6 +2460,8 @@ func (fake *FakeResource) Invocations() map[string][][]interface{} { defer fake.typeMutex.RUnlock() 
fake.unpinVersionMutex.RLock() defer fake.unpinVersionMutex.RUnlock() + fake.updateMetadataMutex.RLock() + defer fake.updateMetadataMutex.RUnlock() fake.versionsMutex.RLock() defer fake.versionsMutex.RUnlock() fake.webhookTokenMutex.RLock() diff --git a/atc/db/dbfakes/fake_resource_cache_factory.go b/atc/db/dbfakes/fake_resource_cache_factory.go index 5a7d643d9..62c9a6c0c 100644 --- a/atc/db/dbfakes/fake_resource_cache_factory.go +++ b/atc/db/dbfakes/fake_resource_cache_factory.go @@ -4,23 +4,20 @@ package dbfakes import ( "sync" - "code.cloudfoundry.org/lager" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" ) type FakeResourceCacheFactory struct { - FindOrCreateResourceCacheStub func(lager.Logger, db.ResourceCacheUser, string, atc.Version, atc.Source, atc.Params, creds.VersionedResourceTypes) (db.UsedResourceCache, error) + FindOrCreateResourceCacheStub func(db.ResourceCacheUser, string, atc.Version, atc.Source, atc.Params, atc.VersionedResourceTypes) (db.UsedResourceCache, error) findOrCreateResourceCacheMutex sync.RWMutex findOrCreateResourceCacheArgsForCall []struct { - arg1 lager.Logger - arg2 db.ResourceCacheUser - arg3 string - arg4 atc.Version - arg5 atc.Source - arg6 atc.Params - arg7 creds.VersionedResourceTypes + arg1 db.ResourceCacheUser + arg2 string + arg3 atc.Version + arg4 atc.Source + arg5 atc.Params + arg6 atc.VersionedResourceTypes } findOrCreateResourceCacheReturns struct { result1 db.UsedResourceCache @@ -59,22 +56,21 @@ type FakeResourceCacheFactory struct { invocationsMutex sync.RWMutex } -func (fake *FakeResourceCacheFactory) FindOrCreateResourceCache(arg1 lager.Logger, arg2 db.ResourceCacheUser, arg3 string, arg4 atc.Version, arg5 atc.Source, arg6 atc.Params, arg7 creds.VersionedResourceTypes) (db.UsedResourceCache, error) { +func (fake *FakeResourceCacheFactory) FindOrCreateResourceCache(arg1 db.ResourceCacheUser, arg2 string, arg3 atc.Version, arg4 atc.Source, arg5 
atc.Params, arg6 atc.VersionedResourceTypes) (db.UsedResourceCache, error) { fake.findOrCreateResourceCacheMutex.Lock() ret, specificReturn := fake.findOrCreateResourceCacheReturnsOnCall[len(fake.findOrCreateResourceCacheArgsForCall)] fake.findOrCreateResourceCacheArgsForCall = append(fake.findOrCreateResourceCacheArgsForCall, struct { - arg1 lager.Logger - arg2 db.ResourceCacheUser - arg3 string - arg4 atc.Version - arg5 atc.Source - arg6 atc.Params - arg7 creds.VersionedResourceTypes - }{arg1, arg2, arg3, arg4, arg5, arg6, arg7}) - fake.recordInvocation("FindOrCreateResourceCache", []interface{}{arg1, arg2, arg3, arg4, arg5, arg6, arg7}) + arg1 db.ResourceCacheUser + arg2 string + arg3 atc.Version + arg4 atc.Source + arg5 atc.Params + arg6 atc.VersionedResourceTypes + }{arg1, arg2, arg3, arg4, arg5, arg6}) + fake.recordInvocation("FindOrCreateResourceCache", []interface{}{arg1, arg2, arg3, arg4, arg5, arg6}) fake.findOrCreateResourceCacheMutex.Unlock() if fake.FindOrCreateResourceCacheStub != nil { - return fake.FindOrCreateResourceCacheStub(arg1, arg2, arg3, arg4, arg5, arg6, arg7) + return fake.FindOrCreateResourceCacheStub(arg1, arg2, arg3, arg4, arg5, arg6) } if specificReturn { return ret.result1, ret.result2 @@ -89,17 +85,17 @@ func (fake *FakeResourceCacheFactory) FindOrCreateResourceCacheCallCount() int { return len(fake.findOrCreateResourceCacheArgsForCall) } -func (fake *FakeResourceCacheFactory) FindOrCreateResourceCacheCalls(stub func(lager.Logger, db.ResourceCacheUser, string, atc.Version, atc.Source, atc.Params, creds.VersionedResourceTypes) (db.UsedResourceCache, error)) { +func (fake *FakeResourceCacheFactory) FindOrCreateResourceCacheCalls(stub func(db.ResourceCacheUser, string, atc.Version, atc.Source, atc.Params, atc.VersionedResourceTypes) (db.UsedResourceCache, error)) { fake.findOrCreateResourceCacheMutex.Lock() defer fake.findOrCreateResourceCacheMutex.Unlock() fake.FindOrCreateResourceCacheStub = stub } -func (fake 
*FakeResourceCacheFactory) FindOrCreateResourceCacheArgsForCall(i int) (lager.Logger, db.ResourceCacheUser, string, atc.Version, atc.Source, atc.Params, creds.VersionedResourceTypes) { +func (fake *FakeResourceCacheFactory) FindOrCreateResourceCacheArgsForCall(i int) (db.ResourceCacheUser, string, atc.Version, atc.Source, atc.Params, atc.VersionedResourceTypes) { fake.findOrCreateResourceCacheMutex.RLock() defer fake.findOrCreateResourceCacheMutex.RUnlock() argsForCall := fake.findOrCreateResourceCacheArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6, argsForCall.arg7 + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6 } func (fake *FakeResourceCacheFactory) FindOrCreateResourceCacheReturns(result1 db.UsedResourceCache, result2 error) { diff --git a/atc/db/dbfakes/fake_resource_config_factory.go b/atc/db/dbfakes/fake_resource_config_factory.go index c6351cd56..b2398a151 100644 --- a/atc/db/dbfakes/fake_resource_config_factory.go +++ b/atc/db/dbfakes/fake_resource_config_factory.go @@ -4,9 +4,7 @@ package dbfakes import ( "sync" - "code.cloudfoundry.org/lager" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" ) @@ -21,13 +19,12 @@ type FakeResourceConfigFactory struct { cleanUnreferencedConfigsReturnsOnCall map[int]struct { result1 error } - FindOrCreateResourceConfigStub func(lager.Logger, string, atc.Source, creds.VersionedResourceTypes) (db.ResourceConfig, error) + FindOrCreateResourceConfigStub func(string, atc.Source, atc.VersionedResourceTypes) (db.ResourceConfig, error) findOrCreateResourceConfigMutex sync.RWMutex findOrCreateResourceConfigArgsForCall []struct { - arg1 lager.Logger - arg2 string - arg3 atc.Source - arg4 creds.VersionedResourceTypes + arg1 string + arg2 atc.Source + arg3 atc.VersionedResourceTypes } 
findOrCreateResourceConfigReturns struct { result1 db.ResourceConfig @@ -108,19 +105,18 @@ func (fake *FakeResourceConfigFactory) CleanUnreferencedConfigsReturnsOnCall(i i }{result1} } -func (fake *FakeResourceConfigFactory) FindOrCreateResourceConfig(arg1 lager.Logger, arg2 string, arg3 atc.Source, arg4 creds.VersionedResourceTypes) (db.ResourceConfig, error) { +func (fake *FakeResourceConfigFactory) FindOrCreateResourceConfig(arg1 string, arg2 atc.Source, arg3 atc.VersionedResourceTypes) (db.ResourceConfig, error) { fake.findOrCreateResourceConfigMutex.Lock() ret, specificReturn := fake.findOrCreateResourceConfigReturnsOnCall[len(fake.findOrCreateResourceConfigArgsForCall)] fake.findOrCreateResourceConfigArgsForCall = append(fake.findOrCreateResourceConfigArgsForCall, struct { - arg1 lager.Logger - arg2 string - arg3 atc.Source - arg4 creds.VersionedResourceTypes - }{arg1, arg2, arg3, arg4}) - fake.recordInvocation("FindOrCreateResourceConfig", []interface{}{arg1, arg2, arg3, arg4}) + arg1 string + arg2 atc.Source + arg3 atc.VersionedResourceTypes + }{arg1, arg2, arg3}) + fake.recordInvocation("FindOrCreateResourceConfig", []interface{}{arg1, arg2, arg3}) fake.findOrCreateResourceConfigMutex.Unlock() if fake.FindOrCreateResourceConfigStub != nil { - return fake.FindOrCreateResourceConfigStub(arg1, arg2, arg3, arg4) + return fake.FindOrCreateResourceConfigStub(arg1, arg2, arg3) } if specificReturn { return ret.result1, ret.result2 @@ -135,17 +131,17 @@ func (fake *FakeResourceConfigFactory) FindOrCreateResourceConfigCallCount() int return len(fake.findOrCreateResourceConfigArgsForCall) } -func (fake *FakeResourceConfigFactory) FindOrCreateResourceConfigCalls(stub func(lager.Logger, string, atc.Source, creds.VersionedResourceTypes) (db.ResourceConfig, error)) { +func (fake *FakeResourceConfigFactory) FindOrCreateResourceConfigCalls(stub func(string, atc.Source, atc.VersionedResourceTypes) (db.ResourceConfig, error)) { fake.findOrCreateResourceConfigMutex.Lock() 
defer fake.findOrCreateResourceConfigMutex.Unlock() fake.FindOrCreateResourceConfigStub = stub } -func (fake *FakeResourceConfigFactory) FindOrCreateResourceConfigArgsForCall(i int) (lager.Logger, string, atc.Source, creds.VersionedResourceTypes) { +func (fake *FakeResourceConfigFactory) FindOrCreateResourceConfigArgsForCall(i int) (string, atc.Source, atc.VersionedResourceTypes) { fake.findOrCreateResourceConfigMutex.RLock() defer fake.findOrCreateResourceConfigMutex.RUnlock() argsForCall := fake.findOrCreateResourceConfigArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 } func (fake *FakeResourceConfigFactory) FindOrCreateResourceConfigReturns(result1 db.ResourceConfig, result2 error) { diff --git a/atc/db/dbfakes/fake_resource_config_scope.go b/atc/db/dbfakes/fake_resource_config_scope.go index 9371bd84d..bb2385eed 100644 --- a/atc/db/dbfakes/fake_resource_config_scope.go +++ b/atc/db/dbfakes/fake_resource_config_scope.go @@ -12,11 +12,10 @@ import ( ) type FakeResourceConfigScope struct { - AcquireResourceCheckingLockStub func(lager.Logger, time.Duration) (lock.Lock, bool, error) + AcquireResourceCheckingLockStub func(lager.Logger) (lock.Lock, bool, error) acquireResourceCheckingLockMutex sync.RWMutex acquireResourceCheckingLockArgsForCall []struct { arg1 lager.Logger - arg2 time.Duration } acquireResourceCheckingLockReturns struct { result1 lock.Lock @@ -149,17 +148,16 @@ type FakeResourceConfigScope struct { invocationsMutex sync.RWMutex } -func (fake *FakeResourceConfigScope) AcquireResourceCheckingLock(arg1 lager.Logger, arg2 time.Duration) (lock.Lock, bool, error) { +func (fake *FakeResourceConfigScope) AcquireResourceCheckingLock(arg1 lager.Logger) (lock.Lock, bool, error) { fake.acquireResourceCheckingLockMutex.Lock() ret, specificReturn := fake.acquireResourceCheckingLockReturnsOnCall[len(fake.acquireResourceCheckingLockArgsForCall)] 
fake.acquireResourceCheckingLockArgsForCall = append(fake.acquireResourceCheckingLockArgsForCall, struct { arg1 lager.Logger - arg2 time.Duration - }{arg1, arg2}) - fake.recordInvocation("AcquireResourceCheckingLock", []interface{}{arg1, arg2}) + }{arg1}) + fake.recordInvocation("AcquireResourceCheckingLock", []interface{}{arg1}) fake.acquireResourceCheckingLockMutex.Unlock() if fake.AcquireResourceCheckingLockStub != nil { - return fake.AcquireResourceCheckingLockStub(arg1, arg2) + return fake.AcquireResourceCheckingLockStub(arg1) } if specificReturn { return ret.result1, ret.result2, ret.result3 @@ -174,17 +172,17 @@ func (fake *FakeResourceConfigScope) AcquireResourceCheckingLockCallCount() int return len(fake.acquireResourceCheckingLockArgsForCall) } -func (fake *FakeResourceConfigScope) AcquireResourceCheckingLockCalls(stub func(lager.Logger, time.Duration) (lock.Lock, bool, error)) { +func (fake *FakeResourceConfigScope) AcquireResourceCheckingLockCalls(stub func(lager.Logger) (lock.Lock, bool, error)) { fake.acquireResourceCheckingLockMutex.Lock() defer fake.acquireResourceCheckingLockMutex.Unlock() fake.AcquireResourceCheckingLockStub = stub } -func (fake *FakeResourceConfigScope) AcquireResourceCheckingLockArgsForCall(i int) (lager.Logger, time.Duration) { +func (fake *FakeResourceConfigScope) AcquireResourceCheckingLockArgsForCall(i int) lager.Logger { fake.acquireResourceCheckingLockMutex.RLock() defer fake.acquireResourceCheckingLockMutex.RUnlock() argsForCall := fake.acquireResourceCheckingLockArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 + return argsForCall.arg1 } func (fake *FakeResourceConfigScope) AcquireResourceCheckingLockReturns(result1 lock.Lock, result2 bool, result3 error) { diff --git a/atc/db/dbfakes/fake_resource_type.go b/atc/db/dbfakes/fake_resource_type.go index f65b5853b..af1c9aeba 100644 --- a/atc/db/dbfakes/fake_resource_type.go +++ b/atc/db/dbfakes/fake_resource_type.go @@ -4,9 +4,7 @@ package dbfakes import ( "sync" - 
"code.cloudfoundry.org/lager" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" ) @@ -104,12 +102,11 @@ type FakeResourceType struct { setCheckSetupErrorReturnsOnCall map[int]struct { result1 error } - SetResourceConfigStub func(lager.Logger, atc.Source, creds.VersionedResourceTypes) (db.ResourceConfigScope, error) + SetResourceConfigStub func(atc.Source, atc.VersionedResourceTypes) (db.ResourceConfigScope, error) setResourceConfigMutex sync.RWMutex setResourceConfigArgsForCall []struct { - arg1 lager.Logger - arg2 atc.Source - arg3 creds.VersionedResourceTypes + arg1 atc.Source + arg2 atc.VersionedResourceTypes } setResourceConfigReturns struct { result1 db.ResourceConfigScope @@ -652,18 +649,17 @@ func (fake *FakeResourceType) SetCheckSetupErrorReturnsOnCall(i int, result1 err }{result1} } -func (fake *FakeResourceType) SetResourceConfig(arg1 lager.Logger, arg2 atc.Source, arg3 creds.VersionedResourceTypes) (db.ResourceConfigScope, error) { +func (fake *FakeResourceType) SetResourceConfig(arg1 atc.Source, arg2 atc.VersionedResourceTypes) (db.ResourceConfigScope, error) { fake.setResourceConfigMutex.Lock() ret, specificReturn := fake.setResourceConfigReturnsOnCall[len(fake.setResourceConfigArgsForCall)] fake.setResourceConfigArgsForCall = append(fake.setResourceConfigArgsForCall, struct { - arg1 lager.Logger - arg2 atc.Source - arg3 creds.VersionedResourceTypes - }{arg1, arg2, arg3}) - fake.recordInvocation("SetResourceConfig", []interface{}{arg1, arg2, arg3}) + arg1 atc.Source + arg2 atc.VersionedResourceTypes + }{arg1, arg2}) + fake.recordInvocation("SetResourceConfig", []interface{}{arg1, arg2}) fake.setResourceConfigMutex.Unlock() if fake.SetResourceConfigStub != nil { - return fake.SetResourceConfigStub(arg1, arg2, arg3) + return fake.SetResourceConfigStub(arg1, arg2) } if specificReturn { return ret.result1, ret.result2 @@ -678,17 +674,17 @@ func (fake *FakeResourceType) 
SetResourceConfigCallCount() int { return len(fake.setResourceConfigArgsForCall) } -func (fake *FakeResourceType) SetResourceConfigCalls(stub func(lager.Logger, atc.Source, creds.VersionedResourceTypes) (db.ResourceConfigScope, error)) { +func (fake *FakeResourceType) SetResourceConfigCalls(stub func(atc.Source, atc.VersionedResourceTypes) (db.ResourceConfigScope, error)) { fake.setResourceConfigMutex.Lock() defer fake.setResourceConfigMutex.Unlock() fake.SetResourceConfigStub = stub } -func (fake *FakeResourceType) SetResourceConfigArgsForCall(i int) (lager.Logger, atc.Source, creds.VersionedResourceTypes) { +func (fake *FakeResourceType) SetResourceConfigArgsForCall(i int) (atc.Source, atc.VersionedResourceTypes) { fake.setResourceConfigMutex.RLock() defer fake.setResourceConfigMutex.RUnlock() argsForCall := fake.setResourceConfigArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 + return argsForCall.arg1, argsForCall.arg2 } func (fake *FakeResourceType) SetResourceConfigReturns(result1 db.ResourceConfigScope, result2 error) { diff --git a/atc/db/dbfakes/fake_team.go b/atc/db/dbfakes/fake_team.go index af1db6ff7..cb663aad4 100644 --- a/atc/db/dbfakes/fake_team.go +++ b/atc/db/dbfakes/fake_team.go @@ -5,7 +5,6 @@ import ( "sync" "time" - "code.cloudfoundry.org/lager" "github.com/concourse/concourse/atc" "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" @@ -62,10 +61,9 @@ type FakeTeam struct { result2 db.Pagination result3 error } - ContainersStub func(lager.Logger) ([]db.Container, error) + ContainersStub func() ([]db.Container, error) containersMutex sync.RWMutex containersArgsForCall []struct { - arg1 lager.Logger } containersReturns struct { result1 []db.Container @@ -110,13 +108,12 @@ type FakeTeam struct { deleteReturnsOnCall map[int]struct { result1 error } - FindCheckContainersStub func(lager.Logger, string, string, creds.Secrets) ([]db.Container, map[int]time.Time, error) + FindCheckContainersStub 
func(string, string, creds.Secrets) ([]db.Container, map[int]time.Time, error) findCheckContainersMutex sync.RWMutex findCheckContainersArgsForCall []struct { - arg1 lager.Logger + arg1 string arg2 string - arg3 string - arg4 creds.Secrets + arg3 creds.Secrets } findCheckContainersReturns struct { result1 []db.Container @@ -646,16 +643,15 @@ func (fake *FakeTeam) BuildsWithTimeReturnsOnCall(i int, result1 []db.Build, res }{result1, result2, result3} } -func (fake *FakeTeam) Containers(arg1 lager.Logger) ([]db.Container, error) { +func (fake *FakeTeam) Containers() ([]db.Container, error) { fake.containersMutex.Lock() ret, specificReturn := fake.containersReturnsOnCall[len(fake.containersArgsForCall)] fake.containersArgsForCall = append(fake.containersArgsForCall, struct { - arg1 lager.Logger - }{arg1}) - fake.recordInvocation("Containers", []interface{}{arg1}) + }{}) + fake.recordInvocation("Containers", []interface{}{}) fake.containersMutex.Unlock() if fake.ContainersStub != nil { - return fake.ContainersStub(arg1) + return fake.ContainersStub() } if specificReturn { return ret.result1, ret.result2 @@ -670,19 +666,12 @@ func (fake *FakeTeam) ContainersCallCount() int { return len(fake.containersArgsForCall) } -func (fake *FakeTeam) ContainersCalls(stub func(lager.Logger) ([]db.Container, error)) { +func (fake *FakeTeam) ContainersCalls(stub func() ([]db.Container, error)) { fake.containersMutex.Lock() defer fake.containersMutex.Unlock() fake.ContainersStub = stub } -func (fake *FakeTeam) ContainersArgsForCall(i int) lager.Logger { - fake.containersMutex.RLock() - defer fake.containersMutex.RUnlock() - argsForCall := fake.containersArgsForCall[i] - return argsForCall.arg1 -} - func (fake *FakeTeam) ContainersReturns(result1 []db.Container, result2 error) { fake.containersMutex.Lock() defer fake.containersMutex.Unlock() @@ -879,19 +868,18 @@ func (fake *FakeTeam) DeleteReturnsOnCall(i int, result1 error) { }{result1} } -func (fake *FakeTeam) FindCheckContainers(arg1 
lager.Logger, arg2 string, arg3 string, arg4 creds.Secrets) ([]db.Container, map[int]time.Time, error) { +func (fake *FakeTeam) FindCheckContainers(arg1 string, arg2 string, arg3 creds.Secrets) ([]db.Container, map[int]time.Time, error) { fake.findCheckContainersMutex.Lock() ret, specificReturn := fake.findCheckContainersReturnsOnCall[len(fake.findCheckContainersArgsForCall)] fake.findCheckContainersArgsForCall = append(fake.findCheckContainersArgsForCall, struct { - arg1 lager.Logger + arg1 string arg2 string - arg3 string - arg4 creds.Secrets - }{arg1, arg2, arg3, arg4}) - fake.recordInvocation("FindCheckContainers", []interface{}{arg1, arg2, arg3, arg4}) + arg3 creds.Secrets + }{arg1, arg2, arg3}) + fake.recordInvocation("FindCheckContainers", []interface{}{arg1, arg2, arg3}) fake.findCheckContainersMutex.Unlock() if fake.FindCheckContainersStub != nil { - return fake.FindCheckContainersStub(arg1, arg2, arg3, arg4) + return fake.FindCheckContainersStub(arg1, arg2, arg3) } if specificReturn { return ret.result1, ret.result2, ret.result3 @@ -906,17 +894,17 @@ func (fake *FakeTeam) FindCheckContainersCallCount() int { return len(fake.findCheckContainersArgsForCall) } -func (fake *FakeTeam) FindCheckContainersCalls(stub func(lager.Logger, string, string, creds.Secrets) ([]db.Container, map[int]time.Time, error)) { +func (fake *FakeTeam) FindCheckContainersCalls(stub func(string, string, creds.Secrets) ([]db.Container, map[int]time.Time, error)) { fake.findCheckContainersMutex.Lock() defer fake.findCheckContainersMutex.Unlock() fake.FindCheckContainersStub = stub } -func (fake *FakeTeam) FindCheckContainersArgsForCall(i int) (lager.Logger, string, string, creds.Secrets) { +func (fake *FakeTeam) FindCheckContainersArgsForCall(i int) (string, string, creds.Secrets) { fake.findCheckContainersMutex.RLock() defer fake.findCheckContainersMutex.RUnlock() argsForCall := fake.findCheckContainersArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, 
argsForCall.arg4 + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 } func (fake *FakeTeam) FindCheckContainersReturns(result1 []db.Container, result2 map[int]time.Time, result3 error) { diff --git a/atc/db/dbfakes/fake_worker.go b/atc/db/dbfakes/fake_worker.go index dafe31380..2dd37b5d0 100644 --- a/atc/db/dbfakes/fake_worker.go +++ b/atc/db/dbfakes/fake_worker.go @@ -94,17 +94,17 @@ type FakeWorker struct { expiresAtReturnsOnCall map[int]struct { result1 time.Time } - FindContainerOnWorkerStub func(db.ContainerOwner) (db.CreatingContainer, db.CreatedContainer, error) - findContainerOnWorkerMutex sync.RWMutex - findContainerOnWorkerArgsForCall []struct { + FindContainerStub func(db.ContainerOwner) (db.CreatingContainer, db.CreatedContainer, error) + findContainerMutex sync.RWMutex + findContainerArgsForCall []struct { arg1 db.ContainerOwner } - findContainerOnWorkerReturns struct { + findContainerReturns struct { result1 db.CreatingContainer result2 db.CreatedContainer result3 error } - findContainerOnWorkerReturnsOnCall map[int]struct { + findContainerReturnsOnCall map[int]struct { result1 db.CreatingContainer result2 db.CreatedContainer result3 error @@ -727,66 +727,66 @@ func (fake *FakeWorker) ExpiresAtReturnsOnCall(i int, result1 time.Time) { }{result1} } -func (fake *FakeWorker) FindContainerOnWorker(arg1 db.ContainerOwner) (db.CreatingContainer, db.CreatedContainer, error) { - fake.findContainerOnWorkerMutex.Lock() - ret, specificReturn := fake.findContainerOnWorkerReturnsOnCall[len(fake.findContainerOnWorkerArgsForCall)] - fake.findContainerOnWorkerArgsForCall = append(fake.findContainerOnWorkerArgsForCall, struct { +func (fake *FakeWorker) FindContainer(arg1 db.ContainerOwner) (db.CreatingContainer, db.CreatedContainer, error) { + fake.findContainerMutex.Lock() + ret, specificReturn := fake.findContainerReturnsOnCall[len(fake.findContainerArgsForCall)] + fake.findContainerArgsForCall = append(fake.findContainerArgsForCall, struct { arg1 
db.ContainerOwner }{arg1}) - fake.recordInvocation("FindContainerOnWorker", []interface{}{arg1}) - fake.findContainerOnWorkerMutex.Unlock() - if fake.FindContainerOnWorkerStub != nil { - return fake.FindContainerOnWorkerStub(arg1) + fake.recordInvocation("FindContainer", []interface{}{arg1}) + fake.findContainerMutex.Unlock() + if fake.FindContainerStub != nil { + return fake.FindContainerStub(arg1) } if specificReturn { return ret.result1, ret.result2, ret.result3 } - fakeReturns := fake.findContainerOnWorkerReturns + fakeReturns := fake.findContainerReturns return fakeReturns.result1, fakeReturns.result2, fakeReturns.result3 } -func (fake *FakeWorker) FindContainerOnWorkerCallCount() int { - fake.findContainerOnWorkerMutex.RLock() - defer fake.findContainerOnWorkerMutex.RUnlock() - return len(fake.findContainerOnWorkerArgsForCall) +func (fake *FakeWorker) FindContainerCallCount() int { + fake.findContainerMutex.RLock() + defer fake.findContainerMutex.RUnlock() + return len(fake.findContainerArgsForCall) } -func (fake *FakeWorker) FindContainerOnWorkerCalls(stub func(db.ContainerOwner) (db.CreatingContainer, db.CreatedContainer, error)) { - fake.findContainerOnWorkerMutex.Lock() - defer fake.findContainerOnWorkerMutex.Unlock() - fake.FindContainerOnWorkerStub = stub +func (fake *FakeWorker) FindContainerCalls(stub func(db.ContainerOwner) (db.CreatingContainer, db.CreatedContainer, error)) { + fake.findContainerMutex.Lock() + defer fake.findContainerMutex.Unlock() + fake.FindContainerStub = stub } -func (fake *FakeWorker) FindContainerOnWorkerArgsForCall(i int) db.ContainerOwner { - fake.findContainerOnWorkerMutex.RLock() - defer fake.findContainerOnWorkerMutex.RUnlock() - argsForCall := fake.findContainerOnWorkerArgsForCall[i] +func (fake *FakeWorker) FindContainerArgsForCall(i int) db.ContainerOwner { + fake.findContainerMutex.RLock() + defer fake.findContainerMutex.RUnlock() + argsForCall := fake.findContainerArgsForCall[i] return argsForCall.arg1 } -func (fake 
*FakeWorker) FindContainerOnWorkerReturns(result1 db.CreatingContainer, result2 db.CreatedContainer, result3 error) { - fake.findContainerOnWorkerMutex.Lock() - defer fake.findContainerOnWorkerMutex.Unlock() - fake.FindContainerOnWorkerStub = nil - fake.findContainerOnWorkerReturns = struct { +func (fake *FakeWorker) FindContainerReturns(result1 db.CreatingContainer, result2 db.CreatedContainer, result3 error) { + fake.findContainerMutex.Lock() + defer fake.findContainerMutex.Unlock() + fake.FindContainerStub = nil + fake.findContainerReturns = struct { result1 db.CreatingContainer result2 db.CreatedContainer result3 error }{result1, result2, result3} } -func (fake *FakeWorker) FindContainerOnWorkerReturnsOnCall(i int, result1 db.CreatingContainer, result2 db.CreatedContainer, result3 error) { - fake.findContainerOnWorkerMutex.Lock() - defer fake.findContainerOnWorkerMutex.Unlock() - fake.FindContainerOnWorkerStub = nil - if fake.findContainerOnWorkerReturnsOnCall == nil { - fake.findContainerOnWorkerReturnsOnCall = make(map[int]struct { +func (fake *FakeWorker) FindContainerReturnsOnCall(i int, result1 db.CreatingContainer, result2 db.CreatedContainer, result3 error) { + fake.findContainerMutex.Lock() + defer fake.findContainerMutex.Unlock() + fake.FindContainerStub = nil + if fake.findContainerReturnsOnCall == nil { + fake.findContainerReturnsOnCall = make(map[int]struct { result1 db.CreatingContainer result2 db.CreatedContainer result3 error }) } - fake.findContainerOnWorkerReturnsOnCall[i] = struct { + fake.findContainerReturnsOnCall[i] = struct { result1 db.CreatingContainer result2 db.CreatedContainer result3 error @@ -1757,8 +1757,8 @@ func (fake *FakeWorker) Invocations() map[string][][]interface{} { defer fake.ephemeralMutex.RUnlock() fake.expiresAtMutex.RLock() defer fake.expiresAtMutex.RUnlock() - fake.findContainerOnWorkerMutex.RLock() - defer fake.findContainerOnWorkerMutex.RUnlock() + fake.findContainerMutex.RLock() + defer 
fake.findContainerMutex.RUnlock() fake.gardenAddrMutex.RLock() defer fake.gardenAddrMutex.RUnlock() fake.hTTPProxyURLMutex.RLock() diff --git a/atc/db/job_test.go b/atc/db/job_test.go index 1b6d33b8a..d2bcc6abb 100644 --- a/atc/db/job_test.go +++ b/atc/db/job_test.go @@ -4,7 +4,6 @@ import ( "time" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/db/algorithm" . "github.com/onsi/ginkgo" @@ -747,7 +746,7 @@ var _ = Describe("Job", func() { Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) - resourceConfigScope, err = resource.SetResourceConfig(logger, atc.Source{}, creds.VersionedResourceTypes{}) + resourceConfigScope, err = resource.SetResourceConfig(atc.Source{}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) err = resourceConfigScope.SaveVersions([]atc.Version{ @@ -763,7 +762,7 @@ var _ = Describe("Job", func() { Name: "name1", Value: "value1", }, - }, resourceConfigScope.ResourceConfig(), creds.VersionedResourceTypes{}) + }, resourceConfigScope.ResourceConfig(), atc.VersionedResourceTypes{}) Expect(err).NotTo(HaveOccurred()) reversions, _, found, err := resource.Versions(db.Page{Limit: 3}) @@ -927,7 +926,7 @@ var _ = Describe("Job", func() { Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) - resourceConfigScope, err = resource.SetResourceConfig(logger, atc.Source{}, creds.VersionedResourceTypes{}) + resourceConfigScope, err = resource.SetResourceConfig(atc.Source{}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) err = resourceConfigScope.SaveVersions([]atc.Version{ @@ -943,7 +942,7 @@ var _ = Describe("Job", func() { Name: "name1", Value: "value1", }, - }, resourceConfigScope.ResourceConfig(), creds.VersionedResourceTypes{}) + }, resourceConfigScope.ResourceConfig(), atc.VersionedResourceTypes{}) Expect(err).NotTo(HaveOccurred()) reversions, _, found, err := resource.Versions(db.Page{Limit: 3}) diff 
--git a/atc/db/lock/lock.go b/atc/db/lock/lock.go index 060672d13..85566f021 100644 --- a/atc/db/lock/lock.go +++ b/atc/db/lock/lock.go @@ -44,8 +44,8 @@ func NewVolumeCreatingLockID(volumeID int) LockID { return LockID{LockTypeVolumeCreating, volumeID} } -func NewContainerCreatingLockID(containerID int) LockID { - return LockID{LockTypeContainerCreating, containerID} +func NewContainerCreatingLockID() LockID { + return LockID{LockTypeContainerCreating} } func NewDatabaseMigrationLockID() LockID { diff --git a/atc/db/pipeline_test.go b/atc/db/pipeline_test.go index 6737991da..44b82dbfd 100644 --- a/atc/db/pipeline_test.go +++ b/atc/db/pipeline_test.go @@ -5,7 +5,6 @@ import ( "time" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/db/algorithm" "github.com/concourse/concourse/atc/event" @@ -405,13 +404,13 @@ var _ = Describe("Pipeline", func() { otherPipelineResource, _, err = otherDBPipeline.Resource(otherResourceName) Expect(err).ToNot(HaveOccurred()) - resourceConfigScope, err = resource.SetResourceConfig(logger, atc.Source{"source-config": "some-value"}, creds.VersionedResourceTypes{}) + resourceConfigScope, err = resource.SetResourceConfig(atc.Source{"source-config": "some-value"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) - otherResourceConfigScope, err = otherPipelineResource.SetResourceConfig(logger, atc.Source{"other-source-config": "some-other-value"}, creds.VersionedResourceTypes{}) + otherResourceConfigScope, err = otherPipelineResource.SetResourceConfig(atc.Source{"other-source-config": "some-other-value"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) - _, err = reallyOtherResource.SetResourceConfig(logger, atc.Source{"source-config": "some-really-other-value"}, creds.VersionedResourceTypes{}) + _, err = reallyOtherResource.SetResourceConfig(atc.Source{"source-config": "some-really-other-value"}, 
atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) }) @@ -553,7 +552,7 @@ var _ = Describe("Pipeline", func() { build1DB, err := aJob.CreateBuild() Expect(err).ToNot(HaveOccurred()) - err = build1DB.SaveOutput(logger, "some-type", atc.Source{"source-config": "some-value"}, creds.VersionedResourceTypes{}, atc.Version{"version": "1"}, nil, "some-output-name", "some-resource") + err = build1DB.SaveOutput("some-type", atc.Source{"source-config": "some-value"}, atc.VersionedResourceTypes{}, atc.Version{"version": "1"}, nil, "some-output-name", "some-resource") Expect(err).ToNot(HaveOccurred()) err = build1DB.Finish(db.BuildStatusSucceeded) @@ -600,7 +599,7 @@ var _ = Describe("Pipeline", func() { build2DB, err := aJob.CreateBuild() Expect(err).ToNot(HaveOccurred()) - err = build2DB.SaveOutput(logger, "some-type", atc.Source{"source-config": "some-value"}, creds.VersionedResourceTypes{}, atc.Version{"version": "1"}, nil, "some-output-name", "some-resource") + err = build2DB.SaveOutput("some-type", atc.Source{"source-config": "some-value"}, atc.VersionedResourceTypes{}, atc.Version{"version": "1"}, nil, "some-output-name", "some-resource") Expect(err).ToNot(HaveOccurred()) err = build2DB.Finish(db.BuildStatusFailed) @@ -649,7 +648,7 @@ var _ = Describe("Pipeline", func() { otherPipelineBuild, err := anotherJob.CreateBuild() Expect(err).ToNot(HaveOccurred()) - err = otherPipelineBuild.SaveOutput(logger, "some-type", atc.Source{"other-source-config": "some-other-value"}, creds.VersionedResourceTypes{}, atc.Version{"version": "1"}, nil, "some-output-name", "some-other-resource") + err = otherPipelineBuild.SaveOutput("some-type", atc.Source{"other-source-config": "some-other-value"}, atc.VersionedResourceTypes{}, atc.Version{"version": "1"}, nil, "some-output-name", "some-other-resource") Expect(err).ToNot(HaveOccurred()) err = otherPipelineBuild.Finish(db.BuildStatusSucceeded) @@ -883,7 +882,7 @@ var _ = Describe("Pipeline", func() { }) 
Expect(err).ToNot(HaveOccurred()) - err = build.SaveOutput(logger, "some-type", atc.Source{"source-config": "some-value"}, creds.VersionedResourceTypes{}, atc.Version(beforeVR.Version()), nil, "some-output-name", "some-resource") + err = build.SaveOutput("some-type", atc.Source{"source-config": "some-value"}, atc.VersionedResourceTypes{}, atc.Version(beforeVR.Version()), nil, "some-output-name", "some-resource") Expect(err).ToNot(HaveOccurred()) versions, _, found, err := resource.Versions(db.Page{Limit: 10}) @@ -906,7 +905,7 @@ var _ = Describe("Pipeline", func() { savedResource, _, err := dbPipeline.Resource("some-resource") Expect(err).ToNot(HaveOccurred()) - resourceConfigScope, err = savedResource.SetResourceConfig(logger, atc.Source{"source-config": "some-value"}, creds.VersionedResourceTypes{}) + resourceConfigScope, err = savedResource.SetResourceConfig(atc.Source{"source-config": "some-value"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) savedVR, found, err := resourceConfigScope.LatestVersion() @@ -966,7 +965,7 @@ var _ = Describe("Pipeline", func() { Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) - resourceConfigScope, err = resource.SetResourceConfig(logger, atc.Source{"some-source": "some-value"}, creds.VersionedResourceTypes{}) + resourceConfigScope, err = resource.SetResourceConfig(atc.Source{"some-source": "some-value"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) }) @@ -992,7 +991,7 @@ var _ = Describe("Pipeline", func() { ResourceID: resource.ID(), } - err = build1.SaveOutput(logger, "some-type", atc.Source{"some-source": "some-value"}, creds.VersionedResourceTypes{}, atc.Version{"version": "disabled"}, nil, "some-output-name", "some-resource") + err = build1.SaveOutput("some-type", atc.Source{"some-source": "some-value"}, atc.VersionedResourceTypes{}, atc.Version{"version": "disabled"}, nil, "some-output-name", "some-resource") Expect(err).ToNot(HaveOccurred()) err = 
resourceConfigScope.SaveVersions([]atc.Version{{"version": "enabled"}}) @@ -1019,7 +1018,7 @@ var _ = Describe("Pipeline", func() { Expect(err).ToNot(HaveOccurred()) - err = build1.SaveOutput(logger, "some-type", atc.Source{"some-source": "some-value"}, creds.VersionedResourceTypes{}, atc.Version{"version": "other-enabled"}, nil, "some-output-name", "some-resource") + err = build1.SaveOutput("some-type", atc.Source{"some-source": "some-value"}, atc.VersionedResourceTypes{}, atc.Version{"version": "other-enabled"}, nil, "some-output-name", "some-resource") Expect(err).ToNot(HaveOccurred()) err = build1.Finish(db.BuildStatusSucceeded) @@ -1105,7 +1104,7 @@ var _ = Describe("Pipeline", func() { Expect(found).To(BeTrue()) Expect(err).ToNot(HaveOccurred()) - resourceConfigScope, err = resource.SetResourceConfig(logger, atc.Source{"some": "source"}, creds.VersionedResourceTypes{}) + resourceConfigScope, err = resource.SetResourceConfig(atc.Source{"some": "source"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) By("populating resource versions") @@ -1135,7 +1134,7 @@ var _ = Describe("Pipeline", func() { Expect(err).ToNot(HaveOccurred()) By("populating build outputs") - err = build.SaveOutput(logger, "some-type", atc.Source{"some": "source"}, creds.VersionedResourceTypes{}, atc.Version{"key": "value"}, nil, "some-output-name", "some-resource") + err = build.SaveOutput("some-type", atc.Source{"some": "source"}, atc.VersionedResourceTypes{}, atc.Version{"key": "value"}, nil, "some-output-name", "some-resource") Expect(err).ToNot(HaveOccurred()) By("populating build events") @@ -1221,7 +1220,7 @@ var _ = Describe("Pipeline", func() { savedResource, _, err = pipeline.Resource("some-resource") Expect(err).ToNot(HaveOccurred()) - resourceConfigScope, err = savedResource.SetResourceConfig(logger, atc.Source{"some": "source"}, creds.VersionedResourceTypes{}) + resourceConfigScope, err = savedResource.SetResourceConfig(atc.Source{"some": "source"}, 
atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) err = resourceConfigScope.SaveVersions([]atc.Version{{"version": "1"}}) @@ -1233,7 +1232,7 @@ var _ = Describe("Pipeline", func() { }) It("will cache VersionsDB if no change has occured", func() { - err := build.SaveOutput(logger, "some-type", atc.Source{"some": "source"}, creds.VersionedResourceTypes{}, atc.Version(savedVR.Version()), nil, "some-output-name", "some-resource") + err := build.SaveOutput("some-type", atc.Source{"some": "source"}, atc.VersionedResourceTypes{}, atc.Version(savedVR.Version()), nil, "some-output-name", "some-resource") Expect(err).ToNot(HaveOccurred()) versionsDB, err := pipeline.LoadVersionsDB() @@ -1294,7 +1293,7 @@ var _ = Describe("Pipeline", func() { otherSavedResource, _, err := otherPipeline.Resource("some-other-resource") Expect(err).ToNot(HaveOccurred()) - otherResourceConfigScope, err := otherSavedResource.SetResourceConfig(logger, atc.Source{"some-source": "some-other-value"}, creds.VersionedResourceTypes{}) + otherResourceConfigScope, err := otherSavedResource.SetResourceConfig(atc.Source{"some-source": "some-other-value"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) otherResourceConfigScope.SaveVersions([]atc.Version{{"version": "1"}}) @@ -1307,7 +1306,7 @@ var _ = Describe("Pipeline", func() { versionsDB, err := pipeline.LoadVersionsDB() Expect(err).ToNot(HaveOccurred()) - err = otherBuild.SaveOutput(logger, "some-type", atc.Source{"some-source": "some-other-value"}, creds.VersionedResourceTypes{}, atc.Version(otherSavedVR.Version()), nil, "some-output-name", "some-other-resource") + err = otherBuild.SaveOutput("some-type", atc.Source{"some-source": "some-other-value"}, atc.VersionedResourceTypes{}, atc.Version(otherSavedVR.Version()), nil, "some-output-name", "some-other-resource") Expect(err).ToNot(HaveOccurred()) cachedVersionsDB, err := pipeline.LoadVersionsDB() @@ -1330,10 +1329,10 @@ var _ = Describe("Pipeline", func() { otherResource, 
_, err := pipeline.Resource("some-other-resource") Expect(err).ToNot(HaveOccurred()) - resourceConfigScope, err = resource.SetResourceConfig(logger, atc.Source{"some": "source"}, creds.VersionedResourceTypes{}) + resourceConfigScope, err = resource.SetResourceConfig(atc.Source{"some": "source"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) - otherResourceConfigScope, err = otherResource.SetResourceConfig(logger, atc.Source{"some": "other-source"}, creds.VersionedResourceTypes{}) + otherResourceConfigScope, err = otherResource.SetResourceConfig(atc.Source{"some": "other-source"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) }) @@ -1369,7 +1368,7 @@ var _ = Describe("Pipeline", func() { Expect(err).ToNot(HaveOccurred()) By("creating a new version but not updating the check order yet") - created, err := resource.SaveUncheckedVersion(atc.Version{"version": "1"}, nil, resourceConfigScope.ResourceConfig(), creds.VersionedResourceTypes{}) + created, err := resource.SaveUncheckedVersion(atc.Version{"version": "1"}, nil, resourceConfigScope.ResourceConfig(), atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) Expect(created).To(BeTrue()) @@ -1397,7 +1396,7 @@ var _ = Describe("Pipeline", func() { otherPipelineResource, _, err := otherPipeline.Resource("some-other-resource") Expect(err).ToNot(HaveOccurred()) - otherPipelineResourceConfig, err := otherPipelineResource.SetResourceConfig(logger, atc.Source{"some-source": "some-other-value"}, creds.VersionedResourceTypes{}) + otherPipelineResourceConfig, err := otherPipelineResource.SetResourceConfig(atc.Source{"some-source": "some-other-value"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) err = otherPipelineResourceConfig.SaveVersions([]atc.Version{{"version": "1"}}) @@ -1695,7 +1694,7 @@ var _ = Describe("Pipeline", func() { resource, _, err = pipeline.Resource("some-resource") Expect(err).ToNot(HaveOccurred()) - resourceConfigScope, err = 
resource.SetResourceConfig(logger, atc.Source{"some": "source"}, creds.VersionedResourceTypes{}) + resourceConfigScope, err = resource.SetResourceConfig(atc.Source{"some": "source"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) err = resourceConfigScope.SaveVersions([]atc.Version{atc.Version{"version": "v1"}}) @@ -1829,7 +1828,7 @@ var _ = Describe("Pipeline", func() { resource, _, err = pipeline.Resource("some-resource") Expect(err).ToNot(HaveOccurred()) - resourceConfigScope, err = resource.SetResourceConfig(logger, atc.Source{"some": "source"}, creds.VersionedResourceTypes{}) + resourceConfigScope, err = resource.SetResourceConfig(atc.Source{"some": "source"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) err = resourceConfigScope.SaveVersions([]atc.Version{ @@ -1838,7 +1837,7 @@ var _ = Describe("Pipeline", func() { }) Expect(err).ToNot(HaveOccurred()) - err = dbBuild.SaveOutput(logger, "some-type", atc.Source{"some": "source"}, creds.VersionedResourceTypes{}, atc.Version{"version": "v1"}, []db.ResourceConfigMetadataField{ + err = dbBuild.SaveOutput("some-type", atc.Source{"some": "source"}, atc.VersionedResourceTypes{}, atc.Version{"version": "v1"}, []db.ResourceConfigMetadataField{ { Name: "some", Value: "value", @@ -1850,7 +1849,7 @@ var _ = Describe("Pipeline", func() { Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) - err = dbSecondBuild.SaveOutput(logger, "some-type", atc.Source{"some": "source"}, creds.VersionedResourceTypes{}, atc.Version{"version": "v1"}, []db.ResourceConfigMetadataField{ + err = dbSecondBuild.SaveOutput("some-type", atc.Source{"some": "source"}, atc.VersionedResourceTypes{}, atc.Version{"version": "v1"}, []db.ResourceConfigMetadataField{ { Name: "some", Value: "value", @@ -1858,7 +1857,7 @@ var _ = Describe("Pipeline", func() { }, "some-output-name", "some-resource") Expect(err).ToNot(HaveOccurred()) - err = dbSecondBuild.SaveOutput(logger, "some-type", atc.Source{"some": "source"}, 
creds.VersionedResourceTypes{}, atc.Version{"version": "v3"}, nil, "some-output-name", "some-resource") + err = dbSecondBuild.SaveOutput("some-type", atc.Source{"some": "source"}, atc.VersionedResourceTypes{}, atc.Version{"version": "v3"}, nil, "some-output-name", "some-resource") Expect(err).ToNot(HaveOccurred()) rcv1, found, err := resourceConfigScope.FindVersion(atc.Version{"version": "v1"}) @@ -2037,7 +2036,7 @@ var _ = Describe("Pipeline", func() { Expect(err).NotTo(HaveOccurred()) Expect(setupTx.Commit()).To(Succeed()) - resourceTypeScope, err := resourceType.SetResourceConfig(logger, atc.Source{"some": "type-source"}, creds.VersionedResourceTypes{}) + resourceTypeScope, err := resourceType.SetResourceConfig(atc.Source{"some": "type-source"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) err = resourceTypeScope.SaveVersions([]atc.Version{ @@ -2046,7 +2045,7 @@ var _ = Describe("Pipeline", func() { }) Expect(err).ToNot(HaveOccurred()) - otherResourceTypeScope, err := otherResourceType.SetResourceConfig(logger, atc.Source{"some": "other-type-source"}, creds.VersionedResourceTypes{}) + otherResourceTypeScope, err := otherResourceType.SetResourceConfig(atc.Source{"some": "other-type-source"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) err = otherResourceTypeScope.SaveVersions([]atc.Version{ @@ -2088,7 +2087,7 @@ var _ = Describe("Pipeline", func() { Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) - resourceConfig, err := resource.SetResourceConfig(logger, atc.Source{"some": "source"}, creds.VersionedResourceTypes{}) + resourceConfig, err := resource.SetResourceConfig(atc.Source{"some": "source"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) version := atc.Version{"version": "1"} diff --git a/atc/db/resource.go b/atc/db/resource.go index 8bce01cc3..b9fc110a8 100644 --- a/atc/db/resource.go +++ b/atc/db/resource.go @@ -8,10 +8,8 @@ import ( "strconv" "time" - "code.cloudfoundry.org/lager" sq 
"github.com/Masterminds/squirrel" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db/lock" "github.com/lib/pq" ) @@ -47,7 +45,8 @@ type Resource interface { ResourceConfigVersionID(atc.Version) (int, bool, error) Versions(page Page) ([]atc.ResourceVersion, Pagination, bool, error) - SaveUncheckedVersion(atc.Version, ResourceConfigMetadataFields, ResourceConfig, creds.VersionedResourceTypes) (bool, error) + SaveUncheckedVersion(atc.Version, ResourceConfigMetadataFields, ResourceConfig, atc.VersionedResourceTypes) (bool, error) + UpdateMetadata(atc.Version, ResourceConfigMetadataFields) (bool, error) EnableVersion(rcvID int) error DisableVersion(rcvID int) error @@ -55,7 +54,7 @@ type Resource interface { PinVersion(rcvID int) error UnpinVersion() error - SetResourceConfig(lager.Logger, atc.Source, creds.VersionedResourceTypes) (ResourceConfigScope, error) + SetResourceConfig(atc.Source, atc.VersionedResourceTypes) (ResourceConfigScope, error) SetCheckSetupError(error) error NotifyScan() error @@ -177,7 +176,7 @@ func (r *resource) Reload() (bool, error) { return true, nil } -func (r *resource) SetResourceConfig(logger lager.Logger, source atc.Source, resourceTypes creds.VersionedResourceTypes) (ResourceConfigScope, error) { +func (r *resource) SetResourceConfig(source atc.Source, resourceTypes atc.VersionedResourceTypes) (ResourceConfigScope, error) { resourceConfigDescriptor, err := constructResourceConfigDescriptor(r.type_, source, resourceTypes) if err != nil { return nil, err @@ -190,7 +189,7 @@ func (r *resource) SetResourceConfig(logger lager.Logger, source atc.Source, res defer Rollback(tx) - resourceConfig, err := resourceConfigDescriptor.findOrCreate(logger, tx, r.lockFactory, r.conn) + resourceConfig, err := resourceConfigDescriptor.findOrCreate(tx, r.lockFactory, r.conn) if err != nil { return nil, err } @@ -275,7 +274,7 @@ func (r *resource) SetCheckSetupError(cause error) error { // 
index for the pipeline because we want to ignore these versions until the // check orders get updated. The bumping of the index will be done in // SaveOutput for the put step. -func (r *resource) SaveUncheckedVersion(version atc.Version, metadata ResourceConfigMetadataFields, resourceConfig ResourceConfig, resourceTypes creds.VersionedResourceTypes) (bool, error) { +func (r *resource) SaveUncheckedVersion(version atc.Version, metadata ResourceConfigMetadataFields, resourceConfig ResourceConfig, resourceTypes atc.VersionedResourceTypes) (bool, error) { tx, err := r.conn.Begin() if err != nil { return false, err @@ -296,6 +295,37 @@ func (r *resource) SaveUncheckedVersion(version atc.Version, metadata ResourceCo return newVersion, tx.Commit() } +func (r *resource) UpdateMetadata(version atc.Version, metadata ResourceConfigMetadataFields) (bool, error) { + versionJSON, err := json.Marshal(version) + if err != nil { + return false, err + } + + metadataJSON, err := json.Marshal(metadata) + if err != nil { + return false, err + } + + _, err = psql.Update("resource_config_versions"). + Set("metadata", string(metadataJSON)). + Where(sq.Eq{ + "resource_config_scope_id": r.ResourceConfigScopeID(), + }). + Where(sq.Expr( + "version_md5 = md5(?)", versionJSON, + )). + RunWith(r.conn). 
+ Exec() + + if err != nil { + if err == sql.ErrNoRows { + return false, nil + } + return false, err + } + return true, nil +} + func (r *resource) ResourceConfigVersionID(version atc.Version) (int, bool, error) { requestedVersion, err := json.Marshal(version) if err != nil { diff --git a/atc/db/resource_cache.go b/atc/db/resource_cache.go index 64b428f8e..24f5b6684 100644 --- a/atc/db/resource_cache.go +++ b/atc/db/resource_cache.go @@ -7,8 +7,6 @@ import ( "errors" "fmt" - "code.cloudfoundry.org/lager" - sq "github.com/Masterminds/squirrel" "github.com/concourse/concourse/atc" "github.com/concourse/concourse/atc/db/lock" @@ -43,12 +41,11 @@ func (cache *ResourceCacheDescriptor) find(tx Tx, lockFactory lock.LockFactory, } func (cache *ResourceCacheDescriptor) findOrCreate( - logger lager.Logger, tx Tx, lockFactory lock.LockFactory, conn Conn, ) (UsedResourceCache, error) { - resourceConfig, err := cache.ResourceConfigDescriptor.findOrCreate(logger, tx, lockFactory, conn) + resourceConfig, err := cache.ResourceConfigDescriptor.findOrCreate(tx, lockFactory, conn) if err != nil { return nil, err } @@ -98,7 +95,6 @@ func (cache *ResourceCacheDescriptor) findOrCreate( } func (cache *ResourceCacheDescriptor) use( - logger lager.Logger, tx Tx, rc UsedResourceCache, user ResourceCacheUser, diff --git a/atc/db/resource_cache_factory.go b/atc/db/resource_cache_factory.go index c45a85c05..6fbb6900e 100644 --- a/atc/db/resource_cache_factory.go +++ b/atc/db/resource_cache_factory.go @@ -4,10 +4,8 @@ import ( "database/sql" "encoding/json" - "code.cloudfoundry.org/lager" sq "github.com/Masterminds/squirrel" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db/lock" ) @@ -15,13 +13,12 @@ import ( type ResourceCacheFactory interface { FindOrCreateResourceCache( - logger lager.Logger, resourceCacheUser ResourceCacheUser, resourceTypeName string, version atc.Version, source atc.Source, params atc.Params, - 
resourceTypes creds.VersionedResourceTypes, + resourceTypes atc.VersionedResourceTypes, ) (UsedResourceCache, error) // changing resource cache to interface to allow updates on object is not feasible. @@ -45,13 +42,12 @@ func NewResourceCacheFactory(conn Conn, lockFactory lock.LockFactory) ResourceCa } func (f *resourceCacheFactory) FindOrCreateResourceCache( - logger lager.Logger, resourceCacheUser ResourceCacheUser, resourceTypeName string, version atc.Version, source atc.Source, params atc.Params, - resourceTypes creds.VersionedResourceTypes, + resourceTypes atc.VersionedResourceTypes, ) (UsedResourceCache, error) { resourceConfigDescriptor, err := constructResourceConfigDescriptor(resourceTypeName, source, resourceTypes) if err != nil { @@ -71,12 +67,12 @@ func (f *resourceCacheFactory) FindOrCreateResourceCache( defer Rollback(tx) - usedResourceCache, err := resourceCache.findOrCreate(logger, tx, f.lockFactory, f.conn) + usedResourceCache, err := resourceCache.findOrCreate(tx, f.lockFactory, f.conn) if err != nil { return nil, err } - err = resourceCache.use(logger, tx, usedResourceCache, resourceCacheUser) + err = resourceCache.use(tx, usedResourceCache, resourceCacheUser) if err != nil { return nil, err } diff --git a/atc/db/resource_cache_factory_test.go b/atc/db/resource_cache_factory_test.go index b81a3c45f..bbe4e404a 100644 --- a/atc/db/resource_cache_factory_test.go +++ b/atc/db/resource_cache_factory_test.go @@ -8,9 +8,7 @@ import ( "code.cloudfoundry.org/lager/lagertest" - "github.com/cloudfoundry/bosh-cli/director/template" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" . 
"github.com/onsi/ginkgo" @@ -71,7 +69,7 @@ var _ = Describe("ResourceCacheFactory", func() { Name: "some-type-type", Type: "some-base-type", Source: atc.Source{ - "some-type-type": "((source-param))", + "some-type-type": "some-secret-sauce", }, }, Version: atc.Version{"some-type-type": "version"}, @@ -119,7 +117,6 @@ var _ = Describe("ResourceCacheFactory", func() { Describe("FindOrCreateResourceCache", func() { It("creates resource cache in database", func() { usedResourceCache, err := resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(build.ID()), "some-type", atc.Version{"some": "version"}, @@ -127,14 +124,11 @@ var _ = Describe("ResourceCacheFactory", func() { "some": "source", }, atc.Params{"some": "params"}, - creds.NewVersionedResourceTypes( - template.StaticVariables{"source-param": "some-secret-sauce"}, - atc.VersionedResourceTypes{ - resourceType1, - resourceType2, - resourceType3, - }, - ), + atc.VersionedResourceTypes{ + resourceType1, + resourceType2, + resourceType3, + }, ) Expect(err).ToNot(HaveOccurred()) Expect(usedResourceCache.Version()).To(Equal(atc.Version{"some": "version"})) @@ -200,7 +194,6 @@ var _ = Describe("ResourceCacheFactory", func() { It("returns an error if base resource type does not exist", func() { _, err := resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(build.ID()), "some-type-using-bogus-base-type", atc.Version{"some": "version"}, @@ -208,13 +201,10 @@ var _ = Describe("ResourceCacheFactory", func() { "some": "source", }, atc.Params{"some": "params"}, - creds.NewVersionedResourceTypes( - template.StaticVariables{"source-param": "some-secret-sauce"}, - atc.VersionedResourceTypes{ - resourceType1, - resourceTypeUsingBogusBaseType, - }, - ), + atc.VersionedResourceTypes{ + resourceType1, + resourceTypeUsingBogusBaseType, + }, ) Expect(err).To(HaveOccurred()) Expect(err).To(Equal(db.BaseResourceTypeNotFoundError{Name: "some-bogus-base-type"})) @@ -222,7 +212,6 @@ var _ = 
Describe("ResourceCacheFactory", func() { It("allows a base resource type to be overridden using itself", func() { usedResourceCache, err := resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(build.ID()), "some-image-type", atc.Version{"some": "version"}, @@ -230,12 +219,9 @@ var _ = Describe("ResourceCacheFactory", func() { "some": "source", }, atc.Params{"some": "params"}, - creds.NewVersionedResourceTypes( - template.StaticVariables{"source-param": "some-secret-sauce"}, - atc.VersionedResourceTypes{ - resourceTypeOverridingBaseType, - }, - ), + atc.VersionedResourceTypes{ + resourceTypeOverridingBaseType, + }, ) Expect(err).ToNot(HaveOccurred()) @@ -286,13 +272,12 @@ var _ = Describe("ResourceCacheFactory", func() { for i := 0; i < 100; i++ { _, err := resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(build.ID()), "some-base-resource-type", atc.Version{"some": "version"}, atc.Source{"some": "source"}, atc.Params{"some": "params"}, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).ToNot(HaveOccurred()) } diff --git a/atc/db/resource_cache_lifecycle_test.go b/atc/db/resource_cache_lifecycle_test.go index 292ec1863..92650eb95 100644 --- a/atc/db/resource_cache_lifecycle_test.go +++ b/atc/db/resource_cache_lifecycle_test.go @@ -4,9 +4,7 @@ import ( "fmt" "time" - "github.com/cloudfoundry/bosh-cli/director/template" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/db/algorithm" @@ -175,15 +173,11 @@ var _ = Describe("ResourceCacheLifecycle", func() { BeforeEach(func() { var err error resourceConfig, err := resourceConfigFactory.FindOrCreateResourceConfig( - logger, "some-base-resource-type", atc.Source{ "some": "source", }, - creds.NewVersionedResourceTypes( - template.StaticVariables{"source-param": "some-secret-sauce"}, - atc.VersionedResourceTypes{}, - ), + atc.VersionedResourceTypes{}, ) 
Expect(err).ToNot(HaveOccurred()) @@ -230,26 +224,22 @@ var _ = Describe("ResourceCacheLifecycle", func() { Context("when the cache is for a custom resource type", func() { It("does not remove the cache if the type is still configured", func() { _, err := resourceConfigFactory.FindOrCreateResourceConfig( - logger, "some-type", atc.Source{ "some": "source", }, - creds.NewVersionedResourceTypes( - template.StaticVariables{"source-param": "some-secret-sauce"}, - atc.VersionedResourceTypes{ - atc.VersionedResourceType{ - ResourceType: atc.ResourceType{ - Name: "some-type", - Type: "some-base-resource-type", - Source: atc.Source{ - "some": "source", - }, + atc.VersionedResourceTypes{ + atc.VersionedResourceType{ + ResourceType: atc.ResourceType{ + Name: "some-type", + Type: "some-base-resource-type", + Source: atc.Source{ + "some": "source", }, - Version: atc.Version{"showme": "whatyougot"}, }, + Version: atc.Version{"showme": "whatyougot"}, }, - ), + }, ) Expect(err).ToNot(HaveOccurred()) @@ -262,26 +252,22 @@ var _ = Describe("ResourceCacheLifecycle", func() { It("removes the cache if the type is no longer configured", func() { _, err := resourceConfigFactory.FindOrCreateResourceConfig( - logger, "some-type", atc.Source{ "some": "source", }, - creds.NewVersionedResourceTypes( - template.StaticVariables{"source-param": "some-secret-sauce"}, - atc.VersionedResourceTypes{ - atc.VersionedResourceType{ - ResourceType: atc.ResourceType{ - Name: "some-type", - Type: "some-base-resource-type", - Source: atc.Source{ - "some": "source", - }, + atc.VersionedResourceTypes{ + atc.VersionedResourceType{ + ResourceType: atc.ResourceType{ + Name: "some-type", + Type: "some-base-resource-type", + Source: atc.Source{ + "some": "source", }, - Version: atc.Version{"showme": "whatyougot"}, }, + Version: atc.Version{"showme": "whatyougot"}, }, - ), + }, ) Expect(err).ToNot(HaveOccurred()) @@ -306,12 +292,8 @@ var _ = Describe("ResourceCacheLifecycle", func() { 
Expect(err).ToNot(HaveOccurred()) resourceConfigScope, err := defaultResource.SetResourceConfig( - logger, atc.Source{"some": "source"}, - creds.NewVersionedResourceTypes( - template.StaticVariables{"source-param": "some-secret-sauce"}, - atc.VersionedResourceTypes{}, - ), + atc.VersionedResourceTypes{}, ) Expect(err).ToNot(HaveOccurred()) @@ -372,7 +354,6 @@ func countResourceCaches() int { func createResourceCacheWithUser(resourceCacheUser db.ResourceCacheUser) db.UsedResourceCache { usedResourceCache, err := resourceCacheFactory.FindOrCreateResourceCache( - logger, resourceCacheUser, "some-base-resource-type", atc.Version{"some": "version"}, @@ -380,10 +361,7 @@ func createResourceCacheWithUser(resourceCacheUser db.ResourceCacheUser) db.Used "some": "source", }, atc.Params{"some": fmt.Sprintf("param-%d", time.Now().UnixNano())}, - creds.NewVersionedResourceTypes( - template.StaticVariables{"source-param": "some-secret-sauce"}, - atc.VersionedResourceTypes{}, - ), + atc.VersionedResourceTypes{}, ) Expect(err).ToNot(HaveOccurred()) diff --git a/atc/db/resource_cache_test.go b/atc/db/resource_cache_test.go index 4e2dc8412..77a2d7a87 100644 --- a/atc/db/resource_cache_test.go +++ b/atc/db/resource_cache_test.go @@ -2,9 +2,7 @@ package db_test import ( sq "github.com/Masterminds/squirrel" - "github.com/cloudfoundry/bosh-cli/director/template" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" "github.com/lib/pq" . 
"github.com/onsi/ginkgo" @@ -42,7 +40,6 @@ var _ = Describe("ResourceCache", func() { It("can be created and used", func() { urc, err := resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(build.ID()), "some-worker-resource-type", atc.Version{"some": "version"}, @@ -50,10 +47,7 @@ var _ = Describe("ResourceCache", func() { "some": "source", }, atc.Params{"some": "params"}, - creds.NewVersionedResourceTypes( - template.StaticVariables{"source-param": "some-secret-sauce"}, - atc.VersionedResourceTypes{}, - ), + atc.VersionedResourceTypes{}, ) Expect(err).ToNot(HaveOccurred()) Expect(urc.ID()).ToNot(BeZero()) @@ -70,7 +64,6 @@ var _ = Describe("ResourceCache", func() { BeforeEach(func() { var err error existingResourceCache, err = resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(build.ID()), "some-worker-resource-type", atc.Version{"some": "version"}, @@ -78,17 +71,13 @@ var _ = Describe("ResourceCache", func() { "some": "source", }, atc.Params{"some": "params"}, - creds.NewVersionedResourceTypes( - template.StaticVariables{"source-param": "some-secret-sauce"}, - atc.VersionedResourceTypes{}, - ), + atc.VersionedResourceTypes{}, ) Expect(err).ToNot(HaveOccurred()) }) It("returns the same used resource cache", func() { urc, err := resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(build.ID()), "some-worker-resource-type", atc.Version{"some": "version"}, @@ -96,10 +85,7 @@ var _ = Describe("ResourceCache", func() { "some": "source", }, atc.Params{"some": "params"}, - creds.NewVersionedResourceTypes( - template.StaticVariables{"source-param": "some-secret-sauce"}, - atc.VersionedResourceTypes{}, - ), + atc.VersionedResourceTypes{}, ) Expect(err).ToNot(HaveOccurred()) Expect(urc.ID()).To(Equal(existingResourceCache.ID())) @@ -127,7 +113,6 @@ var _ = Describe("ResourceCache", func() { Expect(err).ToNot(HaveOccurred()) urc, err = resourceCacheFactory.FindOrCreateResourceCache( - logger, 
db.ForContainer(container.ID()), "some-worker-resource-type", atc.Version{"some-type": "version"}, @@ -135,9 +120,7 @@ var _ = Describe("ResourceCache", func() { "cache": "source", }, atc.Params{"some": "params"}, - creds.NewVersionedResourceTypes(template.StaticVariables{"source-param": "some-secret-sauce"}, - atc.VersionedResourceTypes{}, - ), + atc.VersionedResourceTypes{}, ) Expect(err).NotTo(HaveOccurred()) }) @@ -156,7 +139,6 @@ var _ = Describe("ResourceCache", func() { BeforeEach(func() { var err error existingResourceCache, err = resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForContainer(container.ID()), "some-worker-resource-type", atc.Version{"some-type": "version"}, @@ -164,9 +146,7 @@ var _ = Describe("ResourceCache", func() { "cache": "source", }, atc.Params{"some": "params"}, - creds.NewVersionedResourceTypes(template.StaticVariables{"source-param": "some-secret-sauce"}, - atc.VersionedResourceTypes{}, - ), + atc.VersionedResourceTypes{}, ) Expect(err).NotTo(HaveOccurred()) }) diff --git a/atc/db/resource_config.go b/atc/db/resource_config.go index fad3a1493..c31841734 100644 --- a/atc/db/resource_config.go +++ b/atc/db/resource_config.go @@ -6,11 +6,8 @@ import ( "fmt" "strconv" - "code.cloudfoundry.org/lager" - sq "github.com/Masterminds/squirrel" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db/lock" ) @@ -126,7 +123,7 @@ func (r *resourceConfig) FindResourceConfigScopeByID(resourceConfigScopeID int, lockFactory: r.lockFactory}, true, nil } -func (r *ResourceConfigDescriptor) findOrCreate(logger lager.Logger, tx Tx, lockFactory lock.LockFactory, conn Conn) (ResourceConfig, error) { +func (r *ResourceConfigDescriptor) findOrCreate(tx Tx, lockFactory lock.LockFactory, conn Conn) (ResourceConfig, error) { rc := &resourceConfig{ lockFactory: lockFactory, conn: conn, @@ -137,7 +134,7 @@ func (r *ResourceConfigDescriptor) findOrCreate(logger lager.Logger, tx Tx, lock 
if r.CreatedByResourceCache != nil { parentColumnName = "resource_cache_id" - resourceCache, err := r.CreatedByResourceCache.findOrCreate(logger, tx, lockFactory, conn) + resourceCache, err := r.CreatedByResourceCache.findOrCreate(tx, lockFactory, conn) if err != nil { return nil, err } @@ -284,7 +281,7 @@ func (r *ResourceConfigDescriptor) findWithParentID(tx Tx, parentColumnName stri return id, true, nil } -func findOrCreateResourceConfigScope(tx Tx, conn Conn, lockFactory lock.LockFactory, resourceConfig ResourceConfig, resource Resource, resourceTypes creds.VersionedResourceTypes) (ResourceConfigScope, error) { +func findOrCreateResourceConfigScope(tx Tx, conn Conn, lockFactory lock.LockFactory, resourceConfig ResourceConfig, resource Resource, resourceTypes atc.VersionedResourceTypes) (ResourceConfigScope, error) { var unique bool var uniqueResource Resource var resourceID *int diff --git a/atc/db/resource_config_check_session_lifecycle_test.go b/atc/db/resource_config_check_session_lifecycle_test.go index e6f6e8ba6..eac2a0496 100644 --- a/atc/db/resource_config_check_session_lifecycle_test.go +++ b/atc/db/resource_config_check_session_lifecycle_test.go @@ -5,7 +5,6 @@ import ( sq "github.com/Masterminds/squirrel" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" . 
"github.com/onsi/ginkgo" @@ -23,13 +22,13 @@ var _ = Describe("ResourceConfigCheckSessionLifecycle", func() { Describe("CleanInactiveResourceConfigCheckSessions", func() { expiry := db.ContainerOwnerExpiries{ - Min: 1 * time.Minute, - Max: 1 * time.Minute, + Min: 1 * time.Minute, + Max: 1 * time.Minute, } Context("for resources", func() { findOrCreateSessionForDefaultResource := func() int { - resourceConfigScope, err := defaultResource.SetResourceConfig(logger, defaultResource.Source(), creds.VersionedResourceTypes{}) + resourceConfigScope, err := defaultResource.SetResourceConfig(defaultResource.Source(), atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) owner := db.NewResourceConfigCheckSessionContainerOwner(resourceConfigScope.ResourceConfig(), expiry) @@ -123,9 +122,9 @@ var _ = Describe("ResourceConfigCheckSessionLifecycle", func() { Context("for resource types", func() { findOrCreateSessionForDefaultResourceType := func() int { - resourceConfigScope, err := defaultResourceType.SetResourceConfig(logger, + resourceConfigScope, err := defaultResourceType.SetResourceConfig( defaultResourceType.Source(), - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).ToNot(HaveOccurred()) diff --git a/atc/db/resource_config_factory.go b/atc/db/resource_config_factory.go index 2abbeef39..588e9d553 100644 --- a/atc/db/resource_config_factory.go +++ b/atc/db/resource_config_factory.go @@ -5,10 +5,8 @@ import ( "fmt" "strconv" - "code.cloudfoundry.org/lager" sq "github.com/Masterminds/squirrel" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db/lock" "github.com/lib/pq" ) @@ -25,10 +23,9 @@ func (e ErrCustomResourceTypeVersionNotFound) Error() string { type ResourceConfigFactory interface { FindOrCreateResourceConfig( - logger lager.Logger, resourceType string, source atc.Source, - resourceTypes creds.VersionedResourceTypes, + resourceTypes atc.VersionedResourceTypes, 
) (ResourceConfig, error) FindResourceConfigByID(int) (ResourceConfig, bool, error) @@ -73,10 +70,9 @@ func (f *resourceConfigFactory) FindResourceConfigByID(resourceConfigID int) (Re } func (f *resourceConfigFactory) FindOrCreateResourceConfig( - logger lager.Logger, resourceType string, source atc.Source, - resourceTypes creds.VersionedResourceTypes, + resourceTypes atc.VersionedResourceTypes, ) (ResourceConfig, error) { resourceConfigDescriptor, err := constructResourceConfigDescriptor(resourceType, source, resourceTypes) @@ -90,7 +86,7 @@ func (f *resourceConfigFactory) FindOrCreateResourceConfig( } defer Rollback(tx) - resourceConfig, err := resourceConfigDescriptor.findOrCreate(logger, tx, f.lockFactory, f.conn) + resourceConfig, err := resourceConfigDescriptor.findOrCreate(tx, f.lockFactory, f.conn) if err != nil { return nil, err } @@ -109,7 +105,7 @@ func (f *resourceConfigFactory) FindOrCreateResourceConfig( func constructResourceConfigDescriptor( resourceTypeName string, source atc.Source, - resourceTypes creds.VersionedResourceTypes, + resourceTypes atc.VersionedResourceTypes, ) (ResourceConfigDescriptor, error) { resourceConfigDescriptor := ResourceConfigDescriptor{ Source: source, @@ -117,14 +113,9 @@ func constructResourceConfigDescriptor( customType, found := resourceTypes.Lookup(resourceTypeName) if found { - source, err := customType.Source.Evaluate() - if err != nil { - return ResourceConfigDescriptor{}, err - } - customTypeResourceConfig, err := constructResourceConfigDescriptor( customType.Type, - source, + customType.Source, resourceTypes.Without(customType.Name), ) if err != nil { diff --git a/atc/db/resource_config_factory_test.go b/atc/db/resource_config_factory_test.go index 73619d68e..5cc606aa1 100644 --- a/atc/db/resource_config_factory_test.go +++ b/atc/db/resource_config_factory_test.go @@ -3,9 +3,7 @@ package db_test import ( "sync" - "github.com/cloudfoundry/bosh-cli/director/template" "github.com/concourse/concourse/atc" - 
"github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -62,7 +60,7 @@ var _ = Describe("ResourceConfigFactory", func() { defer wg.Done() for i := 0; i < 100; i++ { - _, err := resourceConfigFactory.FindOrCreateResourceConfig(logger, "some-base-resource-type", atc.Source{"some": "unique-source"}, creds.VersionedResourceTypes{}) + _, err := resourceConfigFactory.FindOrCreateResourceConfig("some-base-resource-type", atc.Source{"some": "unique-source"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) } }() @@ -100,7 +98,7 @@ var _ = Describe("ResourceConfigFactory", func() { Expect(err).NotTo(HaveOccurred()) Expect(setupTx.Commit()).To(Succeed()) - createdResourceConfig, err = resourceConfigFactory.FindOrCreateResourceConfig(logger, "base-resource-type-name", atc.Source{}, creds.VersionedResourceTypes{}) + createdResourceConfig, err = resourceConfigFactory.FindOrCreateResourceConfig("base-resource-type-name", atc.Source{}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) Expect(createdResourceConfig).ToNot(BeNil()) @@ -120,7 +118,7 @@ var _ = Describe("ResourceConfigFactory", func() { pipelineResourceTypes, err := defaultPipeline.ResourceTypes() Expect(err).ToNot(HaveOccurred()) - createdResourceConfig, err = resourceConfigFactory.FindOrCreateResourceConfig(logger, "some-type", atc.Source{}, creds.NewVersionedResourceTypes(template.StaticVariables{}, pipelineResourceTypes.Deserialize())) + createdResourceConfig, err = resourceConfigFactory.FindOrCreateResourceConfig("some-type", atc.Source{}, pipelineResourceTypes.Deserialize()) Expect(err).ToNot(HaveOccurred()) Expect(createdResourceConfig).ToNot(BeNil()) diff --git a/atc/db/resource_config_scope.go b/atc/db/resource_config_scope.go index 5bd36ab00..35b6dbd4a 100644 --- a/atc/db/resource_config_scope.go +++ b/atc/db/resource_config_scope.go @@ -34,7 +34,6 @@ type ResourceConfigScope interface { 
AcquireResourceCheckingLock( logger lager.Logger, - interval time.Duration, ) (lock.Lock, bool, error) UpdateLastCheckStartTime( @@ -181,7 +180,6 @@ func (r *resourceConfigScope) SetCheckError(cause error) error { func (r *resourceConfigScope) AcquireResourceCheckingLock( logger lager.Logger, - interval time.Duration, ) (lock.Lock, bool, error) { return r.lockFactory.Acquire( logger, diff --git a/atc/db/resource_config_scope_test.go b/atc/db/resource_config_scope_test.go index d0bdbd323..be356bfa9 100644 --- a/atc/db/resource_config_scope_test.go +++ b/atc/db/resource_config_scope_test.go @@ -3,9 +3,7 @@ package db_test import ( "time" - "github.com/cloudfoundry/bosh-cli/director/template" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/db/lock" . "github.com/onsi/ginkgo" @@ -44,7 +42,7 @@ var _ = Describe("Resource Config Scope", func() { Expect(err).NotTo(HaveOccurred()) Expect(found).To(BeTrue()) - resourceScope, err = resource.SetResourceConfig(logger, atc.Source{"some": "source"}, creds.VersionedResourceTypes{}) + resourceScope, err = resource.SetResourceConfig(atc.Source{"some": "source"}, atc.VersionedResourceTypes{}) Expect(err).NotTo(HaveOccurred()) }) @@ -202,9 +200,8 @@ var _ = Describe("Resource Config Scope", func() { Expect(err).ToNot(HaveOccurred()) resourceConfigScope, err = someResource.SetResourceConfig( - logger, someResource.Source(), - creds.NewVersionedResourceTypes(template.StaticVariables{}, pipelineResourceTypes.Deserialize()), + pipelineResourceTypes.Deserialize(), ) Expect(err).ToNot(HaveOccurred()) }) @@ -282,9 +279,8 @@ var _ = Describe("Resource Config Scope", func() { Expect(err).ToNot(HaveOccurred()) resourceConfigScope, err = someResource.SetResourceConfig( - logger, someResource.Source(), - creds.NewVersionedResourceTypes(template.StaticVariables{}, pipelineResourceTypes.Deserialize()), + 
pipelineResourceTypes.Deserialize(), ) Expect(err).ToNot(HaveOccurred()) }) @@ -317,9 +313,8 @@ var _ = Describe("Resource Config Scope", func() { Expect(err).ToNot(HaveOccurred()) resourceConfigScope, err = someResource.SetResourceConfig( - logger, someResource.Source(), - creds.NewVersionedResourceTypes(template.StaticVariables{}, pipelineResourceTypes.Deserialize()), + pipelineResourceTypes.Deserialize(), ) Expect(err).ToNot(HaveOccurred()) }) @@ -331,7 +326,7 @@ var _ = Describe("Resource Config Scope", func() { BeforeEach(func() { var err error var acquired bool - lock, acquired, err = resourceConfigScope.AcquireResourceCheckingLock(logger, 1*time.Second) + lock, acquired, err = resourceConfigScope.AcquireResourceCheckingLock(logger) Expect(err).ToNot(HaveOccurred()) Expect(acquired).To(BeTrue()) }) @@ -341,7 +336,7 @@ var _ = Describe("Resource Config Scope", func() { }) It("does not get the lock", func() { - _, acquired, err := resourceConfigScope.AcquireResourceCheckingLock(logger, 1*time.Second) + _, acquired, err := resourceConfigScope.AcquireResourceCheckingLock(logger) Expect(err).ToNot(HaveOccurred()) Expect(acquired).To(BeFalse()) }) @@ -353,7 +348,7 @@ var _ = Describe("Resource Config Scope", func() { }) It("gets the lock", func() { - lock, acquired, err := resourceConfigScope.AcquireResourceCheckingLock(logger, 1*time.Second) + lock, acquired, err := resourceConfigScope.AcquireResourceCheckingLock(logger) Expect(err).ToNot(HaveOccurred()) Expect(acquired).To(BeTrue()) @@ -365,12 +360,12 @@ var _ = Describe("Resource Config Scope", func() { Context("when there has not been a check recently", func() { It("gets and keeps the lock and stops others from periodically getting it", func() { - lock, acquired, err := resourceConfigScope.AcquireResourceCheckingLock(logger, 1*time.Second) + lock, acquired, err := resourceConfigScope.AcquireResourceCheckingLock(logger) Expect(err).ToNot(HaveOccurred()) Expect(acquired).To(BeTrue()) Consistently(func() bool { - 
_, acquired, err = resourceConfigScope.AcquireResourceCheckingLock(logger, 1*time.Second) + _, acquired, err = resourceConfigScope.AcquireResourceCheckingLock(logger) Expect(err).ToNot(HaveOccurred()) return acquired @@ -381,7 +376,7 @@ var _ = Describe("Resource Config Scope", func() { time.Sleep(time.Second) - lock, acquired, err = resourceConfigScope.AcquireResourceCheckingLock(logger, 1*time.Second) + lock, acquired, err = resourceConfigScope.AcquireResourceCheckingLock(logger) Expect(err).ToNot(HaveOccurred()) Expect(acquired).To(BeTrue()) diff --git a/atc/db/resource_config_test.go b/atc/db/resource_config_test.go index 92b055edb..dd954ee9e 100644 --- a/atc/db/resource_config_test.go +++ b/atc/db/resource_config_test.go @@ -2,7 +2,6 @@ package db_test import ( "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" . "github.com/onsi/ginkgo" @@ -12,7 +11,7 @@ import ( var _ = Describe("ResourceConfig", func() { Describe("FindResourceConfigScopeByID", func() { var pipeline db.Pipeline - var resourceTypes creds.VersionedResourceTypes + var resourceTypes atc.VersionedResourceTypes BeforeEach(func() { atc.EnableGlobalResources = true @@ -38,7 +37,7 @@ var _ = Describe("ResourceConfig", func() { Expect(err).ToNot(HaveOccurred()) Expect(created).To(BeTrue()) - resourceTypes = creds.VersionedResourceTypes{} + resourceTypes = atc.VersionedResourceTypes{} }) Context("when a shared resource config scope exists", func() { @@ -64,7 +63,7 @@ var _ = Describe("ResourceConfig", func() { Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) - scope, err = resource.SetResourceConfig(logger, atc.Source{"some": "repository"}, resourceTypes) + scope, err = resource.SetResourceConfig(atc.Source{"some": "repository"}, resourceTypes) Expect(err).ToNot(HaveOccurred()) }) @@ -101,7 +100,7 @@ var _ = Describe("ResourceConfig", func() { Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) - scope, err = 
resource.SetResourceConfig(logger, atc.Source{"some": "repository"}, resourceTypes) + scope, err = resource.SetResourceConfig(atc.Source{"some": "repository"}, resourceTypes) Expect(err).ToNot(HaveOccurred()) }) @@ -133,7 +132,7 @@ var _ = Describe("ResourceConfig", func() { Expect(err).NotTo(HaveOccurred()) Expect(setupTx.Commit()).To(Succeed()) - resourceConfig, err = resourceConfigFactory.FindOrCreateResourceConfig(logger, "some-type", atc.Source{"some": "repository"}, resourceTypes) + resourceConfig, err = resourceConfigFactory.FindOrCreateResourceConfig("some-type", atc.Source{"some": "repository"}, resourceTypes) Expect(err).ToNot(HaveOccurred()) var found bool diff --git a/atc/db/resource_test.go b/atc/db/resource_test.go index d4362d0d6..767c4d237 100644 --- a/atc/db/resource_test.go +++ b/atc/db/resource_test.go @@ -4,9 +4,7 @@ import ( "errors" "strconv" - "github.com/cloudfoundry/bosh-cli/director/template" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/db/algorithm" . 
"github.com/onsi/ginkgo" @@ -138,7 +136,7 @@ var _ = Describe("Resource", func() { versionsDB, err = pipeline.LoadVersionsDB() Expect(err).ToNot(HaveOccurred()) - resourceScope, err = resource.SetResourceConfig(logger, atc.Source{"some": "repository"}, creds.VersionedResourceTypes{}) + resourceScope, err = resource.SetResourceConfig(atc.Source{"some": "repository"}, atc.VersionedResourceTypes{}) Expect(err).NotTo(HaveOccurred()) err = resourceScope.SetCheckError(errors.New("oops")) @@ -165,7 +163,7 @@ var _ = Describe("Resource", func() { }) It("does not bump the cache index", func() { - resourceScope, err = resource.SetResourceConfig(logger, atc.Source{"some": "repository"}, creds.VersionedResourceTypes{}) + resourceScope, err = resource.SetResourceConfig(atc.Source{"some": "repository"}, atc.VersionedResourceTypes{}) Expect(err).NotTo(HaveOccurred()) cachedVersionsDB, err := pipeline.LoadVersionsDB() @@ -278,7 +276,7 @@ var _ = Describe("Resource", func() { Expect(err).NotTo(HaveOccurred()) Expect(setupTx.Commit()).To(Succeed()) - resourceScope1, err = resource1.SetResourceConfig(logger, atc.Source{"some": "repository"}, creds.VersionedResourceTypes{}) + resourceScope1, err = resource1.SetResourceConfig(atc.Source{"some": "repository"}, atc.VersionedResourceTypes{}) Expect(err).NotTo(HaveOccurred()) err = resourceScope1.SetCheckError(errors.New("oops")) @@ -303,7 +301,7 @@ var _ = Describe("Resource", func() { Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) - resourceScope2, err = resource2.SetResourceConfig(logger, atc.Source{"some": "repository"}, creds.VersionedResourceTypes{}) + resourceScope2, err = resource2.SetResourceConfig(atc.Source{"some": "repository"}, atc.VersionedResourceTypes{}) Expect(err).NotTo(HaveOccurred()) found, err = resource2.Reload() @@ -348,7 +346,7 @@ var _ = Describe("Resource", func() { Expect(err).NotTo(HaveOccurred()) Expect(setupTx.Commit()).To(Succeed()) - resourceScope1, err = resource1.SetResourceConfig(logger, 
atc.Source{"some": "repository"}, creds.VersionedResourceTypes{}) + resourceScope1, err = resource1.SetResourceConfig(atc.Source{"some": "repository"}, atc.VersionedResourceTypes{}) Expect(err).NotTo(HaveOccurred()) found, err = resource1.Reload() @@ -369,7 +367,7 @@ var _ = Describe("Resource", func() { Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) - resourceScope2, err = resource2.SetResourceConfig(logger, atc.Source{"some": "repository"}, creds.VersionedResourceTypes{}) + resourceScope2, err = resource2.SetResourceConfig(atc.Source{"some": "repository"}, atc.VersionedResourceTypes{}) Expect(err).NotTo(HaveOccurred()) found, err = resource2.Reload() @@ -416,7 +414,7 @@ var _ = Describe("Resource", func() { resourceTypes, err = pipeline.ResourceTypes() Expect(err).ToNot(HaveOccurred()) - resourceScope1, err = resource1.SetResourceConfig(logger, atc.Source{"some": "repository"}, creds.NewVersionedResourceTypes(template.StaticVariables{}, resourceTypes.Deserialize())) + resourceScope1, err = resource1.SetResourceConfig(atc.Source{"some": "repository"}, resourceTypes.Deserialize()) Expect(err).NotTo(HaveOccurred()) found, err = resource1.Reload() @@ -438,7 +436,7 @@ var _ = Describe("Resource", func() { Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) - resourceScope2, err = resource2.SetResourceConfig(logger, atc.Source{"some": "repository"}, creds.NewVersionedResourceTypes(template.StaticVariables{}, resourceTypes.Deserialize())) + resourceScope2, err = resource2.SetResourceConfig(atc.Source{"some": "repository"}, resourceTypes.Deserialize()) Expect(err).NotTo(HaveOccurred()) found, err = resource2.Reload() @@ -470,7 +468,7 @@ var _ = Describe("Resource", func() { Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) - newResourceConfigScope, err = resource1.SetResourceConfig(logger, atc.Source{"some": "other-repo"}, creds.NewVersionedResourceTypes(template.StaticVariables{}, resourceTypes.Deserialize())) + newResourceConfigScope, 
err = resource1.SetResourceConfig(atc.Source{"some": "other-repo"}, resourceTypes.Deserialize()) Expect(err).NotTo(HaveOccurred()) found, err = resource1.Reload() @@ -505,7 +503,7 @@ var _ = Describe("Resource", func() { resourceTypes, err = newPipeline.ResourceTypes() Expect(err).ToNot(HaveOccurred()) - newResourceConfigScope, err = resource1.SetResourceConfig(logger, atc.Source{"some": "repository"}, creds.NewVersionedResourceTypes(template.StaticVariables{}, resourceTypes.Deserialize())) + newResourceConfigScope, err = resource1.SetResourceConfig(atc.Source{"some": "repository"}, resourceTypes.Deserialize()) Expect(err).NotTo(HaveOccurred()) found, err = resource1.Reload() @@ -555,10 +553,10 @@ var _ = Describe("Resource", func() { Expect(err).NotTo(HaveOccurred()) Expect(setupTx.Commit()).To(Succeed()) - _, err = resource1.SetResourceConfig(logger, atc.Source{"some": "repository"}, creds.VersionedResourceTypes{}) + _, err = resource1.SetResourceConfig(atc.Source{"some": "repository"}, atc.VersionedResourceTypes{}) Expect(err).NotTo(HaveOccurred()) - _, err = resource2.SetResourceConfig(logger, atc.Source{"some": "repository"}, creds.VersionedResourceTypes{}) + _, err = resource2.SetResourceConfig(atc.Source{"some": "repository"}, atc.VersionedResourceTypes{}) Expect(err).NotTo(HaveOccurred()) found, err = resource1.Reload() @@ -664,7 +662,7 @@ var _ = Describe("Resource", func() { Expect(err).ToNot(HaveOccurred()) Expect(setupTx.Commit()).To(Succeed()) - resourceScope, err = resource.SetResourceConfig(logger, atc.Source{"some": "repository"}, creds.VersionedResourceTypes{}) + resourceScope, err = resource.SetResourceConfig(atc.Source{"some": "repository"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) err = resourceScope.SaveVersions([]atc.Version{version}) @@ -685,7 +683,7 @@ var _ = Describe("Resource", func() { Context("when the check order is 0", func() { BeforeEach(func() { version = atc.Version{"version": "2"} - created, err := 
resource.SaveUncheckedVersion(version, nil, resourceScope.ResourceConfig(), creds.VersionedResourceTypes{}) + created, err := resource.SaveUncheckedVersion(version, nil, resourceScope.ResourceConfig(), atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) Expect(created).To(BeTrue()) }) @@ -710,7 +708,7 @@ var _ = Describe("Resource", func() { _, err = brt.FindOrCreate(setupTx, false) Expect(err).NotTo(HaveOccurred()) Expect(setupTx.Commit()).To(Succeed()) - _, err = resourceConfigFactory.FindOrCreateResourceConfig(logger, "registry-image", atc.Source{"some": "repository"}, creds.VersionedResourceTypes{}) + _, err = resourceConfigFactory.FindOrCreateResourceConfig("registry-image", atc.Source{"some": "repository"}, atc.VersionedResourceTypes{}) Expect(err).NotTo(HaveOccurred()) }) @@ -748,7 +746,7 @@ var _ = Describe("Resource", func() { Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) - resourceScope, err = resource.SetResourceConfig(logger, atc.Source{"some": "other-repository"}, creds.VersionedResourceTypes{}) + resourceScope, err = resource.SetResourceConfig(atc.Source{"some": "other-repository"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) originalVersionSlice = []atc.Version{ @@ -789,6 +787,10 @@ var _ = Describe("Resource", func() { resourceVersions = append(resourceVersions, resourceVersion) } + + reloaded, err := resource.Reload() + Expect(err).ToNot(HaveOccurred()) + Expect(reloaded).To(BeTrue()) }) Context("with no since/until", func() { @@ -861,7 +863,7 @@ var _ = Describe("Resource", func() { metadata := []db.ResourceConfigMetadataField{{Name: "name1", Value: "value1"}} // save metadata - _, err := resource.SaveUncheckedVersion(atc.Version(resourceVersions[9].Version), metadata, resourceScope.ResourceConfig(), creds.VersionedResourceTypes{}) + _, err := resource.SaveUncheckedVersion(atc.Version(resourceVersions[9].Version), metadata, resourceScope.ResourceConfig(), atc.VersionedResourceTypes{}) 
Expect(err).ToNot(HaveOccurred()) }) @@ -889,6 +891,27 @@ var _ = Describe("Resource", func() { Expect(historyPage).To(ConsistOf([]atc.ResourceVersion{resourceVersions[9]})) }) }) + + Context("when the version metadata is updated", func() { + var metadata db.ResourceConfigMetadataFields + + BeforeEach(func() { + metadata = []db.ResourceConfigMetadataField{{Name: "name1", Value: "value1"}} + + updated, err := resource.UpdateMetadata(resourceVersions[9].Version, metadata) + Expect(err).ToNot(HaveOccurred()) + Expect(updated).To(BeTrue()) + }) + + It("returns a version with metadata updated", func() { + historyPage, _, found, err := resource.Versions(db.Page{Limit: 1}) + Expect(err).ToNot(HaveOccurred()) + Expect(found).To(BeTrue()) + Expect(len(historyPage)).To(Equal(1)) + Expect(historyPage[0].Version).To(Equal(resourceVersions[9].Version)) + Expect(historyPage[0].Metadata).To(Equal([]atc.MetadataField{{Name: "name1", Value: "value1"}})) + }) + }) }) Context("when check orders are different than versions ids", func() { @@ -911,7 +934,7 @@ var _ = Describe("Resource", func() { Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) - resourceScope, err = resource.SetResourceConfig(logger, atc.Source{"some": "other-repository"}, creds.VersionedResourceTypes{}) + resourceScope, err = resource.SetResourceConfig(atc.Source{"some": "other-repository"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) originalVersionSlice := []atc.Version{ @@ -1044,10 +1067,10 @@ var _ = Describe("Resource", func() { Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) - resourceScope, err := resource.SetResourceConfig(logger, atc.Source{"some": "other-repository"}, creds.VersionedResourceTypes{}) + resourceScope, err := resource.SetResourceConfig(atc.Source{"some": "other-repository"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) - created, err := resource.SaveUncheckedVersion(atc.Version{"version": "not-returned"}, nil, 
resourceScope.ResourceConfig(), creds.VersionedResourceTypes{}) + created, err := resource.SaveUncheckedVersion(atc.Version{"version": "not-returned"}, nil, resourceScope.ResourceConfig(), atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) Expect(created).To(BeTrue()) }) @@ -1084,7 +1107,7 @@ var _ = Describe("Resource", func() { Expect(err).NotTo(HaveOccurred()) Expect(setupTx.Commit()).To(Succeed()) - resourceScope, err := resource.SetResourceConfig(logger, atc.Source{"some": "other-repository"}, creds.VersionedResourceTypes{}) + resourceScope, err := resource.SetResourceConfig(atc.Source{"some": "other-repository"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) err = resourceScope.SaveVersions([]atc.Version{ @@ -1168,7 +1191,7 @@ var _ = Describe("Resource", func() { Expect(err).NotTo(HaveOccurred()) Expect(setupTx.Commit()).To(Succeed()) - resourceScope, err := resource.SetResourceConfig(logger, atc.Source{"some": "repository"}, creds.VersionedResourceTypes{}) + resourceScope, err := resource.SetResourceConfig(atc.Source{"some": "repository"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) err = resourceScope.SaveVersions([]atc.Version{ diff --git a/atc/db/resource_type.go b/atc/db/resource_type.go index 3c858ab5d..a7f424da7 100644 --- a/atc/db/resource_type.go +++ b/atc/db/resource_type.go @@ -6,10 +6,8 @@ import ( "errors" "fmt" - "code.cloudfoundry.org/lager" sq "github.com/Masterminds/squirrel" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db/lock" ) @@ -36,7 +34,7 @@ type ResourceType interface { CheckError() error UniqueVersionHistory() bool - SetResourceConfig(lager.Logger, atc.Source, creds.VersionedResourceTypes) (ResourceConfigScope, error) + SetResourceConfig(atc.Source, atc.VersionedResourceTypes) (ResourceConfigScope, error) SetCheckSetupError(error) error Version() atc.Version @@ -146,7 +144,7 @@ func (t *resourceType) Reload() 
(bool, error) { return true, nil } -func (t *resourceType) SetResourceConfig(logger lager.Logger, source atc.Source, resourceTypes creds.VersionedResourceTypes) (ResourceConfigScope, error) { +func (t *resourceType) SetResourceConfig(source atc.Source, resourceTypes atc.VersionedResourceTypes) (ResourceConfigScope, error) { resourceConfigDescriptor, err := constructResourceConfigDescriptor(t.type_, source, resourceTypes) if err != nil { return nil, err @@ -159,7 +157,7 @@ func (t *resourceType) SetResourceConfig(logger lager.Logger, source atc.Source, defer Rollback(tx) - resourceConfig, err := resourceConfigDescriptor.findOrCreate(logger, tx, t.lockFactory, t.conn) + resourceConfig, err := resourceConfigDescriptor.findOrCreate(tx, t.lockFactory, t.conn) if err != nil { return nil, err } diff --git a/atc/db/resource_type_test.go b/atc/db/resource_type_test.go index dd08d78d0..ecb0c0fa2 100644 --- a/atc/db/resource_type_test.go +++ b/atc/db/resource_type_test.go @@ -4,7 +4,6 @@ import ( "errors" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -202,7 +201,7 @@ var _ = Describe("ResourceType", func() { Expect(err).NotTo(HaveOccurred()) Expect(setupTx.Commit()).To(Succeed()) - resourceTypeScope, err = resourceType.SetResourceConfig(logger, atc.Source{"some": "repository"}, creds.VersionedResourceTypes{}) + resourceTypeScope, err = resourceType.SetResourceConfig(atc.Source{"some": "repository"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) }) diff --git a/atc/db/team.go b/atc/db/team.go index 1089616c7..2636ac08b 100644 --- a/atc/db/team.go +++ b/atc/db/team.go @@ -7,7 +7,6 @@ import ( "fmt" "time" - "code.cloudfoundry.org/lager" sq "github.com/Masterminds/squirrel" "github.com/concourse/concourse/atc" "github.com/concourse/concourse/atc/creds" @@ -54,12 +53,12 @@ type Team interface { Workers() ([]Worker, error) FindVolumeForWorkerArtifact(int) (CreatedVolume, bool, error) - Containers(lager.Logger) ([]Container, error) + Containers() ([]Container, error) IsCheckContainer(string) (bool, error) IsContainerWithinTeam(string, bool) (bool, error) FindContainerByHandle(string) (Container, bool, error) - FindCheckContainers(lager.Logger, string, string, creds.Secrets) ([]Container, map[int]time.Time, error) + FindCheckContainers(string, string, creds.Secrets) ([]Container, map[int]time.Time, error) FindContainersByMetadata(ContainerMetadata) ([]Container, error) FindCreatedContainerByHandle(string) (CreatedContainer, bool, error) FindWorkerForContainer(handle string) (Worker, bool, error) @@ -152,9 +151,7 @@ func (t *team) FindWorkerForVolume(handle string) (Worker, bool, error) { })) } -func (t *team) Containers( - logger lager.Logger, -) ([]Container, error) { +func (t *team) Containers() ([]Container, error) { rows, err := selectContainers("c"). Join("workers w ON c.worker_name = w.name"). Join("resource_config_check_sessions rccs ON rccs.id = c.resource_config_check_session_id"). 
@@ -808,7 +805,7 @@ func (t *team) UpdateProviderAuth(auth atc.TeamAuth) error { return tx.Commit() } -func (t *team) FindCheckContainers(logger lager.Logger, pipelineName string, resourceName string, secretManager creds.Secrets) ([]Container, map[int]time.Time, error) { +func (t *team) FindCheckContainers(pipelineName string, resourceName string, secretManager creds.Secrets) ([]Container, map[int]time.Time, error) { pipeline, found, err := t.Pipeline(pipelineName) if err != nil { return nil, nil, err @@ -839,12 +836,16 @@ func (t *team) FindCheckContainers(logger lager.Logger, pipelineName string, res return nil, nil, err } + resourceTypes, err := creds.NewVersionedResourceTypes(variables, versionedResourceTypes).Evaluate() + if err != nil { + return nil, nil, err + } + resourceConfigFactory := NewResourceConfigFactory(t.conn, t.lockFactory) resourceConfig, err := resourceConfigFactory.FindOrCreateResourceConfig( - logger, resource.Type(), source, - creds.NewVersionedResourceTypes(variables, versionedResourceTypes), + resourceTypes, ) if err != nil { return nil, nil, err @@ -903,8 +904,8 @@ func (t *team) FindCheckContainers(logger lager.Logger, pipelineName string, res } type UpdateName struct { - OldName string - NewName string + OldName string + NewName string } func (t *team) updateName(tx Tx, jobs []atc.JobConfig, pipelineID int) error { @@ -916,7 +917,7 @@ func (t *team) updateName(tx Tx, jobs []atc.JobConfig, pipelineID int) error { err := psql.Select("COUNT(*) as count"). From("jobs"). Where(sq.Eq{ - "name": job.OldName, + "name": job.OldName, "pipeline_id": pipelineID}). RunWith(tx). QueryRow(). @@ -947,9 +948,9 @@ func (t *team) updateName(tx Tx, jobs []atc.JobConfig, pipelineID int) error { for _, updateName := range jobsToUpdate { _, err := psql.Delete("jobs"). Where(sq.Eq{ - "name": updateName.NewName, + "name": updateName.NewName, "pipeline_id": pipelineID, - "active": false}). + "active": false}). RunWith(tx). 
Exec() if err != nil { @@ -985,10 +986,10 @@ func checkCyclic(jobNames []UpdateName, curr string, visited map[int]bool) bool func sortUpdateNames(jobNames []UpdateName) []UpdateName { newMap := make(map[string]int) for i, job := range jobNames { - newMap[job.NewName] = i+1 + newMap[job.NewName] = i + 1 if newMap[job.OldName] != 0 { - index := newMap[job.OldName]-1 + index := newMap[job.OldName] - 1 tempJob := jobNames[index] jobNames[index] = job diff --git a/atc/db/team_test.go b/atc/db/team_test.go index 484ed6800..7ef7cfb16 100644 --- a/atc/db/team_test.go +++ b/atc/db/team_test.go @@ -6,9 +6,7 @@ import ( "strconv" "time" - "github.com/cloudfoundry/bosh-cli/director/template" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/creds/credsfakes" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/event" @@ -430,14 +428,14 @@ var _ = Describe("Team", func() { Expect(err).ToNot(HaveOccurred()) expiries := db.ContainerOwnerExpiries{ - Min: 5 * time.Minute, - Max: 1 * time.Hour, + Min: 5 * time.Minute, + Max: 1 * time.Hour, } pipelineResourceTypes, err := defaultPipeline.ResourceTypes() Expect(err).ToNot(HaveOccurred()) - resourceConfigScope, err = defaultResource.SetResourceConfig(logger, defaultResource.Source(), creds.NewVersionedResourceTypes(template.StaticVariables{}, pipelineResourceTypes.Deserialize())) + resourceConfigScope, err = defaultResource.SetResourceConfig(defaultResource.Source(), pipelineResourceTypes.Deserialize()) Expect(err).ToNot(HaveOccurred()) resourceContainer, err = defaultWorker.CreateContainer( @@ -448,7 +446,7 @@ var _ = Describe("Team", func() { }) It("finds all the containers", func() { - containers, err := defaultTeam.Containers(logger) + containers, err := defaultTeam.Containers() Expect(err).ToNot(HaveOccurred()) Expect(containers).To(HaveLen(2)) @@ -456,7 +454,7 @@ var _ = Describe("Team", func() { }) It("does not find containers for other 
teams", func() { - containers, err := otherTeam.Containers(logger) + containers, err := otherTeam.Containers() Expect(err).ToNot(HaveOccurred()) Expect(containers).To(BeEmpty()) }) @@ -479,11 +477,11 @@ var _ = Describe("Team", func() { Expect(err).ToNot(HaveOccurred()) expiries := db.ContainerOwnerExpiries{ - Min: 5 * time.Minute, - Max: 1 * time.Hour, + Min: 5 * time.Minute, + Max: 1 * time.Hour, } - resourceConfigScope, err = defaultResource.SetResourceConfig(logger, defaultResource.Source(), creds.VersionedResourceTypes{}) + resourceConfigScope, err = defaultResource.SetResourceConfig(defaultResource.Source(), atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) resourceContainer, err = worker.CreateContainer( @@ -496,7 +494,7 @@ var _ = Describe("Team", func() { }) It("finds the container", func() { - containers, err := defaultTeam.Containers(logger) + containers, err := defaultTeam.Containers() Expect(err).ToNot(HaveOccurred()) Expect(containers).To(HaveLen(1)) @@ -557,11 +555,11 @@ var _ = Describe("Team", func() { Expect(err).ToNot(HaveOccurred()) expiries := db.ContainerOwnerExpiries{ - Min: 5 * time.Minute, - Max: 1 * time.Hour, + Min: 5 * time.Minute, + Max: 1 * time.Hour, } - resourceConfigScope, err = otherResource.SetResourceConfig(logger, otherResource.Source(), creds.VersionedResourceTypes{}) + resourceConfigScope, err = otherResource.SetResourceConfig(otherResource.Source(), atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) resource2Container, err = worker.CreateContainer( @@ -574,7 +572,7 @@ var _ = Describe("Team", func() { }) It("returns the container only from the team", func() { - containers, err := otherTeam.Containers(logger) + containers, err := otherTeam.Containers() Expect(err).ToNot(HaveOccurred()) Expect(containers).To(HaveLen(1)) @@ -589,11 +587,11 @@ var _ = Describe("Team", func() { BeforeEach(func() { expiries := db.ContainerOwnerExpiries{ - Min: 5 * time.Minute, - Max: 1 * time.Hour, + Min: 5 * time.Minute, 
+ Max: 1 * time.Hour, } - resourceConfigScope, err := defaultResource.SetResourceConfig(logger, defaultResource.Source(), creds.VersionedResourceTypes{}) + resourceConfigScope, err := defaultResource.SetResourceConfig(defaultResource.Source(), atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) globalResourceContainer, err = defaultWorker.CreateContainer( @@ -606,7 +604,7 @@ var _ = Describe("Team", func() { }) It("returns the container only from the team worker and global worker", func() { - containers, err := defaultTeam.Containers(logger) + containers, err := defaultTeam.Containers() Expect(err).ToNot(HaveOccurred()) Expect(containers).To(HaveLen(2)) @@ -620,11 +618,11 @@ var _ = Describe("Team", func() { BeforeEach(func() { expiries := db.ContainerOwnerExpiries{ - Min: 5 * time.Minute, - Max: 1 * time.Hour, + Min: 5 * time.Minute, + Max: 1 * time.Hour, } - resourceConfigScope, err := defaultResourceType.SetResourceConfig(logger, defaultResourceType.Source(), creds.VersionedResourceTypes{}) + resourceConfigScope, err := defaultResourceType.SetResourceConfig(defaultResourceType.Source(), atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) resourceContainer, err = defaultWorker.CreateContainer( @@ -637,7 +635,7 @@ var _ = Describe("Team", func() { }) It("finds the container", func() { - containers, err := defaultTeam.Containers(logger) + containers, err := defaultTeam.Containers() Expect(err).ToNot(HaveOccurred()) Expect(containers).To(HaveLen(1)) @@ -1773,7 +1771,7 @@ var _ = Describe("Team", func() { Expect(err).NotTo(HaveOccurred()) Expect(setupTx.Commit()).To(Succeed()) - rc, err := resource.SetResourceConfig(logger, atc.Source{"source-config": "some-value"}, creds.VersionedResourceTypes{}) + rc, err := resource.SetResourceConfig(atc.Source{"source-config": "some-value"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) err = rc.SaveVersions([]atc.Version{ @@ -1827,7 +1825,7 @@ var _ = Describe("Team", func() { 
Expect(err).NotTo(HaveOccurred()) Expect(setupTx.Commit()).To(Succeed()) - rc, err := resource.SetResourceConfig(logger, atc.Source{"source-config": "some-value"}, creds.VersionedResourceTypes{}) + rc, err := resource.SetResourceConfig(atc.Source{"source-config": "some-value"}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) err = rc.SaveVersions([]atc.Version{ @@ -2579,12 +2577,11 @@ var _ = Describe("Team", func() { Describe("FindCheckContainers", func() { var ( fakeSecretManager *credsfakes.FakeSecrets - variables creds.Variables ) expiries := db.ContainerOwnerExpiries{ - Min: 5 * time.Minute, - Max: 1 * time.Hour, + Min: 5 * time.Minute, + Max: 1 * time.Hour, } BeforeEach(func() { @@ -2603,10 +2600,9 @@ var _ = Describe("Team", func() { Expect(err).ToNot(HaveOccurred()) resourceConfig, err = resourceConfigFactory.FindOrCreateResourceConfig( - logger, defaultResource.Type(), defaultResource.Source(), - creds.NewVersionedResourceTypes(variables, pipelineResourceTypes.Deserialize()), + pipelineResourceTypes.Deserialize(), ) Expect(err).ToNot(HaveOccurred()) @@ -2618,7 +2614,7 @@ var _ = Describe("Team", func() { }) It("returns check container for resource", func() { - containers, checkContainersExpiresAt, err := defaultTeam.FindCheckContainers(logger, "default-pipeline", "some-resource", fakeSecretManager) + containers, checkContainersExpiresAt, err := defaultTeam.FindCheckContainers("default-pipeline", "some-resource", fakeSecretManager) Expect(err).ToNot(HaveOccurred()) Expect(containers).To(HaveLen(1)) Expect(containers[0].ID()).To(Equal(resourceContainer.ID())) @@ -2654,21 +2650,20 @@ var _ = Describe("Team", func() { Expect(found).To(BeTrue()) resourceConfig, err = resourceConfigFactory.FindOrCreateResourceConfig( - logger, otherResource.Type(), otherResource.Source(), - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).ToNot(HaveOccurred()) - otherResourceContainer, _, err = defaultWorker.FindContainerOnWorker( + 
otherResourceContainer, _, err = defaultWorker.FindContainer( db.NewResourceConfigCheckSessionContainerOwner(resourceConfig, expiries), ) Expect(err).ToNot(HaveOccurred()) }) It("returns the same check container", func() { - containers, checkContainersExpiresAt, err := defaultTeam.FindCheckContainers(logger, "other-pipeline", "some-resource", fakeSecretManager) + containers, checkContainersExpiresAt, err := defaultTeam.FindCheckContainers("other-pipeline", "some-resource", fakeSecretManager) Expect(err).ToNot(HaveOccurred()) Expect(containers).To(HaveLen(1)) Expect(containers[0].ID()).To(Equal(otherResourceContainer.ID())) @@ -2681,7 +2676,7 @@ var _ = Describe("Team", func() { Context("when check container does not exist", func() { It("returns empty list", func() { - containers, checkContainersExpiresAt, err := defaultTeam.FindCheckContainers(logger, "default-pipeline", "some-resource", fakeSecretManager) + containers, checkContainersExpiresAt, err := defaultTeam.FindCheckContainers("default-pipeline", "some-resource", fakeSecretManager) Expect(err).ToNot(HaveOccurred()) Expect(containers).To(BeEmpty()) Expect(checkContainersExpiresAt).To(BeEmpty()) @@ -2691,7 +2686,7 @@ var _ = Describe("Team", func() { Context("when resource does not exist", func() { It("returns empty list", func() { - containers, checkContainersExpiresAt, err := defaultTeam.FindCheckContainers(logger, "default-pipeline", "non-existent-resource", fakeSecretManager) + containers, checkContainersExpiresAt, err := defaultTeam.FindCheckContainers("default-pipeline", "non-existent-resource", fakeSecretManager) Expect(err).ToNot(HaveOccurred()) Expect(containers).To(BeEmpty()) Expect(checkContainersExpiresAt).To(BeEmpty()) @@ -2701,7 +2696,7 @@ var _ = Describe("Team", func() { Context("when pipeline does not exist", func() { It("returns empty list", func() { - containers, checkContainersExpiresAt, err := defaultTeam.FindCheckContainers(logger, "non-existent-pipeline", "some-resource", 
fakeSecretManager) + containers, checkContainersExpiresAt, err := defaultTeam.FindCheckContainers("non-existent-pipeline", "some-resource", fakeSecretManager) Expect(err).ToNot(HaveOccurred()) Expect(containers).To(BeEmpty()) Expect(checkContainersExpiresAt).To(BeEmpty()) @@ -2713,15 +2708,14 @@ var _ = Describe("Team", func() { Context("when the container is a check container", func() { var resourceContainer db.Container expiries := db.ContainerOwnerExpiries{ - Min: 5 * time.Minute, - Max: 1 * time.Hour, + Min: 5 * time.Minute, + Max: 1 * time.Hour, } BeforeEach(func() { resourceConfigScope, err := defaultResource.SetResourceConfig( - logger, defaultResource.Source(), - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).ToNot(HaveOccurred()) diff --git a/atc/db/volume_repository_test.go b/atc/db/volume_repository_test.go index 67f525bfa..e7a24683c 100644 --- a/atc/db/volume_repository_test.go +++ b/atc/db/volume_repository_test.go @@ -4,9 +4,7 @@ import ( "time" sq "github.com/Masterminds/squirrel" - "github.com/cloudfoundry/bosh-cli/director/template" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" "github.com/lib/pq" . 
"github.com/onsi/ginkgo" @@ -26,7 +24,6 @@ var _ = Describe("VolumeFactory", func() { Expect(err).ToNot(HaveOccurred()) usedResourceCache, err = resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(build.ID()), "some-type", atc.Version{"some": "version"}, @@ -34,21 +31,18 @@ var _ = Describe("VolumeFactory", func() { "some": "source", }, atc.Params{"some": "params"}, - creds.NewVersionedResourceTypes( - template.StaticVariables{}, - atc.VersionedResourceTypes{ - atc.VersionedResourceType{ - ResourceType: atc.ResourceType{ - Name: "some-type", - Type: "some-base-resource-type", - Source: atc.Source{ - "some-type": "source", - }, + atc.VersionedResourceTypes{ + atc.VersionedResourceType{ + ResourceType: atc.ResourceType{ + Name: "some-type", + Type: "some-base-resource-type", + Source: atc.Source{ + "some-type": "source", }, - Version: atc.Version{"some-type": "version"}, }, + Version: atc.Version{"some-type": "version"}, }, - ), + }, ) Expect(err).NotTo(HaveOccurred()) }) @@ -493,7 +487,6 @@ var _ = Describe("VolumeFactory", func() { Expect(err).NotTo(HaveOccurred()) usedResourceCache, err = resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(build.ID()), "some-type", atc.Version{"some": "version"}, @@ -501,21 +494,18 @@ var _ = Describe("VolumeFactory", func() { "some": "source", }, atc.Params{"some": "params"}, - creds.NewVersionedResourceTypes( - template.StaticVariables{"source-param": "some-secret-sauce"}, - atc.VersionedResourceTypes{ - atc.VersionedResourceType{ - ResourceType: atc.ResourceType{ - Name: "some-type", - Type: "some-base-resource-type", - Source: atc.Source{ - "some-type": "source", - }, + atc.VersionedResourceTypes{ + atc.VersionedResourceType{ + ResourceType: atc.ResourceType{ + Name: "some-type", + Type: "some-base-resource-type", + Source: atc.Source{ + "some-type": "source", }, - Version: atc.Version{"some-type": "version"}, }, + Version: atc.Version{"some-type": "version"}, }, - ), + }, ) 
Expect(err).ToNot(HaveOccurred()) }) diff --git a/atc/db/volume_test.go b/atc/db/volume_test.go index aacb8407c..e6359b4b0 100644 --- a/atc/db/volume_test.go +++ b/atc/db/volume_test.go @@ -3,9 +3,7 @@ package db_test import ( "time" - "github.com/cloudfoundry/bosh-cli/director/template" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -17,11 +15,11 @@ var _ = Describe("Volume", func() { BeforeEach(func() { expiries := db.ContainerOwnerExpiries{ - Min: 5 * time.Minute, - Max: 1 * time.Hour, + Min: 5 * time.Minute, + Max: 1 * time.Hour, } - resourceConfig, err := resourceConfigFactory.FindOrCreateResourceConfig(logger, "some-base-resource-type", atc.Source{}, creds.VersionedResourceTypes{}) + resourceConfig, err := resourceConfigFactory.FindOrCreateResourceConfig("some-base-resource-type", atc.Source{}, atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) defaultCreatingContainer, err = defaultWorker.CreateContainer(db.NewResourceConfigCheckSessionContainerOwner(resourceConfig, expiries), db.ContainerMetadata{Type: "check"}) @@ -188,7 +186,6 @@ var _ = Describe("Volume", func() { Expect(err).ToNot(HaveOccurred()) resourceCache, err = resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(build.ID()), "some-type", atc.Version{"some": "version"}, @@ -196,21 +193,18 @@ var _ = Describe("Volume", func() { "some": "source", }, atc.Params{"some": "params"}, - creds.NewVersionedResourceTypes( - template.StaticVariables{"source-param": "some-secret-sauce"}, - atc.VersionedResourceTypes{ - atc.VersionedResourceType{ - ResourceType: atc.ResourceType{ - Name: "some-type", - Type: "some-base-resource-type", - Source: atc.Source{ - "some-type": "source", - }, + atc.VersionedResourceTypes{ + atc.VersionedResourceType{ + ResourceType: atc.ResourceType{ + Name: "some-type", + Type: "some-base-resource-type", + Source: 
atc.Source{ + "some-type": "source", }, - Version: atc.Version{"some-type": "version"}, }, + Version: atc.Version{"some-type": "version"}, }, - ), + }, ) Expect(err).ToNot(HaveOccurred()) @@ -414,24 +408,22 @@ var _ = Describe("Volume", func() { Expect(err).ToNot(HaveOccurred()) resourceCache, err := resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(build.ID()), "some-type", atc.Version{"some": "version"}, atc.Source{"some": "source"}, atc.Params{"some": "params"}, - creds.NewVersionedResourceTypes(template.StaticVariables{"source-param": "some-secret-sauce"}, - atc.VersionedResourceTypes{ - { - ResourceType: atc.ResourceType{ - Name: "some-type", - Type: "some-base-resource-type", - Source: atc.Source{"some-type": "((source-param))"}, - }, - Version: atc.Version{"some-custom-type": "version"}, + + atc.VersionedResourceTypes{ + { + ResourceType: atc.ResourceType{ + Name: "some-type", + Type: "some-base-resource-type", + Source: atc.Source{"some-type": "((source-param))"}, }, + Version: atc.Version{"some-custom-type": "version"}, }, - ), + }, ) Expect(err).ToNot(HaveOccurred()) @@ -543,24 +535,21 @@ var _ = Describe("Volume", func() { Expect(err).ToNot(HaveOccurred()) usedResourceCache, err := resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(build.ID()), "some-type", atc.Version{"some": "version"}, atc.Source{"some": "source"}, atc.Params{"some": "params"}, - creds.NewVersionedResourceTypes(template.StaticVariables{"source-param": "some-secret-sauce"}, - atc.VersionedResourceTypes{ - { - ResourceType: atc.ResourceType{ - Name: "some-type", - Type: "some-base-resource-type", - Source: atc.Source{"some-type": "source"}, - }, - Version: atc.Version{"some-custom-type": "version"}, + atc.VersionedResourceTypes{ + { + ResourceType: atc.ResourceType{ + Name: "some-type", + Type: "some-base-resource-type", + Source: atc.Source{"some-type": "source"}, }, + Version: atc.Version{"some-custom-type": "version"}, }, - ), + }, ) 
Expect(err).ToNot(HaveOccurred()) diff --git a/atc/db/worker.go b/atc/db/worker.go index 7b9b75774..79dc82206 100644 --- a/atc/db/worker.go +++ b/atc/db/worker.go @@ -67,7 +67,7 @@ type Worker interface { Prune() error Delete() error - FindContainerOnWorker(owner ContainerOwner) (CreatingContainer, CreatedContainer, error) + FindContainer(owner ContainerOwner) (CreatingContainer, CreatedContainer, error) CreateContainer(owner ContainerOwner, meta ContainerMetadata) (CreatingContainer, error) } @@ -255,7 +255,7 @@ func (worker *worker) ResourceCerts() (*UsedWorkerResourceCerts, bool, error) { return nil, false, nil } -func (worker *worker) FindContainerOnWorker(owner ContainerOwner) (CreatingContainer, CreatedContainer, error) { +func (worker *worker) FindContainer(owner ContainerOwner) (CreatingContainer, CreatedContainer, error) { ownerQuery, found, err := owner.Find(worker.conn) if err != nil { return nil, nil, err diff --git a/atc/db/worker_factory_test.go b/atc/db/worker_factory_test.go index ee3a4375d..9a7ec0d96 100644 --- a/atc/db/worker_factory_test.go +++ b/atc/db/worker_factory_test.go @@ -5,7 +5,6 @@ import ( sq "github.com/Masterminds/squirrel" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/db/dbfakes" @@ -566,8 +565,8 @@ var _ = Describe("WorkerFactory", func() { BeforeEach(func() { ownerExpiries := db.ContainerOwnerExpiries{ - Min: 5 * time.Minute, - Max: 5 * time.Minute, + Min: 5 * time.Minute, + Max: 5 * time.Minute, } var err error @@ -624,7 +623,7 @@ var _ = Describe("WorkerFactory", func() { Expect(err).NotTo(HaveOccurred()) Expect(found).To(BeTrue()) - rcs, err := otherResource.SetResourceConfig(logger, atc.Source{"some": "source"}, creds.VersionedResourceTypes{}) + rcs, err := otherResource.SetResourceConfig(atc.Source{"some": "source"}, atc.VersionedResourceTypes{}) Expect(err).NotTo(HaveOccurred()) owner = 
db.NewResourceConfigCheckSessionContainerOwner(rcs.ResourceConfig(), ownerExpiries) diff --git a/atc/db/worker_resource_cache_test.go b/atc/db/worker_resource_cache_test.go index b207faf0d..df78a55d7 100644 --- a/atc/db/worker_resource_cache_test.go +++ b/atc/db/worker_resource_cache_test.go @@ -2,7 +2,6 @@ package db_test import ( "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" . "github.com/onsi/ginkgo" @@ -18,13 +17,12 @@ var _ = Describe("WorkerResourceCache", func() { Expect(err).ToNot(HaveOccurred()) resourceCache, err := resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(build.ID()), "some-base-resource-type", atc.Version{"some": "version"}, atc.Source{"some": "source"}, atc.Params{}, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).ToNot(HaveOccurred()) @@ -82,13 +80,12 @@ var _ = Describe("WorkerResourceCache", func() { Expect(err).ToNot(HaveOccurred()) resourceCache, err := resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(build.ID()), "some-base-resource-type", atc.Version{"some": "version"}, atc.Source{"some": "source"}, atc.Params{}, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).ToNot(HaveOccurred()) @@ -133,13 +130,12 @@ var _ = Describe("WorkerResourceCache", func() { Expect(err).ToNot(HaveOccurred()) resourceCache, err := resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(build.ID()), "some-bogus-resource-type", atc.Version{"some": "version"}, atc.Source{"some": "source"}, atc.Params{}, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).ToNot(HaveOccurred()) diff --git a/atc/db/worker_test.go b/atc/db/worker_test.go index 9724514f2..31d2b9e1b 100644 --- a/atc/db/worker_test.go +++ b/atc/db/worker_test.go @@ -6,7 +6,6 @@ import ( "time" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" . 
"github.com/concourse/concourse/atc/db" . "github.com/onsi/ginkgo" @@ -206,7 +205,7 @@ var _ = Describe("Worker", func() { }) }) - Describe("FindContainerOnWorker/CreateContainer", func() { + Describe("FindContainer/CreateContainer", func() { var ( containerMetadata ContainerMetadata containerOwner ContainerOwner @@ -217,8 +216,8 @@ var _ = Describe("Worker", func() { ) expiries := ContainerOwnerExpiries{ - Min: 5 * time.Minute, - Max: 1 * time.Hour, + Min: 5 * time.Minute, + Max: 1 * time.Hour, } BeforeEach(func() { @@ -237,10 +236,9 @@ var _ = Describe("Worker", func() { Expect(err).NotTo(HaveOccurred()) resourceConfig, err := resourceConfigFactory.FindOrCreateResourceConfig( - logger, "some-resource-type", atc.Source{"some": "source"}, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).ToNot(HaveOccurred()) @@ -249,7 +247,7 @@ var _ = Describe("Worker", func() { JustBeforeEach(func() { var err error - foundCreatingContainer, foundCreatedContainer, err = worker.FindContainerOnWorker(containerOwner) + foundCreatingContainer, foundCreatedContainer, err = worker.FindContainer(containerOwner) Expect(err).ToNot(HaveOccurred()) }) diff --git a/atc/engine/builder/builder.go b/atc/engine/builder/builder.go index b650c70a2..059417231 100644 --- a/atc/engine/builder/builder.go +++ b/atc/engine/builder/builder.go @@ -2,7 +2,6 @@ package builder import ( "errors" - "fmt" "strconv" "strings" @@ -16,9 +15,9 @@ const supportedSchema = "exec.v2" //go:generate counterfeiter . 
StepFactory type StepFactory interface { - GetStep(atc.Plan, db.Build, exec.StepMetadata, db.ContainerMetadata, exec.GetDelegate) exec.Step - PutStep(atc.Plan, db.Build, exec.StepMetadata, db.ContainerMetadata, exec.PutDelegate) exec.Step - TaskStep(atc.Plan, db.Build, db.ContainerMetadata, exec.TaskDelegate) exec.Step + GetStep(atc.Plan, exec.StepMetadata, db.ContainerMetadata, exec.GetDelegate) exec.Step + PutStep(atc.Plan, exec.StepMetadata, db.ContainerMetadata, exec.PutDelegate) exec.Step + TaskStep(atc.Plan, exec.StepMetadata, db.ContainerMetadata, exec.TaskDelegate) exec.Step ArtifactInputStep(atc.Plan, db.Build, exec.BuildStepDelegate) exec.Step ArtifactOutputStep(atc.Plan, db.Build, exec.BuildStepDelegate) exec.Step } @@ -254,7 +253,6 @@ func (builder *stepBuilder) buildGetStep(build db.Build, plan atc.Plan) exec.Ste return builder.stepFactory.GetStep( plan, - build, stepMetadata, containerMetadata, builder.delegateFactory.GetDelegate(build, plan.ID), @@ -277,7 +275,6 @@ func (builder *stepBuilder) buildPutStep(build db.Build, plan atc.Plan) exec.Ste return builder.stepFactory.PutStep( plan, - build, stepMetadata, containerMetadata, builder.delegateFactory.PutDelegate(build, plan.ID), @@ -293,9 +290,14 @@ func (builder *stepBuilder) buildTaskStep(build db.Build, plan atc.Plan) exec.St plan.Attempts, ) + stepMetadata := builder.stepMetadata( + build, + builder.externalURL, + ) + return builder.stepFactory.TaskStep( plan, - build, + stepMetadata, containerMetadata, builder.delegateFactory.TaskDelegate(build, plan.ID), ) @@ -349,49 +351,16 @@ func (builder *stepBuilder) containerMetadata( func (builder *stepBuilder) stepMetadata( build db.Build, externalURL string, -) StepMetadata { - return StepMetadata{ +) exec.StepMetadata { + return exec.StepMetadata{ BuildID: build.ID(), BuildName: build.Name(), - JobName: build.JobName(), - PipelineName: build.PipelineName(), + TeamID: build.TeamID(), TeamName: build.TeamName(), + JobID: build.JobID(), + JobName: 
build.JobName(), + PipelineID: build.PipelineID(), + PipelineName: build.PipelineName(), ExternalURL: externalURL, } } - -type StepMetadata struct { - BuildID int - - PipelineName string - JobName string - BuildName string - ExternalURL string - TeamName string -} - -func (metadata StepMetadata) Env() []string { - env := []string{fmt.Sprintf("BUILD_ID=%d", metadata.BuildID)} - - if metadata.PipelineName != "" { - env = append(env, "BUILD_PIPELINE_NAME="+metadata.PipelineName) - } - - if metadata.JobName != "" { - env = append(env, "BUILD_JOB_NAME="+metadata.JobName) - } - - if metadata.BuildName != "" { - env = append(env, "BUILD_NAME="+metadata.BuildName) - } - - if metadata.ExternalURL != "" { - env = append(env, "ATC_EXTERNAL_URL="+metadata.ExternalURL) - } - - if metadata.TeamName != "" { - env = append(env, "BUILD_TEAM_NAME="+metadata.TeamName) - } - - return env -} diff --git a/atc/engine/builder/builder_test.go b/atc/engine/builder/builder_test.go index 084375f9e..5e56b04a8 100644 --- a/atc/engine/builder/builder_test.go +++ b/atc/engine/builder/builder_test.go @@ -58,7 +58,7 @@ var _ = Describe("Builder", func() { fakeBuild *dbfakes.FakeBuild expectedPlan atc.Plan - expectedMetadata builder.StepMetadata + expectedMetadata exec.StepMetadata ) BeforeEach(func() { @@ -72,12 +72,15 @@ var _ = Describe("Builder", func() { fakeBuild.TeamNameReturns("some-team") fakeBuild.TeamIDReturns(1111) - expectedMetadata = builder.StepMetadata{ + expectedMetadata = exec.StepMetadata{ BuildID: 4444, BuildName: "42", - JobName: "some-job", - PipelineName: "some-pipeline", + TeamID: 1111, TeamName: "some-team", + JobID: 3333, + JobName: "some-job", + PipelineID: 2222, + PipelineName: "some-pipeline", ExternalURL: "http://example.com", } }) @@ -146,8 +149,7 @@ var _ = Describe("Builder", func() { Context("constructing outputs", func() { It("constructs the put correctly", func() { - plan, build, stepMetadata, containerMetadata, _ := fakeStepFactory.PutStepArgsForCall(0) - 
Expect(build).To(Equal(fakeBuild)) + plan, stepMetadata, containerMetadata, _ := fakeStepFactory.PutStepArgsForCall(0) Expect(plan).To(Equal(putPlan)) Expect(stepMetadata).To(Equal(expectedMetadata)) Expect(containerMetadata).To(Equal(db.ContainerMetadata{ @@ -161,8 +163,7 @@ var _ = Describe("Builder", func() { BuildName: "42", })) - plan, build, stepMetadata, containerMetadata, _ = fakeStepFactory.PutStepArgsForCall(1) - Expect(build).To(Equal(fakeBuild)) + plan, stepMetadata, containerMetadata, _ = fakeStepFactory.PutStepArgsForCall(1) Expect(plan).To(Equal(otherPutPlan)) Expect(stepMetadata).To(Equal(expectedMetadata)) Expect(containerMetadata).To(Equal(db.ContainerMetadata{ @@ -222,8 +223,7 @@ var _ = Describe("Builder", func() { Context("constructing outputs", func() { It("constructs the put correctly", func() { - plan, build, stepMetadata, containerMetadata, _ := fakeStepFactory.PutStepArgsForCall(0) - Expect(build).To(Equal(fakeBuild)) + plan, stepMetadata, containerMetadata, _ := fakeStepFactory.PutStepArgsForCall(0) Expect(plan).To(Equal(putPlan)) Expect(stepMetadata).To(Equal(expectedMetadata)) Expect(containerMetadata).To(Equal(db.ContainerMetadata{ @@ -237,8 +237,7 @@ var _ = Describe("Builder", func() { BuildName: "42", })) - plan, build, stepMetadata, containerMetadata, _ = fakeStepFactory.PutStepArgsForCall(1) - Expect(build).To(Equal(fakeBuild)) + plan, stepMetadata, containerMetadata, _ = fakeStepFactory.PutStepArgsForCall(1) Expect(plan).To(Equal(otherPutPlan)) Expect(stepMetadata).To(Equal(expectedMetadata)) Expect(containerMetadata).To(Equal(db.ContainerMetadata{ @@ -314,8 +313,7 @@ var _ = Describe("Builder", func() { }) It("constructs the first get correctly", func() { - plan, build, stepMetadata, containerMetadata, _ := fakeStepFactory.GetStepArgsForCall(0) - Expect(build).To(Equal(fakeBuild)) + plan, stepMetadata, containerMetadata, _ := fakeStepFactory.GetStepArgsForCall(0) expectedPlan := getPlan expectedPlan.Attempts = []int{1} 
Expect(plan).To(Equal(expectedPlan)) @@ -334,8 +332,7 @@ var _ = Describe("Builder", func() { }) It("constructs the second get correctly", func() { - plan, build, stepMetadata, containerMetadata, _ := fakeStepFactory.GetStepArgsForCall(1) - Expect(build).To(Equal(fakeBuild)) + plan, stepMetadata, containerMetadata, _ := fakeStepFactory.GetStepArgsForCall(1) expectedPlan := getPlan expectedPlan.Attempts = []int{3} Expect(plan).To(Equal(expectedPlan)) @@ -358,11 +355,11 @@ var _ = Describe("Builder", func() { }) It("constructs nested steps correctly", func() { - plan, build, containerMetadata, _ := fakeStepFactory.TaskStepArgsForCall(0) - Expect(build).To(Equal(fakeBuild)) + plan, stepMetadata, containerMetadata, _ := fakeStepFactory.TaskStepArgsForCall(0) expectedPlan := taskPlan expectedPlan.Attempts = []int{2, 1} Expect(plan).To(Equal(expectedPlan)) + Expect(stepMetadata).To(Equal(expectedMetadata)) Expect(containerMetadata).To(Equal(db.ContainerMetadata{ Type: db.ContainerTypeTask, StepName: "some-task", @@ -375,11 +372,11 @@ var _ = Describe("Builder", func() { Attempt: "2.1", })) - plan, build, containerMetadata, _ = fakeStepFactory.TaskStepArgsForCall(1) - Expect(build).To(Equal(fakeBuild)) + plan, stepMetadata, containerMetadata, _ = fakeStepFactory.TaskStepArgsForCall(1) expectedPlan = taskPlan expectedPlan.Attempts = []int{2, 2} Expect(plan).To(Equal(expectedPlan)) + Expect(stepMetadata).To(Equal(expectedMetadata)) Expect(containerMetadata).To(Equal(db.ContainerMetadata{ Type: db.ContainerTypeTask, StepName: "some-task", @@ -474,8 +471,7 @@ var _ = Describe("Builder", func() { }) It("constructs inputs correctly", func() { - plan, dBuild, stepMetadata, containerMetadata, _ := fakeStepFactory.GetStepArgsForCall(0) - Expect(dBuild).To(Equal(fakeBuild)) + plan, stepMetadata, containerMetadata, _ := fakeStepFactory.GetStepArgsForCall(0) Expect(plan).To(Equal(expectedPlan)) Expect(stepMetadata).To(Equal(expectedMetadata)) 
Expect(containerMetadata).To(Equal(db.ContainerMetadata{ @@ -502,9 +498,9 @@ var _ = Describe("Builder", func() { }) It("constructs tasks correctly", func() { - plan, build, containerMetadata, _ := fakeStepFactory.TaskStepArgsForCall(0) - Expect(build).To(Equal(fakeBuild)) + plan, stepMetadata, containerMetadata, _ := fakeStepFactory.TaskStepArgsForCall(0) Expect(plan).To(Equal(expectedPlan)) + Expect(stepMetadata).To(Equal(expectedMetadata)) Expect(containerMetadata).To(Equal(db.ContainerMetadata{ Type: db.ContainerTypeTask, StepName: "some-task", @@ -551,8 +547,7 @@ var _ = Describe("Builder", func() { }) It("constructs the put correctly", func() { - plan, build, stepMetadata, containerMetadata, _ := fakeStepFactory.PutStepArgsForCall(0) - Expect(build).To(Equal(fakeBuild)) + plan, stepMetadata, containerMetadata, _ := fakeStepFactory.PutStepArgsForCall(0) Expect(plan).To(Equal(putPlan)) Expect(stepMetadata).To(Equal(expectedMetadata)) Expect(containerMetadata).To(Equal(db.ContainerMetadata{ @@ -568,8 +563,7 @@ var _ = Describe("Builder", func() { }) It("constructs the dependent get correctly", func() { - plan, build, stepMetadata, containerMetadata, _ := fakeStepFactory.GetStepArgsForCall(0) - Expect(build).To(Equal(fakeBuild)) + plan, stepMetadata, containerMetadata, _ := fakeStepFactory.GetStepArgsForCall(0) Expect(plan).To(Equal(dependentGetPlan)) Expect(stepMetadata).To(Equal(expectedMetadata)) Expect(containerMetadata).To(Equal(db.ContainerMetadata{ @@ -634,8 +628,7 @@ var _ = Describe("Builder", func() { It("constructs the step correctly", func() { Expect(fakeStepFactory.GetStepCallCount()).To(Equal(1)) - plan, build, stepMetadata, containerMetadata, _ := fakeStepFactory.GetStepArgsForCall(0) - Expect(build).To(Equal(fakeBuild)) + plan, stepMetadata, containerMetadata, _ := fakeStepFactory.GetStepArgsForCall(0) Expect(plan).To(Equal(inputPlan)) Expect(stepMetadata).To(Equal(expectedMetadata)) Expect(containerMetadata).To(Equal(db.ContainerMetadata{ @@ 
-652,9 +645,9 @@ var _ = Describe("Builder", func() { It("constructs the completion hook correctly", func() { Expect(fakeStepFactory.TaskStepCallCount()).To(Equal(4)) - plan, build, containerMetadata, _ := fakeStepFactory.TaskStepArgsForCall(2) - Expect(build).To(Equal(fakeBuild)) + plan, stepMetadata, containerMetadata, _ := fakeStepFactory.TaskStepArgsForCall(2) Expect(plan).To(Equal(completionTaskPlan)) + Expect(stepMetadata).To(Equal(expectedMetadata)) Expect(containerMetadata).To(Equal(db.ContainerMetadata{ PipelineID: 2222, PipelineName: "some-pipeline", @@ -669,9 +662,9 @@ var _ = Describe("Builder", func() { It("constructs the failure hook correctly", func() { Expect(fakeStepFactory.TaskStepCallCount()).To(Equal(4)) - plan, build, containerMetadata, _ := fakeStepFactory.TaskStepArgsForCall(0) - Expect(build).To(Equal(fakeBuild)) + plan, stepMetadata, containerMetadata, _ := fakeStepFactory.TaskStepArgsForCall(0) Expect(plan).To(Equal(failureTaskPlan)) + Expect(stepMetadata).To(Equal(expectedMetadata)) Expect(containerMetadata).To(Equal(db.ContainerMetadata{ PipelineID: 2222, PipelineName: "some-pipeline", @@ -686,9 +679,9 @@ var _ = Describe("Builder", func() { It("constructs the success hook correctly", func() { Expect(fakeStepFactory.TaskStepCallCount()).To(Equal(4)) - plan, build, containerMetadata, _ := fakeStepFactory.TaskStepArgsForCall(1) - Expect(build).To(Equal(fakeBuild)) + plan, stepMetadata, containerMetadata, _ := fakeStepFactory.TaskStepArgsForCall(1) Expect(plan).To(Equal(successTaskPlan)) + Expect(stepMetadata).To(Equal(expectedMetadata)) Expect(containerMetadata).To(Equal(db.ContainerMetadata{ PipelineID: 2222, PipelineName: "some-pipeline", @@ -703,9 +696,9 @@ var _ = Describe("Builder", func() { It("constructs the next step correctly", func() { Expect(fakeStepFactory.TaskStepCallCount()).To(Equal(4)) - plan, build, containerMetadata, _ := fakeStepFactory.TaskStepArgsForCall(3) - Expect(build).To(Equal(fakeBuild)) + plan, stepMetadata, 
containerMetadata, _ := fakeStepFactory.TaskStepArgsForCall(3) Expect(plan).To(Equal(nextTaskPlan)) + Expect(stepMetadata).To(Equal(expectedMetadata)) Expect(containerMetadata).To(Equal(db.ContainerMetadata{ PipelineID: 2222, PipelineName: "some-pipeline", @@ -735,8 +728,7 @@ var _ = Describe("Builder", func() { It("constructs the step correctly", func() { Expect(fakeStepFactory.GetStepCallCount()).To(Equal(1)) - plan, build, stepMetadata, containerMetadata, _ := fakeStepFactory.GetStepArgsForCall(0) - Expect(build).To(Equal(fakeBuild)) + plan, stepMetadata, containerMetadata, _ := fakeStepFactory.GetStepArgsForCall(0) Expect(plan).To(Equal(inputPlan)) Expect(stepMetadata).To(Equal(expectedMetadata)) Expect(containerMetadata).To(Equal(db.ContainerMetadata{ @@ -755,46 +747,4 @@ var _ = Describe("Builder", func() { }) }) - Describe("StepMetadata", func() { - var stepMetadata builder.StepMetadata - - Describe("Env", func() { - Context("when populating fields", func() { - BeforeEach(func() { - stepMetadata = builder.StepMetadata{ - BuildID: 1, - PipelineName: "some-pipeline-name", - JobName: "some-job-name", - BuildName: "42", - ExternalURL: "http://www.example.com", - TeamName: "some-team", - } - }) - - It("returns the specified values", func() { - Expect(stepMetadata.Env()).To(Equal([]string{ - "BUILD_ID=1", - "BUILD_PIPELINE_NAME=some-pipeline-name", - "BUILD_JOB_NAME=some-job-name", - "BUILD_NAME=42", - "ATC_EXTERNAL_URL=http://www.example.com", - "BUILD_TEAM_NAME=some-team", - })) - }) - }) - - Context("when fields are empty", func() { - BeforeEach(func() { - stepMetadata = builder.StepMetadata{ - BuildID: 1, - } - }) - It("does not include fields that are not set", func() { - Expect(stepMetadata.Env()).To(Equal([]string{ - "BUILD_ID=1", - })) - }) - }) - }) - }) }) diff --git a/atc/engine/builder/builderfakes/fake_step_factory.go b/atc/engine/builder/builderfakes/fake_step_factory.go index 3c2f06a11..5f36c606f 100644 --- 
a/atc/engine/builder/builderfakes/fake_step_factory.go +++ b/atc/engine/builder/builderfakes/fake_step_factory.go @@ -37,14 +37,13 @@ type FakeStepFactory struct { artifactOutputStepReturnsOnCall map[int]struct { result1 exec.Step } - GetStepStub func(atc.Plan, db.Build, exec.StepMetadata, db.ContainerMetadata, exec.GetDelegate) exec.Step + GetStepStub func(atc.Plan, exec.StepMetadata, db.ContainerMetadata, exec.GetDelegate) exec.Step getStepMutex sync.RWMutex getStepArgsForCall []struct { arg1 atc.Plan - arg2 db.Build - arg3 exec.StepMetadata - arg4 db.ContainerMetadata - arg5 exec.GetDelegate + arg2 exec.StepMetadata + arg3 db.ContainerMetadata + arg4 exec.GetDelegate } getStepReturns struct { result1 exec.Step @@ -52,14 +51,13 @@ type FakeStepFactory struct { getStepReturnsOnCall map[int]struct { result1 exec.Step } - PutStepStub func(atc.Plan, db.Build, exec.StepMetadata, db.ContainerMetadata, exec.PutDelegate) exec.Step + PutStepStub func(atc.Plan, exec.StepMetadata, db.ContainerMetadata, exec.PutDelegate) exec.Step putStepMutex sync.RWMutex putStepArgsForCall []struct { arg1 atc.Plan - arg2 db.Build - arg3 exec.StepMetadata - arg4 db.ContainerMetadata - arg5 exec.PutDelegate + arg2 exec.StepMetadata + arg3 db.ContainerMetadata + arg4 exec.PutDelegate } putStepReturns struct { result1 exec.Step @@ -67,11 +65,11 @@ type FakeStepFactory struct { putStepReturnsOnCall map[int]struct { result1 exec.Step } - TaskStepStub func(atc.Plan, db.Build, db.ContainerMetadata, exec.TaskDelegate) exec.Step + TaskStepStub func(atc.Plan, exec.StepMetadata, db.ContainerMetadata, exec.TaskDelegate) exec.Step taskStepMutex sync.RWMutex taskStepArgsForCall []struct { arg1 atc.Plan - arg2 db.Build + arg2 exec.StepMetadata arg3 db.ContainerMetadata arg4 exec.TaskDelegate } @@ -209,20 +207,19 @@ func (fake *FakeStepFactory) ArtifactOutputStepReturnsOnCall(i int, result1 exec }{result1} } -func (fake *FakeStepFactory) GetStep(arg1 atc.Plan, arg2 db.Build, arg3 exec.StepMetadata, arg4 
db.ContainerMetadata, arg5 exec.GetDelegate) exec.Step { +func (fake *FakeStepFactory) GetStep(arg1 atc.Plan, arg2 exec.StepMetadata, arg3 db.ContainerMetadata, arg4 exec.GetDelegate) exec.Step { fake.getStepMutex.Lock() ret, specificReturn := fake.getStepReturnsOnCall[len(fake.getStepArgsForCall)] fake.getStepArgsForCall = append(fake.getStepArgsForCall, struct { arg1 atc.Plan - arg2 db.Build - arg3 exec.StepMetadata - arg4 db.ContainerMetadata - arg5 exec.GetDelegate - }{arg1, arg2, arg3, arg4, arg5}) - fake.recordInvocation("GetStep", []interface{}{arg1, arg2, arg3, arg4, arg5}) + arg2 exec.StepMetadata + arg3 db.ContainerMetadata + arg4 exec.GetDelegate + }{arg1, arg2, arg3, arg4}) + fake.recordInvocation("GetStep", []interface{}{arg1, arg2, arg3, arg4}) fake.getStepMutex.Unlock() if fake.GetStepStub != nil { - return fake.GetStepStub(arg1, arg2, arg3, arg4, arg5) + return fake.GetStepStub(arg1, arg2, arg3, arg4) } if specificReturn { return ret.result1 @@ -237,17 +234,17 @@ func (fake *FakeStepFactory) GetStepCallCount() int { return len(fake.getStepArgsForCall) } -func (fake *FakeStepFactory) GetStepCalls(stub func(atc.Plan, db.Build, exec.StepMetadata, db.ContainerMetadata, exec.GetDelegate) exec.Step) { +func (fake *FakeStepFactory) GetStepCalls(stub func(atc.Plan, exec.StepMetadata, db.ContainerMetadata, exec.GetDelegate) exec.Step) { fake.getStepMutex.Lock() defer fake.getStepMutex.Unlock() fake.GetStepStub = stub } -func (fake *FakeStepFactory) GetStepArgsForCall(i int) (atc.Plan, db.Build, exec.StepMetadata, db.ContainerMetadata, exec.GetDelegate) { +func (fake *FakeStepFactory) GetStepArgsForCall(i int) (atc.Plan, exec.StepMetadata, db.ContainerMetadata, exec.GetDelegate) { fake.getStepMutex.RLock() defer fake.getStepMutex.RUnlock() argsForCall := fake.getStepArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5 + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 } 
func (fake *FakeStepFactory) GetStepReturns(result1 exec.Step) { @@ -273,20 +270,19 @@ func (fake *FakeStepFactory) GetStepReturnsOnCall(i int, result1 exec.Step) { }{result1} } -func (fake *FakeStepFactory) PutStep(arg1 atc.Plan, arg2 db.Build, arg3 exec.StepMetadata, arg4 db.ContainerMetadata, arg5 exec.PutDelegate) exec.Step { +func (fake *FakeStepFactory) PutStep(arg1 atc.Plan, arg2 exec.StepMetadata, arg3 db.ContainerMetadata, arg4 exec.PutDelegate) exec.Step { fake.putStepMutex.Lock() ret, specificReturn := fake.putStepReturnsOnCall[len(fake.putStepArgsForCall)] fake.putStepArgsForCall = append(fake.putStepArgsForCall, struct { arg1 atc.Plan - arg2 db.Build - arg3 exec.StepMetadata - arg4 db.ContainerMetadata - arg5 exec.PutDelegate - }{arg1, arg2, arg3, arg4, arg5}) - fake.recordInvocation("PutStep", []interface{}{arg1, arg2, arg3, arg4, arg5}) + arg2 exec.StepMetadata + arg3 db.ContainerMetadata + arg4 exec.PutDelegate + }{arg1, arg2, arg3, arg4}) + fake.recordInvocation("PutStep", []interface{}{arg1, arg2, arg3, arg4}) fake.putStepMutex.Unlock() if fake.PutStepStub != nil { - return fake.PutStepStub(arg1, arg2, arg3, arg4, arg5) + return fake.PutStepStub(arg1, arg2, arg3, arg4) } if specificReturn { return ret.result1 @@ -301,17 +297,17 @@ func (fake *FakeStepFactory) PutStepCallCount() int { return len(fake.putStepArgsForCall) } -func (fake *FakeStepFactory) PutStepCalls(stub func(atc.Plan, db.Build, exec.StepMetadata, db.ContainerMetadata, exec.PutDelegate) exec.Step) { +func (fake *FakeStepFactory) PutStepCalls(stub func(atc.Plan, exec.StepMetadata, db.ContainerMetadata, exec.PutDelegate) exec.Step) { fake.putStepMutex.Lock() defer fake.putStepMutex.Unlock() fake.PutStepStub = stub } -func (fake *FakeStepFactory) PutStepArgsForCall(i int) (atc.Plan, db.Build, exec.StepMetadata, db.ContainerMetadata, exec.PutDelegate) { +func (fake *FakeStepFactory) PutStepArgsForCall(i int) (atc.Plan, exec.StepMetadata, db.ContainerMetadata, exec.PutDelegate) { 
fake.putStepMutex.RLock() defer fake.putStepMutex.RUnlock() argsForCall := fake.putStepArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5 + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 } func (fake *FakeStepFactory) PutStepReturns(result1 exec.Step) { @@ -337,12 +333,12 @@ func (fake *FakeStepFactory) PutStepReturnsOnCall(i int, result1 exec.Step) { }{result1} } -func (fake *FakeStepFactory) TaskStep(arg1 atc.Plan, arg2 db.Build, arg3 db.ContainerMetadata, arg4 exec.TaskDelegate) exec.Step { +func (fake *FakeStepFactory) TaskStep(arg1 atc.Plan, arg2 exec.StepMetadata, arg3 db.ContainerMetadata, arg4 exec.TaskDelegate) exec.Step { fake.taskStepMutex.Lock() ret, specificReturn := fake.taskStepReturnsOnCall[len(fake.taskStepArgsForCall)] fake.taskStepArgsForCall = append(fake.taskStepArgsForCall, struct { arg1 atc.Plan - arg2 db.Build + arg2 exec.StepMetadata arg3 db.ContainerMetadata arg4 exec.TaskDelegate }{arg1, arg2, arg3, arg4}) @@ -364,13 +360,13 @@ func (fake *FakeStepFactory) TaskStepCallCount() int { return len(fake.taskStepArgsForCall) } -func (fake *FakeStepFactory) TaskStepCalls(stub func(atc.Plan, db.Build, db.ContainerMetadata, exec.TaskDelegate) exec.Step) { +func (fake *FakeStepFactory) TaskStepCalls(stub func(atc.Plan, exec.StepMetadata, db.ContainerMetadata, exec.TaskDelegate) exec.Step) { fake.taskStepMutex.Lock() defer fake.taskStepMutex.Unlock() fake.TaskStepStub = stub } -func (fake *FakeStepFactory) TaskStepArgsForCall(i int) (atc.Plan, db.Build, db.ContainerMetadata, exec.TaskDelegate) { +func (fake *FakeStepFactory) TaskStepArgsForCall(i int) (atc.Plan, exec.StepMetadata, db.ContainerMetadata, exec.TaskDelegate) { fake.taskStepMutex.RLock() defer fake.taskStepMutex.RUnlock() argsForCall := fake.taskStepArgsForCall[i] diff --git a/atc/engine/builder/delegate_factory.go b/atc/engine/builder/delegate_factory.go index ac88db751..7528e0482 100644 --- 
a/atc/engine/builder/delegate_factory.go +++ b/atc/engine/builder/delegate_factory.go @@ -95,6 +95,44 @@ func (d *getDelegate) Finished(logger lager.Logger, exitStatus exec.ExitStatus, logger.Info("finished", lager.Data{"exit-status": exitStatus}) } +func (d *getDelegate) UpdateVersion(log lager.Logger, plan atc.GetPlan, info exec.VersionInfo) { + logger := log.WithData(lager.Data{ + "pipeline-name": d.build.PipelineName(), + "pipeline-id": d.build.PipelineID()}, + ) + + pipeline, found, err := d.build.Pipeline() + if err != nil { + logger.Error("failed-to-find-pipeline", err) + return + } + + if !found { + logger.Debug("pipeline-not-found") + return + } + + resource, found, err := pipeline.Resource(plan.Resource) + if err != nil { + logger.Error("failed-to-find-resource", err) + return + } + + if !found { + logger.Debug("resource-not-found") + return + } + + _, err = resource.UpdateMetadata( + info.Version, + db.NewResourceConfigMetadataFields(info.Metadata), + ) + if err != nil { + logger.Error("failed-to-save-resource-config-version-metadata", err) + return + } +} + func NewPutDelegate(build db.Build, planID atc.PlanID, clock clock.Clock) exec.PutDelegate { return &putDelegate{ BuildStepDelegate: NewBuildStepDelegate(build, planID, clock), @@ -155,6 +193,29 @@ func (d *putDelegate) Finished(logger lager.Logger, exitStatus exec.ExitStatus, logger.Info("finished", lager.Data{"exit-status": exitStatus, "version-info": info}) } +func (d *putDelegate) SaveOutput(log lager.Logger, plan atc.PutPlan, source atc.Source, resourceTypes atc.VersionedResourceTypes, info exec.VersionInfo) { + logger := log.WithData(lager.Data{ + "step": plan.Name, + "resource": plan.Resource, + "resource-type": plan.Type, + "version": info.Version, + }) + + err := d.build.SaveOutput( + plan.Type, + source, + resourceTypes, + info.Version, + db.NewResourceConfigMetadataFields(info.Metadata), + plan.Name, + plan.Resource, + ) + if err != nil { + logger.Error("failed-to-save-output", err) + 
return + } +} + func NewTaskDelegate(build db.Build, planID atc.PlanID, clock clock.Clock) exec.TaskDelegate { return &taskDelegate{ BuildStepDelegate: NewBuildStepDelegate(build, planID, clock), diff --git a/atc/engine/builder/delegate_factory_test.go b/atc/engine/builder/delegate_factory_test.go index 465e8172b..e96bb1814 100644 --- a/atc/engine/builder/delegate_factory_test.go +++ b/atc/engine/builder/delegate_factory_test.go @@ -9,6 +9,7 @@ import ( "code.cloudfoundry.org/lager" "code.cloudfoundry.org/lager/lagertest" "github.com/concourse/concourse/atc" + "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/db/dbfakes" "github.com/concourse/concourse/atc/engine/builder" "github.com/concourse/concourse/atc/event" @@ -20,15 +21,19 @@ import ( var _ = Describe("DelegateFactory", func() { var ( - logger *lagertest.TestLogger - fakeBuild *dbfakes.FakeBuild - fakeClock *fakeclock.FakeClock + logger *lagertest.TestLogger + fakeBuild *dbfakes.FakeBuild + fakePipeline *dbfakes.FakePipeline + fakeResource *dbfakes.FakeResource + fakeClock *fakeclock.FakeClock ) BeforeEach(func() { logger = lagertest.NewTestLogger("test") fakeBuild = new(dbfakes.FakeBuild) + fakePipeline = new(dbfakes.FakePipeline) + fakeResource = new(dbfakes.FakeResource) fakeClock = fakeclock.NewFakeClock(time.Unix(123456789, 0)) }) @@ -64,6 +69,82 @@ var _ = Describe("DelegateFactory", func() { })) }) }) + + Describe("UpdateVersion", func() { + JustBeforeEach(func() { + plan := atc.GetPlan{Resource: "some-resource"} + delegate.UpdateVersion(logger, plan, info) + }) + + Context("when retrieving the pipeline fails", func() { + BeforeEach(func() { + fakeBuild.PipelineReturns(nil, false, errors.New("nope")) + }) + + It("doesn't update the metadata", func() { + Expect(fakeResource.UpdateMetadataCallCount()).To(Equal(0)) + }) + }) + + Context("when retrieving the pipeline succeeds", func() { + + Context("when the pipeline is not found", func() { + BeforeEach(func() { + 
fakeBuild.PipelineReturns(nil, false, nil) + }) + + It("doesn't update the metadata", func() { + Expect(fakeResource.UpdateMetadataCallCount()).To(Equal(0)) + }) + }) + + Context("when the pipeline is found", func() { + BeforeEach(func() { + fakeBuild.PipelineReturns(fakePipeline, true, nil) + }) + + Context("when retrieving the resource fails", func() { + BeforeEach(func() { + fakePipeline.ResourceReturns(nil, false, errors.New("nope")) + }) + + It("doesn't update the metadata", func() { + Expect(fakeResource.UpdateMetadataCallCount()).To(Equal(0)) + }) + }) + + Context("when retrieving the resource succeeds", func() { + + It("retrives the resource by name", func() { + Expect(fakePipeline.ResourceArgsForCall(0)).To(Equal("some-resource")) + }) + + Context("when the resource is not found", func() { + BeforeEach(func() { + fakePipeline.ResourceReturns(nil, false, nil) + }) + + It("doesn't update the metadata", func() { + Expect(fakeResource.UpdateMetadataCallCount()).To(Equal(0)) + }) + }) + + Context("when the resource is found", func() { + BeforeEach(func() { + fakePipeline.ResourceReturns(fakeResource, true, nil) + }) + + It("updates the metadata", func() { + Expect(fakeResource.UpdateMetadataCallCount()).To(Equal(1)) + version, metadata := fakeResource.UpdateMetadataArgsForCall(0) + Expect(version).To(Equal(info.Version)) + Expect(metadata).To(Equal(db.NewResourceConfigMetadataFields(info.Metadata))) + }) + }) + }) + }) + }) + }) }) Describe("PutDelegate", func() { @@ -98,6 +179,36 @@ var _ = Describe("DelegateFactory", func() { })) }) }) + + Describe("SaveOutput", func() { + var plan atc.PutPlan + var source atc.Source + var resourceTypes atc.VersionedResourceTypes + + JustBeforeEach(func() { + plan = atc.PutPlan{ + Name: "some-name", + Type: "some-type", + Resource: "some-resource", + } + source = atc.Source{"some": "source"} + resourceTypes = atc.VersionedResourceTypes{} + + delegate.SaveOutput(logger, plan, source, resourceTypes, info) + }) + + It("saves the 
build output", func() { + Expect(fakeBuild.SaveOutputCallCount()).To(Equal(1)) + resourceType, sourceArg, resourceTypesArg, version, metadata, name, resource := fakeBuild.SaveOutputArgsForCall(0) + Expect(resourceType).To(Equal(plan.Type)) + Expect(sourceArg).To(Equal(source)) + Expect(resourceTypesArg).To(Equal(resourceTypes)) + Expect(version).To(Equal(info.Version)) + Expect(metadata).To(Equal(db.NewResourceConfigMetadataFields(info.Metadata))) + Expect(name).To(Equal(plan.Name)) + Expect(resource).To(Equal(plan.Resource)) + }) + }) }) Describe("TaskDelegate", func() { diff --git a/atc/engine/builder/step_factory.go b/atc/engine/builder/step_factory.go index 9891935fc..03729a23a 100644 --- a/atc/engine/builder/step_factory.go +++ b/atc/engine/builder/step_factory.go @@ -5,7 +5,6 @@ import ( "fmt" "path/filepath" - "github.com/cloudfoundry/bosh-cli/director/template" "github.com/concourse/concourse/atc" "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" @@ -52,39 +51,23 @@ func NewStepFactory( func (factory *stepFactory) GetStep( plan atc.Plan, - build db.Build, stepMetadata exec.StepMetadata, - workerMetadata db.ContainerMetadata, + containerMetadata db.ContainerMetadata, delegate exec.GetDelegate, ) exec.Step { - workerMetadata.WorkingDirectory = resource.ResourcesDir("get") - - variables := creds.NewVariables(factory.secretManager, build.TeamName(), build.PipelineName()) + containerMetadata.WorkingDirectory = resource.ResourcesDir("get") getStep := exec.NewGetStep( - build, - - plan.Get.Name, - plan.Get.Type, - plan.Get.Resource, - creds.NewSource(variables, plan.Get.Source), - creds.NewParams(variables, plan.Get.Params), - exec.NewVersionSourceFromPlan(plan.Get), - plan.Get.Tags, - - delegate, - factory.resourceFetcher, - build.TeamID(), - build.ID(), plan.ID, - workerMetadata, - factory.resourceCacheFactory, + *plan.Get, stepMetadata, - - creds.NewVersionedResourceTypes(variables, plan.Get.VersionedResourceTypes), - + 
containerMetadata, + factory.secretManager, + factory.resourceFetcher, + factory.resourceCacheFactory, factory.strategy, factory.pool, + delegate, ) return exec.LogError(getStep, delegate) @@ -92,50 +75,23 @@ func (factory *stepFactory) GetStep( func (factory *stepFactory) PutStep( plan atc.Plan, - build db.Build, stepMetadata exec.StepMetadata, - workerMetadata db.ContainerMetadata, + containerMetadata db.ContainerMetadata, delegate exec.PutDelegate, ) exec.Step { - workerMetadata.WorkingDirectory = resource.ResourcesDir("put") - - variables := creds.NewVariables(factory.secretManager, build.TeamName(), build.PipelineName()) - - var putInputs exec.PutInputs - if plan.Put.Inputs == nil { - // Put step defaults to all inputs if not specified - putInputs = exec.NewAllInputs() - } else if plan.Put.Inputs.All { - putInputs = exec.NewAllInputs() - } else { - // Covers both cases where inputs are specified and when there are no - // inputs specified and "all" field is given a false boolean, which will - // result in no inputs attached - putInputs = exec.NewSpecificInputs(plan.Put.Inputs.Specified) - } + containerMetadata.WorkingDirectory = resource.ResourcesDir("put") putStep := exec.NewPutStep( - build, - - plan.Put.Name, - plan.Put.Type, - plan.Put.Resource, - creds.NewSource(variables, plan.Put.Source), - creds.NewParams(variables, plan.Put.Params), - plan.Put.Tags, - putInputs, - - delegate, - factory.pool, - factory.resourceConfigFactory, plan.ID, - workerMetadata, + *plan.Put, stepMetadata, - - creds.NewVersionedResourceTypes(variables, plan.Put.VersionedResourceTypes), - - factory.strategy, + containerMetadata, + factory.secretManager, factory.resourceFactory, + factory.resourceConfigFactory, + factory.strategy, + factory.pool, + delegate, ) return exec.LogError(putStep, delegate) @@ -143,66 +99,23 @@ func (factory *stepFactory) PutStep( func (factory *stepFactory) TaskStep( plan atc.Plan, - build db.Build, + stepMetadata exec.StepMetadata, containerMetadata 
db.ContainerMetadata, delegate exec.TaskDelegate, ) exec.Step { sum := sha1.Sum([]byte(plan.Task.Name)) - workingDirectory := filepath.Join("/tmp", "build", fmt.Sprintf("%x", sum[:4])) - - containerMetadata.WorkingDirectory = workingDirectory - - credMgrVariables := creds.NewVariables(factory.secretManager, build.TeamName(), build.PipelineName()) - - var taskConfigSource exec.TaskConfigSource - var taskVars []template.Variables - - if plan.Task.ConfigPath != "" { - // external task - construct a source which reads it from file - taskConfigSource = exec.FileConfigSource{ConfigPath: plan.Task.ConfigPath} - - // for interpolation - use 'vars' from the pipeline, and then fill remaining with cred mgr variables - taskVars = []template.Variables{template.StaticVariables(plan.Task.Vars), credMgrVariables} - } else { - // embedded task - first we take it - taskConfigSource = exec.StaticConfigSource{Config: plan.Task.Config} - - // for interpolation - use just cred mgr variables - taskVars = []template.Variables{credMgrVariables} - } - - // override params - taskConfigSource = &exec.OverrideParamsConfigSource{ConfigSource: taskConfigSource, Params: plan.Task.Params} - - // interpolate template vars - taskConfigSource = exec.InterpolateTemplateConfigSource{ConfigSource: taskConfigSource, Vars: taskVars} - - // validate - taskConfigSource = exec.ValidatingConfigSource{ConfigSource: taskConfigSource} + containerMetadata.WorkingDirectory = filepath.Join("/tmp", "build", fmt.Sprintf("%x", sum[:4])) taskStep := exec.NewTaskStep( - exec.Privileged(plan.Task.Privileged), - taskConfigSource, - plan.Task.Tags, - plan.Task.InputMapping, - plan.Task.OutputMapping, - - workingDirectory, - plan.Task.ImageArtifactName, - - delegate, - - factory.pool, - build.TeamID(), - build.ID(), - build.JobID(), - plan.Task.Name, plan.ID, - containerMetadata, - - creds.NewVersionedResourceTypes(credMgrVariables, plan.Task.VersionedResourceTypes), + *plan.Task, factory.defaultLimits, + stepMetadata, + 
containerMetadata, + factory.secretManager, factory.strategy, + factory.pool, + delegate, ) return exec.LogError(taskStep, delegate) diff --git a/atc/engine/engine.go b/atc/engine/engine.go index 6ece67398..8028ff8e6 100644 --- a/atc/engine/engine.go +++ b/atc/engine/engine.go @@ -16,14 +16,14 @@ import ( //go:generate counterfeiter . Engine type Engine interface { - LookupBuild(lager.Logger, db.Build) Build + NewBuild(db.Build) Runnable ReleaseAll(lager.Logger) } -//go:generate counterfeiter . Build +//go:generate counterfeiter . Runnable -type Build interface { - Resume(lager.Logger) +type Runnable interface { + Run(logger lager.Logger) } //go:generate counterfeiter . StepBuilder @@ -50,7 +50,19 @@ type engine struct { waitGroup *sync.WaitGroup } -func (engine *engine) LookupBuild(logger lager.Logger, build db.Build) Build { +func (engine *engine) ReleaseAll(logger lager.Logger) { + logger.Info("calling-release-on-builds") + + close(engine.release) + + logger.Info("waiting-on-builds") + + engine.waitGroup.Wait() + + logger.Info("finished-waiting-on-builds") +} + +func (engine *engine) NewBuild(build db.Build) Runnable { ctx, cancel := context.WithCancel(context.Background()) @@ -65,18 +77,6 @@ func (engine *engine) LookupBuild(logger lager.Logger, build db.Build) Build { ) } -func (engine *engine) ReleaseAll(logger lager.Logger) { - logger.Info("calling-release-on-builds") - - close(engine.release) - - logger.Info("waiting-on-builds") - - engine.waitGroup.Wait() - - logger.Info("finished-waiting-on-builds") -} - func NewBuild( ctx context.Context, cancel func(), @@ -85,8 +85,8 @@ func NewBuild( release chan bool, trackedStates *sync.Map, waitGroup *sync.WaitGroup, -) Build { - return &execBuild{ +) Runnable { + return &engineBuild{ ctx: ctx, cancel: cancel, @@ -99,7 +99,7 @@ func NewBuild( } } -type execBuild struct { +type engineBuild struct { ctx context.Context cancel func() @@ -111,17 +111,17 @@ type execBuild struct { waitGroup *sync.WaitGroup } -func (build 
*execBuild) Resume(logger lager.Logger) { - build.waitGroup.Add(1) - defer build.waitGroup.Done() +func (b *engineBuild) Run(logger lager.Logger) { + b.waitGroup.Add(1) + defer b.waitGroup.Done() logger = logger.WithData(lager.Data{ - "build": build.build.ID(), - "pipeline": build.build.PipelineName(), - "job": build.build.JobName(), + "build": b.build.ID(), + "pipeline": b.build.PipelineName(), + "job": b.build.JobName(), }) - lock, acquired, err := build.build.AcquireTrackingLock(logger, time.Minute) + lock, acquired, err := b.build.AcquireTrackingLock(logger, time.Minute) if err != nil { logger.Error("failed-to-get-lock", err) return @@ -134,7 +134,7 @@ func (build *execBuild) Resume(logger lager.Logger) { defer lock.Release() - found, err := build.build.Reload() + found, err := b.build.Reload() if err != nil { logger.Error("failed-to-load-build-from-db", err) return @@ -145,12 +145,12 @@ func (build *execBuild) Resume(logger lager.Logger) { return } - if !build.build.IsRunning() { + if !b.build.IsRunning() { logger.Info("build-already-finished") return } - notifier, err := build.build.AbortNotifier() + notifier, err := b.build.AbortNotifier() if err != nil { logger.Error("failed-to-listen-for-aborts", err) return @@ -158,19 +158,19 @@ func (build *execBuild) Resume(logger lager.Logger) { defer notifier.Close() - step, err := build.builder.BuildStep(build.build) + step, err := b.builder.BuildStep(b.build) if err != nil { logger.Error("failed-to-build-step", err) return } - build.trackStarted(logger) - defer build.trackFinished(logger) + b.trackStarted(logger) + defer b.trackFinished(logger) logger.Info("running") - state := build.runState() - defer build.clearRunState() + state := b.runState() + defer b.clearRunState() noleak := make(chan bool) defer close(noleak) @@ -180,62 +180,62 @@ func (build *execBuild) Resume(logger lager.Logger) { case <-noleak: case <-notifier.Notify(): logger.Info("aborting") - build.cancel() + b.cancel() } }() done := make(chan error) 
go func() { - ctx := lagerctx.NewContext(build.ctx, logger) + ctx := lagerctx.NewContext(b.ctx, logger) done <- step.Run(ctx, state) }() select { - case <-build.release: + case <-b.release: logger.Info("releasing") case err = <-done: - build.finish(logger.Session("finish"), err, step.Succeeded()) + b.finish(logger.Session("finish"), err, step.Succeeded()) } } -func (build *execBuild) finish(logger lager.Logger, err error, succeeded bool) { +func (b *engineBuild) finish(logger lager.Logger, err error, succeeded bool) { if err == context.Canceled { - build.saveStatus(logger, atc.StatusAborted) + b.saveStatus(logger, atc.StatusAborted) logger.Info("aborted") } else if err != nil { - build.saveStatus(logger, atc.StatusErrored) + b.saveStatus(logger, atc.StatusErrored) logger.Info("errored", lager.Data{"error": err.Error()}) } else if succeeded { - build.saveStatus(logger, atc.StatusSucceeded) + b.saveStatus(logger, atc.StatusSucceeded) logger.Info("succeeded") } else { - build.saveStatus(logger, atc.StatusFailed) + b.saveStatus(logger, atc.StatusFailed) logger.Info("failed") } } -func (build *execBuild) saveStatus(logger lager.Logger, status atc.BuildStatus) { - if err := build.build.Finish(db.BuildStatus(status)); err != nil { +func (b *engineBuild) saveStatus(logger lager.Logger, status atc.BuildStatus) { + if err := b.build.Finish(db.BuildStatus(status)); err != nil { logger.Error("failed-to-finish-build", err) } } -func (build *execBuild) trackStarted(logger lager.Logger) { +func (b *engineBuild) trackStarted(logger lager.Logger) { metric.BuildStarted{ - PipelineName: build.build.PipelineName(), - JobName: build.build.JobName(), - BuildName: build.build.Name(), - BuildID: build.build.ID(), - TeamName: build.build.TeamName(), + PipelineName: b.build.PipelineName(), + JobName: b.build.JobName(), + BuildName: b.build.Name(), + BuildID: b.build.ID(), + TeamName: b.build.TeamName(), }.Emit(logger) } -func (build *execBuild) trackFinished(logger lager.Logger) { - found, 
err := build.build.Reload() +func (b *engineBuild) trackFinished(logger lager.Logger) { + found, err := b.build.Reload() if err != nil { logger.Error("failed-to-load-build-from-db", err) return @@ -246,24 +246,24 @@ func (build *execBuild) trackFinished(logger lager.Logger) { return } - if !build.build.IsRunning() { + if !b.build.IsRunning() { metric.BuildFinished{ - PipelineName: build.build.PipelineName(), - JobName: build.build.JobName(), - BuildName: build.build.Name(), - BuildID: build.build.ID(), - BuildStatus: build.build.Status(), - BuildDuration: build.build.EndTime().Sub(build.build.StartTime()), - TeamName: build.build.TeamName(), + PipelineName: b.build.PipelineName(), + JobName: b.build.JobName(), + BuildName: b.build.Name(), + BuildID: b.build.ID(), + BuildStatus: b.build.Status(), + BuildDuration: b.build.EndTime().Sub(b.build.StartTime()), + TeamName: b.build.TeamName(), }.Emit(logger) } } -func (build *execBuild) runState() exec.RunState { +func (build *engineBuild) runState() exec.RunState { existingState, _ := build.trackedStates.LoadOrStore(build.build.ID(), exec.NewRunState()) return existingState.(exec.RunState) } -func (build *execBuild) clearRunState() { +func (build *engineBuild) clearRunState() { build.trackedStates.Delete(build.build.ID()) } diff --git a/atc/engine/engine_test.go b/atc/engine/engine_test.go index a20bc3566..df638e6c6 100644 --- a/atc/engine/engine_test.go +++ b/atc/engine/engine_test.go @@ -22,26 +22,20 @@ import ( var _ = Describe("Engine", func() { var ( - logger lager.Logger - fakeBuild *dbfakes.FakeBuild fakeStepBuilder *enginefakes.FakeStepBuilder ) BeforeEach(func() { - logger = lagertest.NewTestLogger("test") - fakeBuild = new(dbfakes.FakeBuild) fakeBuild.IDReturns(128) fakeStepBuilder = new(enginefakes.FakeStepBuilder) }) - Describe("LookupBuild", func() { + Describe("NewBuild", func() { var ( - build Build - err error - + build Runnable engine Engine ) @@ -50,12 +44,12 @@ var _ = Describe("Engine", func() { }) 
JustBeforeEach(func() { - build = engine.LookupBuild(logger, fakeBuild) + build = engine.NewBuild(fakeBuild) }) - It("succeeds", func() { - Expect(err).NotTo(HaveOccurred()) - }) + // It("succeeds", func() { + // Expect(err).NotTo(HaveOccurred()) + // }) It("returns a build", func() { Expect(build).NotTo(BeNil()) @@ -64,7 +58,7 @@ var _ = Describe("Engine", func() { Describe("Build", func() { var ( - build Build + build Runnable release chan bool cancel chan bool waitGroup *sync.WaitGroup @@ -89,7 +83,7 @@ var _ = Describe("Engine", func() { ) }) - Describe("Resume", func() { + Describe("Run", func() { var logger lager.Logger BeforeEach(func() { @@ -97,7 +91,7 @@ var _ = Describe("Engine", func() { }) JustBeforeEach(func() { - build.Resume(logger) + build.Run(logger) }) Context("when acquiring the lock succeeds", func() { diff --git a/atc/engine/enginefakes/fake_build.go b/atc/engine/enginefakes/fake_build.go deleted file mode 100644 index a71219b2a..000000000 --- a/atc/engine/enginefakes/fake_build.go +++ /dev/null @@ -1,76 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. 
-package enginefakes - -import ( - "sync" - - "code.cloudfoundry.org/lager" - "github.com/concourse/concourse/atc/engine" -) - -type FakeBuild struct { - ResumeStub func(lager.Logger) - resumeMutex sync.RWMutex - resumeArgsForCall []struct { - arg1 lager.Logger - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeBuild) Resume(arg1 lager.Logger) { - fake.resumeMutex.Lock() - fake.resumeArgsForCall = append(fake.resumeArgsForCall, struct { - arg1 lager.Logger - }{arg1}) - fake.recordInvocation("Resume", []interface{}{arg1}) - fake.resumeMutex.Unlock() - if fake.ResumeStub != nil { - fake.ResumeStub(arg1) - } -} - -func (fake *FakeBuild) ResumeCallCount() int { - fake.resumeMutex.RLock() - defer fake.resumeMutex.RUnlock() - return len(fake.resumeArgsForCall) -} - -func (fake *FakeBuild) ResumeCalls(stub func(lager.Logger)) { - fake.resumeMutex.Lock() - defer fake.resumeMutex.Unlock() - fake.ResumeStub = stub -} - -func (fake *FakeBuild) ResumeArgsForCall(i int) lager.Logger { - fake.resumeMutex.RLock() - defer fake.resumeMutex.RUnlock() - argsForCall := fake.resumeArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeBuild) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.resumeMutex.RLock() - defer fake.resumeMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeBuild) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ engine.Build = new(FakeBuild) diff --git 
a/atc/engine/enginefakes/fake_engine.go b/atc/engine/enginefakes/fake_engine.go index 424426943..c434c8212 100644 --- a/atc/engine/enginefakes/fake_engine.go +++ b/atc/engine/enginefakes/fake_engine.go @@ -10,17 +10,16 @@ import ( ) type FakeEngine struct { - LookupBuildStub func(lager.Logger, db.Build) engine.Build - lookupBuildMutex sync.RWMutex - lookupBuildArgsForCall []struct { - arg1 lager.Logger - arg2 db.Build + NewBuildStub func(db.Build) engine.Runnable + newBuildMutex sync.RWMutex + newBuildArgsForCall []struct { + arg1 db.Build } - lookupBuildReturns struct { - result1 engine.Build + newBuildReturns struct { + result1 engine.Runnable } - lookupBuildReturnsOnCall map[int]struct { - result1 engine.Build + newBuildReturnsOnCall map[int]struct { + result1 engine.Runnable } ReleaseAllStub func(lager.Logger) releaseAllMutex sync.RWMutex @@ -31,64 +30,63 @@ type FakeEngine struct { invocationsMutex sync.RWMutex } -func (fake *FakeEngine) LookupBuild(arg1 lager.Logger, arg2 db.Build) engine.Build { - fake.lookupBuildMutex.Lock() - ret, specificReturn := fake.lookupBuildReturnsOnCall[len(fake.lookupBuildArgsForCall)] - fake.lookupBuildArgsForCall = append(fake.lookupBuildArgsForCall, struct { - arg1 lager.Logger - arg2 db.Build - }{arg1, arg2}) - fake.recordInvocation("LookupBuild", []interface{}{arg1, arg2}) - fake.lookupBuildMutex.Unlock() - if fake.LookupBuildStub != nil { - return fake.LookupBuildStub(arg1, arg2) +func (fake *FakeEngine) NewBuild(arg1 db.Build) engine.Runnable { + fake.newBuildMutex.Lock() + ret, specificReturn := fake.newBuildReturnsOnCall[len(fake.newBuildArgsForCall)] + fake.newBuildArgsForCall = append(fake.newBuildArgsForCall, struct { + arg1 db.Build + }{arg1}) + fake.recordInvocation("NewBuild", []interface{}{arg1}) + fake.newBuildMutex.Unlock() + if fake.NewBuildStub != nil { + return fake.NewBuildStub(arg1) } if specificReturn { return ret.result1 } - fakeReturns := fake.lookupBuildReturns + fakeReturns := fake.newBuildReturns 
return fakeReturns.result1 } -func (fake *FakeEngine) LookupBuildCallCount() int { - fake.lookupBuildMutex.RLock() - defer fake.lookupBuildMutex.RUnlock() - return len(fake.lookupBuildArgsForCall) +func (fake *FakeEngine) NewBuildCallCount() int { + fake.newBuildMutex.RLock() + defer fake.newBuildMutex.RUnlock() + return len(fake.newBuildArgsForCall) } -func (fake *FakeEngine) LookupBuildCalls(stub func(lager.Logger, db.Build) engine.Build) { - fake.lookupBuildMutex.Lock() - defer fake.lookupBuildMutex.Unlock() - fake.LookupBuildStub = stub +func (fake *FakeEngine) NewBuildCalls(stub func(db.Build) engine.Runnable) { + fake.newBuildMutex.Lock() + defer fake.newBuildMutex.Unlock() + fake.NewBuildStub = stub } -func (fake *FakeEngine) LookupBuildArgsForCall(i int) (lager.Logger, db.Build) { - fake.lookupBuildMutex.RLock() - defer fake.lookupBuildMutex.RUnlock() - argsForCall := fake.lookupBuildArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 +func (fake *FakeEngine) NewBuildArgsForCall(i int) db.Build { + fake.newBuildMutex.RLock() + defer fake.newBuildMutex.RUnlock() + argsForCall := fake.newBuildArgsForCall[i] + return argsForCall.arg1 } -func (fake *FakeEngine) LookupBuildReturns(result1 engine.Build) { - fake.lookupBuildMutex.Lock() - defer fake.lookupBuildMutex.Unlock() - fake.LookupBuildStub = nil - fake.lookupBuildReturns = struct { - result1 engine.Build +func (fake *FakeEngine) NewBuildReturns(result1 engine.Runnable) { + fake.newBuildMutex.Lock() + defer fake.newBuildMutex.Unlock() + fake.NewBuildStub = nil + fake.newBuildReturns = struct { + result1 engine.Runnable }{result1} } -func (fake *FakeEngine) LookupBuildReturnsOnCall(i int, result1 engine.Build) { - fake.lookupBuildMutex.Lock() - defer fake.lookupBuildMutex.Unlock() - fake.LookupBuildStub = nil - if fake.lookupBuildReturnsOnCall == nil { - fake.lookupBuildReturnsOnCall = make(map[int]struct { - result1 engine.Build +func (fake *FakeEngine) NewBuildReturnsOnCall(i int, result1 
engine.Runnable) { + fake.newBuildMutex.Lock() + defer fake.newBuildMutex.Unlock() + fake.NewBuildStub = nil + if fake.newBuildReturnsOnCall == nil { + fake.newBuildReturnsOnCall = make(map[int]struct { + result1 engine.Runnable }) } - fake.lookupBuildReturnsOnCall[i] = struct { - result1 engine.Build + fake.newBuildReturnsOnCall[i] = struct { + result1 engine.Runnable }{result1} } @@ -126,8 +124,8 @@ func (fake *FakeEngine) ReleaseAllArgsForCall(i int) lager.Logger { func (fake *FakeEngine) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() - fake.lookupBuildMutex.RLock() - defer fake.lookupBuildMutex.RUnlock() + fake.newBuildMutex.RLock() + defer fake.newBuildMutex.RUnlock() fake.releaseAllMutex.RLock() defer fake.releaseAllMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} diff --git a/atc/engine/enginefakes/fake_runnable.go b/atc/engine/enginefakes/fake_runnable.go new file mode 100644 index 000000000..357fefda1 --- /dev/null +++ b/atc/engine/enginefakes/fake_runnable.go @@ -0,0 +1,76 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package enginefakes + +import ( + "sync" + + "code.cloudfoundry.org/lager" + "github.com/concourse/concourse/atc/engine" +) + +type FakeRunnable struct { + RunStub func(lager.Logger) + runMutex sync.RWMutex + runArgsForCall []struct { + arg1 lager.Logger + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeRunnable) Run(arg1 lager.Logger) { + fake.runMutex.Lock() + fake.runArgsForCall = append(fake.runArgsForCall, struct { + arg1 lager.Logger + }{arg1}) + fake.recordInvocation("Run", []interface{}{arg1}) + fake.runMutex.Unlock() + if fake.RunStub != nil { + fake.RunStub(arg1) + } +} + +func (fake *FakeRunnable) RunCallCount() int { + fake.runMutex.RLock() + defer fake.runMutex.RUnlock() + return len(fake.runArgsForCall) +} + +func (fake *FakeRunnable) RunCalls(stub func(lager.Logger)) { + fake.runMutex.Lock() + defer fake.runMutex.Unlock() + fake.RunStub = stub +} + +func (fake *FakeRunnable) RunArgsForCall(i int) lager.Logger { + fake.runMutex.RLock() + defer fake.runMutex.RUnlock() + argsForCall := fake.runArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeRunnable) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.runMutex.RLock() + defer fake.runMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeRunnable) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ engine.Runnable = new(FakeRunnable) diff --git a/atc/exec/execfakes/fake_get_delegate.go 
b/atc/exec/execfakes/fake_get_delegate.go index ade968b65..e3c34746b 100644 --- a/atc/exec/execfakes/fake_get_delegate.go +++ b/atc/exec/execfakes/fake_get_delegate.go @@ -6,6 +6,7 @@ import ( "sync" "code.cloudfoundry.org/lager" + "github.com/concourse/concourse/atc" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/exec" ) @@ -40,6 +41,13 @@ type FakeGetDelegate struct { initializingArgsForCall []struct { arg1 lager.Logger } + SaveVersionStub func(lager.Logger, string, exec.VersionInfo) + saveVersionMutex sync.RWMutex + saveVersionArgsForCall []struct { + arg1 lager.Logger + arg2 string + arg3 exec.VersionInfo + } StartingStub func(lager.Logger) startingMutex sync.RWMutex startingArgsForCall []struct { @@ -65,6 +73,13 @@ type FakeGetDelegate struct { stdoutReturnsOnCall map[int]struct { result1 io.Writer } + UpdateVersionStub func(lager.Logger, atc.GetPlan, exec.VersionInfo) + updateVersionMutex sync.RWMutex + updateVersionArgsForCall []struct { + arg1 lager.Logger + arg2 atc.GetPlan + arg3 exec.VersionInfo + } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } @@ -225,6 +240,39 @@ func (fake *FakeGetDelegate) InitializingArgsForCall(i int) lager.Logger { return argsForCall.arg1 } +func (fake *FakeGetDelegate) SaveVersion(arg1 lager.Logger, arg2 string, arg3 exec.VersionInfo) { + fake.saveVersionMutex.Lock() + fake.saveVersionArgsForCall = append(fake.saveVersionArgsForCall, struct { + arg1 lager.Logger + arg2 string + arg3 exec.VersionInfo + }{arg1, arg2, arg3}) + fake.recordInvocation("SaveVersion", []interface{}{arg1, arg2, arg3}) + fake.saveVersionMutex.Unlock() + if fake.SaveVersionStub != nil { + fake.SaveVersionStub(arg1, arg2, arg3) + } +} + +func (fake *FakeGetDelegate) SaveVersionCallCount() int { + fake.saveVersionMutex.RLock() + defer fake.saveVersionMutex.RUnlock() + return len(fake.saveVersionArgsForCall) +} + +func (fake *FakeGetDelegate) SaveVersionCalls(stub func(lager.Logger, string, 
exec.VersionInfo)) { + fake.saveVersionMutex.Lock() + defer fake.saveVersionMutex.Unlock() + fake.SaveVersionStub = stub +} + +func (fake *FakeGetDelegate) SaveVersionArgsForCall(i int) (lager.Logger, string, exec.VersionInfo) { + fake.saveVersionMutex.RLock() + defer fake.saveVersionMutex.RUnlock() + argsForCall := fake.saveVersionArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + func (fake *FakeGetDelegate) Starting(arg1 lager.Logger) { fake.startingMutex.Lock() fake.startingArgsForCall = append(fake.startingArgsForCall, struct { @@ -360,6 +408,39 @@ func (fake *FakeGetDelegate) StdoutReturnsOnCall(i int, result1 io.Writer) { }{result1} } +func (fake *FakeGetDelegate) UpdateVersion(arg1 lager.Logger, arg2 atc.GetPlan, arg3 exec.VersionInfo) { + fake.updateVersionMutex.Lock() + fake.updateVersionArgsForCall = append(fake.updateVersionArgsForCall, struct { + arg1 lager.Logger + arg2 atc.GetPlan + arg3 exec.VersionInfo + }{arg1, arg2, arg3}) + fake.recordInvocation("UpdateVersion", []interface{}{arg1, arg2, arg3}) + fake.updateVersionMutex.Unlock() + if fake.UpdateVersionStub != nil { + fake.UpdateVersionStub(arg1, arg2, arg3) + } +} + +func (fake *FakeGetDelegate) UpdateVersionCallCount() int { + fake.updateVersionMutex.RLock() + defer fake.updateVersionMutex.RUnlock() + return len(fake.updateVersionArgsForCall) +} + +func (fake *FakeGetDelegate) UpdateVersionCalls(stub func(lager.Logger, atc.GetPlan, exec.VersionInfo)) { + fake.updateVersionMutex.Lock() + defer fake.updateVersionMutex.Unlock() + fake.UpdateVersionStub = stub +} + +func (fake *FakeGetDelegate) UpdateVersionArgsForCall(i int) (lager.Logger, atc.GetPlan, exec.VersionInfo) { + fake.updateVersionMutex.RLock() + defer fake.updateVersionMutex.RUnlock() + argsForCall := fake.updateVersionArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + func (fake *FakeGetDelegate) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer 
fake.invocationsMutex.RUnlock() @@ -371,12 +452,16 @@ func (fake *FakeGetDelegate) Invocations() map[string][][]interface{} { defer fake.imageVersionDeterminedMutex.RUnlock() fake.initializingMutex.RLock() defer fake.initializingMutex.RUnlock() + fake.saveVersionMutex.RLock() + defer fake.saveVersionMutex.RUnlock() fake.startingMutex.RLock() defer fake.startingMutex.RUnlock() fake.stderrMutex.RLock() defer fake.stderrMutex.RUnlock() fake.stdoutMutex.RLock() defer fake.stdoutMutex.RUnlock() + fake.updateVersionMutex.RLock() + defer fake.updateVersionMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value diff --git a/atc/exec/execfakes/fake_put_delegate.go b/atc/exec/execfakes/fake_put_delegate.go index 3cc051371..c45db8753 100644 --- a/atc/exec/execfakes/fake_put_delegate.go +++ b/atc/exec/execfakes/fake_put_delegate.go @@ -6,6 +6,7 @@ import ( "sync" "code.cloudfoundry.org/lager" + "github.com/concourse/concourse/atc" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/exec" ) @@ -40,6 +41,15 @@ type FakePutDelegate struct { initializingArgsForCall []struct { arg1 lager.Logger } + SaveOutputStub func(lager.Logger, atc.PutPlan, atc.Source, atc.VersionedResourceTypes, exec.VersionInfo) + saveOutputMutex sync.RWMutex + saveOutputArgsForCall []struct { + arg1 lager.Logger + arg2 atc.PutPlan + arg3 atc.Source + arg4 atc.VersionedResourceTypes + arg5 exec.VersionInfo + } StartingStub func(lager.Logger) startingMutex sync.RWMutex startingArgsForCall []struct { @@ -225,6 +235,41 @@ func (fake *FakePutDelegate) InitializingArgsForCall(i int) lager.Logger { return argsForCall.arg1 } +func (fake *FakePutDelegate) SaveOutput(arg1 lager.Logger, arg2 atc.PutPlan, arg3 atc.Source, arg4 atc.VersionedResourceTypes, arg5 exec.VersionInfo) { + fake.saveOutputMutex.Lock() + fake.saveOutputArgsForCall = append(fake.saveOutputArgsForCall, struct { + arg1 lager.Logger + arg2 
atc.PutPlan + arg3 atc.Source + arg4 atc.VersionedResourceTypes + arg5 exec.VersionInfo + }{arg1, arg2, arg3, arg4, arg5}) + fake.recordInvocation("SaveOutput", []interface{}{arg1, arg2, arg3, arg4, arg5}) + fake.saveOutputMutex.Unlock() + if fake.SaveOutputStub != nil { + fake.SaveOutputStub(arg1, arg2, arg3, arg4, arg5) + } +} + +func (fake *FakePutDelegate) SaveOutputCallCount() int { + fake.saveOutputMutex.RLock() + defer fake.saveOutputMutex.RUnlock() + return len(fake.saveOutputArgsForCall) +} + +func (fake *FakePutDelegate) SaveOutputCalls(stub func(lager.Logger, atc.PutPlan, atc.Source, atc.VersionedResourceTypes, exec.VersionInfo)) { + fake.saveOutputMutex.Lock() + defer fake.saveOutputMutex.Unlock() + fake.SaveOutputStub = stub +} + +func (fake *FakePutDelegate) SaveOutputArgsForCall(i int) (lager.Logger, atc.PutPlan, atc.Source, atc.VersionedResourceTypes, exec.VersionInfo) { + fake.saveOutputMutex.RLock() + defer fake.saveOutputMutex.RUnlock() + argsForCall := fake.saveOutputArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5 +} + func (fake *FakePutDelegate) Starting(arg1 lager.Logger) { fake.startingMutex.Lock() fake.startingArgsForCall = append(fake.startingArgsForCall, struct { @@ -371,6 +416,8 @@ func (fake *FakePutDelegate) Invocations() map[string][][]interface{} { defer fake.imageVersionDeterminedMutex.RUnlock() fake.initializingMutex.RLock() defer fake.initializingMutex.RUnlock() + fake.saveOutputMutex.RLock() + defer fake.saveOutputMutex.RUnlock() fake.startingMutex.RLock() defer fake.startingMutex.RUnlock() fake.stderrMutex.RLock() diff --git a/atc/exec/get_step.go b/atc/exec/get_step.go index 1409443cd..20a712686 100644 --- a/atc/exec/get_step.go +++ b/atc/exec/get_step.go @@ -41,90 +41,48 @@ type GetDelegate interface { Initializing(lager.Logger) Starting(lager.Logger) Finished(lager.Logger, ExitStatus, VersionInfo) + UpdateVersion(lager.Logger, atc.GetPlan, VersionInfo) } // 
GetStep will fetch a version of a resource on a worker that supports the // resource type. type GetStep struct { - build db.Build - - name string - resourceType string - resource string - source creds.Source - params creds.Params - versionSource VersionSource - tags atc.Tags - - delegate GetDelegate - - resourceFetcher resource.Fetcher - teamID int - buildID int - planID atc.PlanID - containerMetadata db.ContainerMetadata - dbResourceCacheFactory db.ResourceCacheFactory - stepMetadata StepMetadata - - resourceTypes creds.VersionedResourceTypes - - succeeded bool - - strategy worker.ContainerPlacementStrategy - workerPool worker.Pool + planID atc.PlanID + plan atc.GetPlan + metadata StepMetadata + containerMetadata db.ContainerMetadata + secrets creds.Secrets + resourceFetcher resource.Fetcher + resourceCacheFactory db.ResourceCacheFactory + strategy worker.ContainerPlacementStrategy + workerPool worker.Pool + delegate GetDelegate + succeeded bool } func NewGetStep( - build db.Build, - - name string, - resourceType string, - resource string, - source creds.Source, - params creds.Params, - versionSource VersionSource, - tags atc.Tags, - - delegate GetDelegate, - - resourceFetcher resource.Fetcher, - teamID int, - buildID int, planID atc.PlanID, + plan atc.GetPlan, + metadata StepMetadata, containerMetadata db.ContainerMetadata, - dbResourceCacheFactory db.ResourceCacheFactory, - stepMetadata StepMetadata, - - resourceTypes creds.VersionedResourceTypes, - + secrets creds.Secrets, + resourceFetcher resource.Fetcher, + resourceCacheFactory db.ResourceCacheFactory, strategy worker.ContainerPlacementStrategy, workerPool worker.Pool, + delegate GetDelegate, ) Step { return &GetStep{ - build: build, - - name: name, - resourceType: resourceType, - resource: resource, - source: source, - params: params, - versionSource: versionSource, - tags: tags, - - delegate: delegate, - - resourceFetcher: resourceFetcher, - teamID: teamID, - buildID: buildID, - planID: planID, - 
containerMetadata: containerMetadata, - dbResourceCacheFactory: dbResourceCacheFactory, - stepMetadata: stepMetadata, - - resourceTypes: resourceTypes, - - strategy: strategy, - workerPool: workerPool, + planID: planID, + plan: plan, + metadata: metadata, + containerMetadata: containerMetadata, + secrets: secrets, + resourceFetcher: resourceFetcher, + resourceCacheFactory: resourceCacheFactory, + strategy: strategy, + workerPool: workerPool, + delegate: delegate, } } @@ -154,35 +112,56 @@ func NewGetStep( func (step *GetStep) Run(ctx context.Context, state RunState) error { logger := lagerctx.FromContext(ctx) logger = logger.Session("get-step", lager.Data{ - "step-name": step.name, - "job-id": step.build.JobID(), + "step-name": step.plan.Name, + "job-id": step.metadata.JobID, }) step.delegate.Initializing(logger) - version, err := step.versionSource.Version(state) + variables := creds.NewVariables(step.secrets, step.metadata.TeamName, step.metadata.PipelineName) + + source, err := creds.NewSource(variables, step.plan.Source).Evaluate() if err != nil { return err } - source, err := step.source.Evaluate() + params, err := creds.NewParams(variables, step.plan.Params).Evaluate() if err != nil { return err } - params, err := step.params.Evaluate() + resourceTypes, err := creds.NewVersionedResourceTypes(variables, step.plan.VersionedResourceTypes).Evaluate() if err != nil { return err } - resourceCache, err := step.dbResourceCacheFactory.FindOrCreateResourceCache( - logger, - db.ForBuild(step.buildID), - step.resourceType, + version, err := NewVersionSourceFromPlan(&step.plan).Version(state) + if err != nil { + return err + } + + containerSpec := worker.ContainerSpec{ + ImageSpec: worker.ImageSpec{ + ResourceType: step.plan.Type, + }, + TeamID: step.metadata.TeamID, + Env: step.metadata.Env(), + } + + workerSpec := worker.WorkerSpec{ + ResourceType: step.plan.Type, + Tags: step.plan.Tags, + TeamID: step.metadata.TeamID, + ResourceTypes: resourceTypes, + } + + 
resourceCache, err := step.resourceCacheFactory.FindOrCreateResourceCache( + db.ForBuild(step.metadata.BuildID), + step.plan.Type, version, source, params, - step.resourceTypes, + resourceTypes, ) if err != nil { logger.Error("failed-to-create-resource-cache", err) @@ -190,31 +169,24 @@ func (step *GetStep) Run(ctx context.Context, state RunState) error { } resourceInstance := resource.NewResourceInstance( - resource.ResourceType(step.resourceType), + resource.ResourceType(step.plan.Type), version, source, params, - step.resourceTypes, + resourceTypes, resourceCache, - db.NewBuildStepContainerOwner(step.buildID, step.planID, step.teamID), + db.NewBuildStepContainerOwner(step.metadata.BuildID, step.planID, step.metadata.TeamID), ) - containerSpec := worker.ContainerSpec{ - ImageSpec: worker.ImageSpec{ - ResourceType: step.resourceType, - }, - TeamID: step.teamID, - Env: step.stepMetadata.Env(), - } - - workerSpec := worker.WorkerSpec{ - ResourceType: step.resourceType, - Tags: step.tags, - TeamID: step.teamID, - ResourceTypes: step.resourceTypes, - } - - chosenWorker, err := step.workerPool.FindOrChooseWorkerForContainer(logger, resourceInstance.ContainerOwner(), containerSpec, workerSpec, step.strategy) + chosenWorker, err := step.workerPool.FindOrChooseWorkerForContainer( + ctx, + logger, + resourceInstance.ContainerOwner(), + containerSpec, + step.containerMetadata, + workerSpec, + step.strategy, + ) if err != nil { return err } @@ -229,7 +201,7 @@ func (step *GetStep) Run(ctx context.Context, state RunState) error { }, chosenWorker, containerSpec, - step.resourceTypes, + resourceTypes, resourceInstance, step.delegate, ) @@ -244,50 +216,23 @@ func (step *GetStep) Run(ctx context.Context, state RunState) error { return err } - state.Artifacts().RegisterSource(artifact.Name(step.name), &getArtifactSource{ + state.Artifacts().RegisterSource(artifact.Name(step.plan.Name), &getArtifactSource{ resourceInstance: resourceInstance, versionedSource: versionedSource, }) - 
if step.resource != "" { - pipeline, found, err := step.build.Pipeline() - if err != nil { - logger.Error("failed-to-find-pipeline", err, lager.Data{"name": step.name, "pipeline-name": step.build.PipelineName(), "pipeline-id": step.build.PipelineID()}) - return err - } + versionInfo := VersionInfo{ + Version: versionedSource.Version(), + Metadata: versionedSource.Metadata(), + } - if !found { - logger.Debug("pipeline-not-found", lager.Data{"name": step.name, "pipeline-name": step.build.PipelineName(), "pipeline-id": step.build.PipelineID()}) - return ErrPipelineNotFound{step.build.PipelineName()} - } - - resource, found, err := pipeline.Resource(step.resource) - if err != nil { - logger.Error("failed-to-find-resource", err, lager.Data{"name": step.name, "pipeline-name": step.build.PipelineName(), "resource": step.resource}) - return err - } - - if !found { - logger.Debug("resource-not-found", lager.Data{"name": step.name, "pipeline-name": step.build.PipelineName(), "resource": step.resource}) - return ErrResourceNotFound{step.resource} - } - - // Find or Save* the version used in the get step, and update the Metadata - // *saving will occur when the resource's config has changed, but it hasn't - // checked yet, so the resource config versions don't exist - _, err = resource.SaveUncheckedVersion(versionedSource.Version(), db.NewResourceConfigMetadataFields(versionedSource.Metadata()), resourceCache.ResourceConfig(), step.resourceTypes) - if err != nil { - logger.Error("failed-to-save-resource-config-version", err, lager.Data{"name": step.name, "resource": step.resource, "version": versionedSource.Version()}) - return err - } + if step.plan.Resource != "" { + step.delegate.UpdateVersion(logger, step.plan, versionInfo) } step.succeeded = true - step.delegate.Finished(logger, 0, VersionInfo{ - Version: versionedSource.Version(), - Metadata: versionedSource.Metadata(), - }) + step.delegate.Finished(logger, 0, versionInfo) return nil } diff --git 
a/atc/exec/get_step_test.go b/atc/exec/get_step_test.go index c2f225d49..2290dbf9b 100644 --- a/atc/exec/get_step_test.go +++ b/atc/exec/get_step_test.go @@ -12,7 +12,6 @@ import ( "code.cloudfoundry.org/lager/lagertest" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/creds/credsfakes" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/db/dbfakes" @@ -40,13 +39,11 @@ var _ = Describe("GetStep", func() { fakeResourceFetcher *resourcefakes.FakeFetcher fakeResourceCacheFactory *dbfakes.FakeResourceCacheFactory fakeSecretManager *credsfakes.FakeSecrets - variables creds.Variables - fakeBuild *dbfakes.FakeBuild fakeDelegate *execfakes.FakeGetDelegate getPlan *atc.GetPlan - fakeVersionedSource *resourcefakes.FakeVersionedSource - resourceTypes atc.VersionedResourceTypes + fakeVersionedSource *resourcefakes.FakeVersionedSource + interpolatedResourceTypes atc.VersionedResourceTypes artifactRepository *artifact.Repository state *execfakes.FakeRunState @@ -55,16 +52,22 @@ var _ = Describe("GetStep", func() { stepErr error containerMetadata = db.ContainerMetadata{ - PipelineID: 4567, - Type: db.ContainerTypeGet, - StepName: "some-step", + WorkingDirectory: resource.ResourcesDir("get"), + PipelineID: 4567, + Type: db.ContainerTypeGet, + StepName: "some-step", } - stepMetadata testMetadata = []string{"a=1", "b=2"} + stepMetadata = exec.StepMetadata{ + TeamID: 123, + TeamName: "some-team", + BuildID: 42, + BuildName: "some-build", + PipelineID: 4567, + PipelineName: "some-pipeline", + } - teamID = 123 - buildID = 42 - planID = 56 + planID = 56 ) BeforeEach(func() { @@ -79,7 +82,6 @@ var _ = Describe("GetStep", func() { fakeSecretManager = new(credsfakes.FakeSecrets) fakeSecretManager.GetReturns("super-secret-source", nil, true, nil) - variables = creds.NewVariables(fakeSecretManager, "team", "pipeline") artifactRepository = artifact.NewRepository() state = new(execfakes.FakeRunState) 
@@ -88,19 +90,25 @@ var _ = Describe("GetStep", func() { fakeVersionedSource = new(resourcefakes.FakeVersionedSource) fakeResourceFetcher.FetchReturns(fakeVersionedSource, nil) - fakeBuild = new(dbfakes.FakeBuild) - fakeBuild.IDReturns(buildID) - fakeBuild.TeamIDReturns(teamID) - fakeBuild.PipelineNameReturns("pipeline") - fakeDelegate = new(execfakes.FakeGetDelegate) - resourceTypes = atc.VersionedResourceTypes{ + uninterpolatedResourceTypes := atc.VersionedResourceTypes{ { ResourceType: atc.ResourceType{ Name: "custom-resource", Type: "custom-type", - Source: atc.Source{"some-custom": "source"}, + Source: atc.Source{"some-custom": "((source-param))"}, + }, + Version: atc.Version{"some-custom": "version"}, + }, + } + + interpolatedResourceTypes = atc.VersionedResourceTypes{ + { + ResourceType: atc.ResourceType{ + Name: "custom-resource", + Type: "custom-type", + Source: atc.Source{"some-custom": "super-secret-source"}, }, Version: atc.Version{"some-custom": "version"}, }, @@ -113,10 +121,8 @@ var _ = Describe("GetStep", func() { Params: atc.Params{"some-param": "some-value"}, Tags: []string{"some", "tags"}, Version: &atc.Version{"some-version": "some-value"}, - VersionedResourceTypes: resourceTypes, + VersionedResourceTypes: uninterpolatedResourceTypes, } - - containerMetadata.WorkingDirectory = resource.ResourcesDir("get") }) AfterEach(func() { @@ -129,32 +135,17 @@ var _ = Describe("GetStep", func() { Get: getPlan, } - variables := creds.NewVariables(fakeSecretManager, fakeBuild.TeamName(), fakeBuild.PipelineName()) - getStep = exec.NewGetStep( - fakeBuild, - - plan.Get.Name, - plan.Get.Type, - plan.Get.Resource, - creds.NewSource(variables, plan.Get.Source), - creds.NewParams(variables, plan.Get.Params), - exec.NewVersionSourceFromPlan(plan.Get), - plan.Get.Tags, - - fakeDelegate, - fakeResourceFetcher, - fakeBuild.TeamID(), - fakeBuild.ID(), plan.ID, - containerMetadata, - fakeResourceCacheFactory, + *plan.Get, stepMetadata, - - 
creds.NewVersionedResourceTypes(variables, plan.Get.VersionedResourceTypes), - + containerMetadata, + fakeSecretManager, + fakeResourceFetcher, + fakeResourceCacheFactory, fakeStrategy, fakePool, + fakeDelegate, ) stepErr = getStep.Run(ctx, state) @@ -162,20 +153,21 @@ var _ = Describe("GetStep", func() { It("finds or chooses a worker", func() { Expect(fakePool.FindOrChooseWorkerForContainerCallCount()).To(Equal(1)) - _, actualOwner, actualContainerSpec, actualWorkerSpec, strategy := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) - Expect(actualOwner).To(Equal(db.NewBuildStepContainerOwner(buildID, atc.PlanID(planID), teamID))) + _, _, actualOwner, actualContainerSpec, actualContainerMetadata, actualWorkerSpec, strategy := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) + Expect(actualOwner).To(Equal(db.NewBuildStepContainerOwner(stepMetadata.BuildID, atc.PlanID(planID), stepMetadata.TeamID))) Expect(actualContainerSpec).To(Equal(worker.ContainerSpec{ ImageSpec: worker.ImageSpec{ ResourceType: "some-resource-type", }, - TeamID: teamID, + TeamID: stepMetadata.TeamID, Env: stepMetadata.Env(), })) + Expect(actualContainerMetadata).To(Equal(containerMetadata)) Expect(actualWorkerSpec).To(Equal(worker.WorkerSpec{ ResourceType: "some-resource-type", Tags: atc.Tags{"some", "tags"}, - TeamID: teamID, - ResourceTypes: creds.NewVersionedResourceTypes(variables, resourceTypes), + TeamID: stepMetadata.TeamID, + ResourceTypes: interpolatedResourceTypes, })) Expect(strategy).To(Equal(fakeStrategy)) }) @@ -205,7 +197,7 @@ var _ = Describe("GetStep", func() { ImageSpec: worker.ImageSpec{ ResourceType: "some-resource-type", }, - TeamID: teamID, + TeamID: stepMetadata.TeamID, Env: stepMetadata.Env(), })) Expect(resourceInstance).To(Equal(resource.NewResourceInstance( @@ -213,11 +205,11 @@ var _ = Describe("GetStep", func() { atc.Version{"some-version": "some-value"}, atc.Source{"some": "super-secret-source"}, atc.Params{"some-param": "some-value"}, - 
creds.NewVersionedResourceTypes(variables, resourceTypes), + interpolatedResourceTypes, nil, - db.NewBuildStepContainerOwner(buildID, atc.PlanID(planID), teamID), + db.NewBuildStepContainerOwner(stepMetadata.BuildID, atc.PlanID(planID), stepMetadata.TeamID), ))) - Expect(actualResourceTypes).To(Equal(creds.NewVersionedResourceTypes(variables, resourceTypes))) + Expect(actualResourceTypes).To(Equal(interpolatedResourceTypes)) Expect(delegate).To(Equal(fakeDelegate)) expectedLockName := fmt.Sprintf("%x", sha256.Sum256([]byte( @@ -231,7 +223,7 @@ var _ = Describe("GetStep", func() { Context("when fetching resource succeeds", func() { BeforeEach(func() { fakeVersionedSource.VersionReturns(atc.Version{"some": "version"}) - fakeVersionedSource.MetadataReturns([]atc.MetadataField{{"some", "metadata"}}) + fakeVersionedSource.MetadataReturns([]atc.MetadataField{{Name: "some", Value: "metadata"}}) }) It("returns nil", func() { @@ -247,132 +239,31 @@ var _ = Describe("GetStep", func() { _, status, info := fakeDelegate.FinishedArgsForCall(0) Expect(status).To(Equal(exec.ExitStatus(0))) Expect(info.Version).To(Equal(atc.Version{"some": "version"})) - Expect(info.Metadata).To(Equal([]atc.MetadataField{{"some", "metadata"}})) + Expect(info.Metadata).To(Equal([]atc.MetadataField{{Name: "some", Value: "metadata"}})) }) - Context("when getting a pipeline resource", func() { - var fakeResourceCache *dbfakes.FakeUsedResourceCache - var fakeResourceConfig *dbfakes.FakeResourceConfig - + Context("when the plan has a resource", func() { BeforeEach(func() { getPlan.Resource = "some-pipeline-resource" - - fakeResourceCache = new(dbfakes.FakeUsedResourceCache) - fakeResourceConfig = new(dbfakes.FakeResourceConfig) - fakeResourceCache.ResourceConfigReturns(fakeResourceConfig) - fakeResourceCacheFactory.FindOrCreateResourceCacheReturns(fakeResourceCache, nil) }) - It("finds the pipeline", func() { - Expect(fakeBuild.PipelineCallCount()).To(Equal(1)) - }) - - Context("when finding the pipeline 
succeeds", func() { - var fakePipeline *dbfakes.FakePipeline - - BeforeEach(func() { - fakePipeline = new(dbfakes.FakePipeline) - fakeBuild.PipelineReturns(fakePipeline, true, nil) - }) - - It("finds the resource", func() { - Expect(fakePipeline.ResourceCallCount()).To(Equal(1)) - - Expect(fakePipeline.ResourceArgsForCall(0)).To(Equal(getPlan.Resource)) - }) - - Context("when finding the resource succeeds", func() { - var fakeResource *dbfakes.FakeResource - - BeforeEach(func() { - fakeResource = new(dbfakes.FakeResource) - fakePipeline.ResourceReturns(fakeResource, true, nil) - }) - - It("saves the resource config version", func() { - Expect(fakeResource.SaveUncheckedVersionCallCount()).To(Equal(1)) - - version, metadata, resourceConfig, actualResourceTypes := fakeResource.SaveUncheckedVersionArgsForCall(0) - Expect(version).To(Equal(atc.Version{"some": "version"})) - Expect(metadata).To(Equal(db.NewResourceConfigMetadataFields([]atc.MetadataField{{"some", "metadata"}}))) - Expect(resourceConfig).To(Equal(fakeResourceConfig)) - Expect(actualResourceTypes).To(Equal(creds.NewVersionedResourceTypes(variables, resourceTypes))) - }) - - Context("when it fails to save the version", func() { - disaster := errors.New("oops") - - BeforeEach(func() { - fakeResource.SaveUncheckedVersionReturns(false, disaster) - }) - - It("returns an error", func() { - Expect(stepErr).To(Equal(disaster)) - }) - }) - }) - - Context("when it fails to find the resource", func() { - disaster := errors.New("oops") - - BeforeEach(func() { - fakePipeline.ResourceReturns(nil, false, disaster) - }) - - It("returns an error", func() { - Expect(stepErr).To(Equal(disaster)) - }) - }) - - Context("when the resource is not found", func() { - BeforeEach(func() { - fakePipeline.ResourceReturns(nil, false, nil) - }) - - It("returns an ErrResourceNotFound", func() { - Expect(stepErr).To(Equal(exec.ErrResourceNotFound{"some-pipeline-resource"})) - }) - }) - }) - - Context("when it fails to find the pipeline", 
func() { - disaster := errors.New("oops") - - BeforeEach(func() { - fakeBuild.PipelineReturns(nil, false, disaster) - }) - - It("returns an error", func() { - Expect(stepErr).To(Equal(disaster)) - }) - }) - - Context("when the pipeline is not found", func() { - BeforeEach(func() { - fakeBuild.PipelineReturns(nil, false, nil) - }) - - It("returns an ErrPipelineNotFound", func() { - Expect(stepErr).To(Equal(exec.ErrPipelineNotFound{"pipeline"})) - }) + It("saves a version for the resource", func() { + Expect(fakeDelegate.UpdateVersionCallCount()).To(Equal(1)) + _, plan, info := fakeDelegate.UpdateVersionArgsForCall(0) + Expect(plan.Resource).To(Equal("some-pipeline-resource")) + Expect(info.Version).To(Equal(atc.Version{"some": "version"})) + Expect(info.Metadata).To(Equal([]atc.MetadataField{{Name: "some", Value: "metadata"}})) }) }) Context("when getting an anonymous resource", func() { - var fakeResourceCache *dbfakes.FakeUsedResourceCache - var fakeResourceConfig *dbfakes.FakeResourceConfig + BeforeEach(func() { getPlan.Resource = "" - - fakeResourceCache = new(dbfakes.FakeUsedResourceCache) - fakeResourceConfig = new(dbfakes.FakeResourceConfig) - fakeResourceCache.ResourceConfigReturns(fakeResourceConfig) - fakeResourceCacheFactory.FindOrCreateResourceCacheReturns(fakeResourceCache, nil) }) - It("does not find the pipeline", func() { - // TODO: this can be removed once /check returns metadata - Expect(fakeBuild.PipelineCallCount()).To(Equal(0)) + It("does not save the version", func() { + Expect(fakeDelegate.UpdateVersionCallCount()).To(Equal(0)) }) }) diff --git a/atc/exec/put_step.go b/atc/exec/put_step.go index e88ded9eb..b4f21ca6a 100644 --- a/atc/exec/put_step.go +++ b/atc/exec/put_step.go @@ -20,75 +20,48 @@ type PutDelegate interface { Initializing(lager.Logger) Starting(lager.Logger) Finished(lager.Logger, ExitStatus, VersionInfo) + SaveOutput(lager.Logger, atc.PutPlan, atc.Source, atc.VersionedResourceTypes, VersionInfo) } // PutStep produces a resource 
version using preconfigured params and any data // available in the worker.ArtifactRepository. type PutStep struct { - build db.Build - - name string - resourceType string - resource string - source creds.Source - params creds.Params - tags atc.Tags - inputs PutInputs - - delegate PutDelegate - pool worker.Pool - resourceConfigFactory db.ResourceConfigFactory planID atc.PlanID + plan atc.PutPlan + metadata StepMetadata containerMetadata db.ContainerMetadata - stepMetadata StepMetadata - - resourceTypes creds.VersionedResourceTypes - - versionInfo VersionInfo - succeeded bool - - strategy worker.ContainerPlacementStrategy - resourceFactory resource.ResourceFactory + secrets creds.Secrets + resourceFactory resource.ResourceFactory + resourceConfigFactory db.ResourceConfigFactory + strategy worker.ContainerPlacementStrategy + pool worker.Pool + delegate PutDelegate + succeeded bool } func NewPutStep( - build db.Build, - name string, - resourceType string, - resourceName string, - source creds.Source, - params creds.Params, - tags atc.Tags, - inputs PutInputs, - delegate PutDelegate, - pool worker.Pool, - resourceConfigFactory db.ResourceConfigFactory, planID atc.PlanID, + plan atc.PutPlan, + metadata StepMetadata, containerMetadata db.ContainerMetadata, - stepMetadata StepMetadata, - resourceTypes creds.VersionedResourceTypes, - strategy worker.ContainerPlacementStrategy, + secrets creds.Secrets, resourceFactory resource.ResourceFactory, + resourceConfigFactory db.ResourceConfigFactory, + strategy worker.ContainerPlacementStrategy, + pool worker.Pool, + delegate PutDelegate, ) *PutStep { return &PutStep{ - build: build, - - resourceType: resourceType, - name: name, - resource: resourceName, - source: source, - params: params, - tags: tags, - inputs: inputs, - delegate: delegate, - pool: pool, - resourceConfigFactory: resourceConfigFactory, planID: planID, + plan: plan, + metadata: metadata, containerMetadata: containerMetadata, - stepMetadata: stepMetadata, - 
resourceTypes: resourceTypes, - strategy: strategy, + secrets: secrets, resourceFactory: resourceFactory, + resourceConfigFactory: resourceConfigFactory, + pool: pool, + strategy: strategy, + delegate: delegate, } } @@ -103,40 +76,79 @@ func NewPutStep( func (step *PutStep) Run(ctx context.Context, state RunState) error { logger := lagerctx.FromContext(ctx) logger = logger.Session("put-step", lager.Data{ - "step-name": step.name, - "job-id": step.build.JobID(), + "step-name": step.plan.Name, + "job-id": step.metadata.JobID, }) step.delegate.Initializing(logger) - containerInputs, err := step.inputs.FindAll(state.Artifacts()) + variables := creds.NewVariables(step.secrets, step.metadata.TeamName, step.metadata.PipelineName) + + source, err := creds.NewSource(variables, step.plan.Source).Evaluate() + if err != nil { + return err + } + + params, err := creds.NewParams(variables, step.plan.Params).Evaluate() + if err != nil { + return err + } + + resourceTypes, err := creds.NewVersionedResourceTypes(variables, step.plan.VersionedResourceTypes).Evaluate() + if err != nil { + return err + } + + var putInputs PutInputs + if step.plan.Inputs == nil { + // Put step defaults to all inputs if not specified + putInputs = NewAllInputs() + } else if step.plan.Inputs.All { + putInputs = NewAllInputs() + } else { + // Covers both cases where inputs are specified and when there are no + // inputs specified and "all" field is given a false boolean, which will + // result in no inputs attached + putInputs = NewSpecificInputs(step.plan.Inputs.Specified) + } + + containerInputs, err := putInputs.FindAll(state.Artifacts()) if err != nil { return err } containerSpec := worker.ContainerSpec{ ImageSpec: worker.ImageSpec{ - ResourceType: step.resourceType, + ResourceType: step.plan.Type, }, - Tags: step.tags, - TeamID: step.build.TeamID(), + Tags: step.plan.Tags, + TeamID: step.metadata.TeamID, - Dir: resource.ResourcesDir("put"), + Dir: step.containerMetadata.WorkingDirectory, - Env: 
step.stepMetadata.Env(), + Env: step.metadata.Env(), Inputs: containerInputs, } workerSpec := worker.WorkerSpec{ - ResourceType: step.resourceType, - Tags: step.tags, - TeamID: step.build.TeamID(), - ResourceTypes: step.resourceTypes, + ResourceType: step.plan.Type, + Tags: step.plan.Tags, + TeamID: step.metadata.TeamID, + ResourceTypes: resourceTypes, } - owner := db.NewBuildStepContainerOwner(step.build.ID(), step.planID, step.build.TeamID()) - chosenWorker, err := step.pool.FindOrChooseWorkerForContainer(logger, owner, containerSpec, workerSpec, step.strategy) + owner := db.NewBuildStepContainerOwner(step.metadata.BuildID, step.planID, step.metadata.TeamID) + + chosenWorker, err := step.pool.FindOrChooseWorkerForContainer( + ctx, + logger, + owner, + containerSpec, + step.containerMetadata, + workerSpec, + step.strategy, + ) if err != nil { return err } @@ -150,24 +162,13 @@ func (step *PutStep) Run(ctx context.Context, state RunState) error { logger, step.delegate, owner, - step.containerMetadata, containerSpec, - step.resourceTypes, + resourceTypes, ) if err != nil { return err } - source, err := step.source.Evaluate() - if err != nil { - return err - } - - params, err := step.params.Evaluate() - if err != nil { - return err - } - step.delegate.Starting(logger) putResource := step.resourceFactory.NewResourceForContainer(container) @@ -192,34 +193,24 @@ func (step *PutStep) Run(ctx context.Context, state RunState) error { return err } - step.versionInfo = VersionInfo{ + versionInfo := VersionInfo{ Version: versionedSource.Version(), Metadata: versionedSource.Metadata(), } - if step.resource != "" { - logger = logger.WithData(lager.Data{"step": step.name, "resource": step.resource, "resource-type": step.resourceType, "version": step.versionInfo.Version}) - err = step.build.SaveOutput(logger, step.resourceType, source, step.resourceTypes, step.versionInfo.Version, db.NewResourceConfigMetadataFields(step.versionInfo.Metadata), step.name, step.resource) - if err != 
nil { - logger.Error("failed-to-save-output", err) - return err - } + if step.plan.Resource != "" { + step.delegate.SaveOutput(logger, step.plan, source, resourceTypes, versionInfo) } - state.StoreResult(step.planID, step.versionInfo) + state.StoreResult(step.planID, versionInfo) step.succeeded = true - step.delegate.Finished(logger, 0, step.versionInfo) + step.delegate.Finished(logger, 0, versionInfo) return nil } -// VersionInfo returns the info of the pushed version. -func (step *PutStep) VersionInfo() VersionInfo { - return step.versionInfo -} - // Succeeded returns true if the resource script exited successfully. func (step *PutStep) Succeeded() bool { return step.succeeded diff --git a/atc/exec/put_step_test.go b/atc/exec/put_step_test.go index 0a42b64ee..b28818528 100644 --- a/atc/exec/put_step_test.go +++ b/atc/exec/put_step_test.go @@ -4,9 +4,8 @@ import ( "context" "errors" - "github.com/cloudfoundry/bosh-cli/director/template" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" + "github.com/concourse/concourse/atc/creds/credsfakes" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/db/dbfakes" "github.com/concourse/concourse/atc/exec" @@ -26,61 +25,58 @@ var _ = Describe("PutStep", func() { ctx context.Context cancel func() - fakeBuild *dbfakes.FakeBuild - - pipelineResourceName string - - fakeStrategy *workerfakes.FakeContainerPlacementStrategy - fakePool *workerfakes.FakePool fakeWorker *workerfakes.FakeWorker + fakePool *workerfakes.FakePool + fakeStrategy *workerfakes.FakeContainerPlacementStrategy fakeResourceFactory *resourcefakes.FakeResourceFactory fakeResourceConfigFactory *dbfakes.FakeResourceConfigFactory + fakeSecretManager *credsfakes.FakeSecrets + fakeDelegate *execfakes.FakePutDelegate + putPlan *atc.PutPlan - variables creds.Variables - - stepMetadata testMetadata = []string{"a=1", "b=2"} + interpolatedResourceTypes atc.VersionedResourceTypes containerMetadata = db.ContainerMetadata{ 
- Type: db.ContainerTypePut, - StepName: "some-step", + WorkingDirectory: resource.ResourcesDir("put"), + Type: db.ContainerTypePut, + StepName: "some-step", } - planID atc.PlanID - fakeDelegate *execfakes.FakePutDelegate - resourceTypes creds.VersionedResourceTypes + stepMetadata = exec.StepMetadata{ + TeamID: 123, + TeamName: "some-team", + BuildID: 42, + BuildName: "some-build", + PipelineID: 4567, + PipelineName: "some-pipeline", + } repo *artifact.Repository state *execfakes.FakeRunState - stdoutBuf *gbytes.Buffer - stderrBuf *gbytes.Buffer - putStep *exec.PutStep stepErr error - putInputs exec.PutInputs + stdoutBuf *gbytes.Buffer + stderrBuf *gbytes.Buffer + + planID atc.PlanID ) BeforeEach(func() { ctx, cancel = context.WithCancel(context.Background()) - fakeBuild = new(dbfakes.FakeBuild) - fakeBuild.IDReturns(42) - fakeBuild.TeamIDReturns(123) - planID = atc.PlanID("some-plan-id") - pipelineResourceName = "some-resource" - fakeStrategy = new(workerfakes.FakeContainerPlacementStrategy) fakePool = new(workerfakes.FakePool) fakeWorker = new(workerfakes.FakeWorker) fakeResourceFactory = new(resourcefakes.FakeResourceFactory) fakeResourceConfigFactory = new(dbfakes.FakeResourceConfigFactory) - variables = template.StaticVariables{ - "custom-param": "source", - "source-param": "super-secret-source", - } + + fakeSecretManager = new(credsfakes.FakeSecrets) + fakeSecretManager.GetReturnsOnCall(0, "super-secret-source", nil, true, nil) + fakeSecretManager.GetReturnsOnCall(1, "source", nil, true, nil) fakeDelegate = new(execfakes.FakePutDelegate) stdoutBuf = gbytes.NewBuffer() @@ -92,7 +88,7 @@ var _ = Describe("PutStep", func() { state = new(execfakes.FakeRunState) state.ArtifactsReturns(repo) - resourceTypes = creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{ + uninterpolatedResourceTypes := atc.VersionedResourceTypes{ { ResourceType: atc.ResourceType{ Name: "custom-resource", @@ -101,11 +97,28 @@ var _ = Describe("PutStep", func() { }, Version: 
atc.Version{"some-custom": "version"}, }, - }) + } - stepErr = nil + interpolatedResourceTypes = atc.VersionedResourceTypes{ + { + ResourceType: atc.ResourceType{ + Name: "custom-resource", + Type: "custom-type", + Source: atc.Source{"some-custom": "source"}, + }, + Version: atc.Version{"some-custom": "version"}, + }, + } - putInputs = exec.NewAllInputs() + putPlan = &atc.PutPlan{ + Name: "some-name", + Resource: "some-resource", + Type: "some-resource-type", + Source: atc.Source{"some": "((source-param))"}, + Params: atc.Params{"some-param": "some-value"}, + Tags: []string{"some", "tags"}, + VersionedResourceTypes: uninterpolatedResourceTypes, + } }) AfterEach(func() { @@ -113,24 +126,22 @@ var _ = Describe("PutStep", func() { }) JustBeforeEach(func() { + plan := atc.Plan{ + ID: atc.PlanID(planID), + Put: putPlan, + } + putStep = exec.NewPutStep( - fakeBuild, - "some-name", - "some-resource-type", - pipelineResourceName, - creds.NewSource(variables, atc.Source{"some": "((source-param))"}), - creds.NewParams(variables, atc.Params{"some-param": "some-value"}), - []string{"some", "tags"}, - putInputs, - fakeDelegate, - fakePool, - fakeResourceConfigFactory, - planID, - containerMetadata, + plan.ID, + *plan.Put, stepMetadata, - resourceTypes, - fakeStrategy, + containerMetadata, + fakeSecretManager, fakeResourceFactory, + fakeResourceConfigFactory, + fakeStrategy, + fakePool, + fakeDelegate, ) stepErr = putStep.Run(ctx, state) @@ -168,7 +179,7 @@ var _ = Describe("PutStep", func() { fakeVersionedSource = new(resourcefakes.FakeVersionedSource) fakeVersionedSource.VersionReturns(atc.Version{"some": "version"}) - fakeVersionedSource.MetadataReturns([]atc.MetadataField{{"some", "metadata"}}) + fakeVersionedSource.MetadataReturns([]atc.MetadataField{{Name: "some", Value: "metadata"}}) fakeWorker.NameReturns("some-worker") fakePool.FindOrChooseWorkerForContainerReturns(fakeWorker, nil) @@ -180,33 +191,33 @@ var _ = Describe("PutStep", func() { It("finds/chooses a worker and 
creates a container with the correct type, session, and sources with no inputs specified (meaning it takes all artifacts)", func() { Expect(fakePool.FindOrChooseWorkerForContainerCallCount()).To(Equal(1)) - _, actualOwner, actualContainerSpec, actualWorkerSpec, strategy := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) + _, _, actualOwner, actualContainerSpec, actualContainerMetadata, actualWorkerSpec, strategy := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) Expect(actualOwner).To(Equal(db.NewBuildStepContainerOwner(42, atc.PlanID(planID), 123))) Expect(actualContainerSpec.ImageSpec).To(Equal(worker.ImageSpec{ ResourceType: "some-resource-type", })) Expect(actualContainerSpec.Tags).To(Equal([]string{"some", "tags"})) Expect(actualContainerSpec.TeamID).To(Equal(123)) - Expect(actualContainerSpec.Env).To(Equal([]string{"a=1", "b=2"})) + Expect(actualContainerSpec.Env).To(Equal(stepMetadata.Env())) Expect(actualContainerSpec.Dir).To(Equal("/tmp/build/put")) Expect(actualContainerSpec.Inputs).To(HaveLen(3)) + Expect(actualContainerMetadata).To(Equal(containerMetadata)) Expect(actualWorkerSpec).To(Equal(worker.WorkerSpec{ TeamID: 123, Tags: []string{"some", "tags"}, ResourceType: "some-resource-type", - ResourceTypes: resourceTypes, + ResourceTypes: interpolatedResourceTypes, })) Expect(strategy).To(Equal(fakeStrategy)) - _, _, delegate, owner, cm, containerSpec, actualResourceTypes := fakeWorker.FindOrCreateContainerArgsForCall(0) - Expect(cm).To(Equal(containerMetadata)) + _, _, delegate, owner, containerSpec, actualResourceTypes := fakeWorker.FindOrCreateContainerArgsForCall(0) Expect(owner).To(Equal(db.NewBuildStepContainerOwner(42, atc.PlanID(planID), 123))) Expect(containerSpec.ImageSpec).To(Equal(worker.ImageSpec{ ResourceType: "some-resource-type", })) Expect(containerSpec.Tags).To(Equal([]string{"some", "tags"})) Expect(containerSpec.TeamID).To(Equal(123)) - Expect(containerSpec.Env).To(Equal([]string{"a=1", "b=2"})) + 
Expect(containerSpec.Env).To(Equal(stepMetadata.Env())) Expect(containerSpec.Dir).To(Equal("/tmp/build/put")) Expect(containerSpec.Inputs).To(HaveLen(3)) @@ -219,17 +230,19 @@ var _ = Describe("PutStep", func() { exec.PutResourceSource{fakeOtherSource}, exec.PutResourceSource{fakeMountedSource}, )) - Expect(actualResourceTypes).To(Equal(resourceTypes)) + Expect(actualResourceTypes).To(Equal(interpolatedResourceTypes)) Expect(delegate).To(Equal(fakeDelegate)) }) Context("when the inputs are specified", func() { BeforeEach(func() { - putInputs = exec.NewSpecificInputs([]string{"some-source", "some-other-source"}) + putPlan.Inputs = &atc.InputsConfig{ + Specified: []string{"some-source", "some-other-source"}, + } }) It("initializes the container with specified inputs", func() { - _, _, _, _, _, containerSpec, _ := fakeWorker.FindOrCreateContainerArgsForCall(0) + _, _, _, _, containerSpec, _ := fakeWorker.FindOrCreateContainerArgsForCall(0) Expect(containerSpec.Inputs).To(HaveLen(2)) Expect([]worker.ArtifactSource{ containerSpec.Inputs[0].Source(), @@ -267,32 +280,26 @@ var _ = Describe("PutStep", func() { Expect(fakeResource.PutCallCount()).To(Equal(1)) }) - It("reports the created version info", func() { - info := putStep.VersionInfo() - Expect(info.Version).To(Equal(atc.Version{"some": "version"})) - Expect(info.Metadata).To(Equal([]atc.MetadataField{{"some", "metadata"}})) - }) - It("is successful", func() { Expect(putStep.Succeeded()).To(BeTrue()) }) It("saves the build output", func() { - Expect(fakeBuild.SaveOutputCallCount()).To(Equal(1)) + Expect(fakeDelegate.SaveOutputCallCount()).To(Equal(1)) - _, actualResourceType, actualSource, actualResourceTypes, version, metadata, outputName, resourceName := fakeBuild.SaveOutputArgsForCall(0) - Expect(actualResourceType).To(Equal("some-resource-type")) + _, plan, actualSource, actualResourceTypes, info := fakeDelegate.SaveOutputArgsForCall(0) + Expect(plan.Name).To(Equal("some-name")) + 
Expect(plan.Type).To(Equal("some-resource-type")) + Expect(plan.Resource).To(Equal("some-resource")) Expect(actualSource).To(Equal(atc.Source{"some": "super-secret-source"})) - Expect(actualResourceTypes).To(Equal(resourceTypes)) - Expect(version).To(Equal(atc.Version{"some": "version"})) - Expect(metadata).To(Equal(db.NewResourceConfigMetadataFields([]atc.MetadataField{{"some", "metadata"}}))) - Expect(outputName).To(Equal("some-name")) - Expect(resourceName).To(Equal("some-resource")) + Expect(actualResourceTypes).To(Equal(interpolatedResourceTypes)) + Expect(info.Version).To(Equal(atc.Version{"some": "version"})) + Expect(info.Metadata).To(Equal([]atc.MetadataField{{"some", "metadata"}})) }) Context("when the resource is blank", func() { BeforeEach(func() { - pipelineResourceName = "" + putPlan.Resource = "" }) It("is successful", func() { @@ -300,7 +307,7 @@ var _ = Describe("PutStep", func() { }) It("does not save the build output", func() { - Expect(fakeBuild.SaveOutputCallCount()).To(Equal(0)) + Expect(fakeDelegate.SaveOutputCallCount()).To(Equal(0)) }) }) @@ -309,7 +316,7 @@ var _ = Describe("PutStep", func() { _, status, info := fakeDelegate.FinishedArgsForCall(0) Expect(status).To(Equal(exec.ExitStatus(0))) Expect(info.Version).To(Equal(atc.Version{"some": "version"})) - Expect(info.Metadata).To(Equal([]atc.MetadataField{{"some", "metadata"}})) + Expect(info.Metadata).To(Equal([]atc.MetadataField{{Name: "some", Value: "metadata"}})) }) It("stores the version info as the step result", func() { @@ -318,22 +325,10 @@ var _ = Describe("PutStep", func() { Expect(sID).To(Equal(planID)) Expect(sVal).To(Equal(exec.VersionInfo{ Version: atc.Version{"some": "version"}, - Metadata: []atc.MetadataField{{"some", "metadata"}}, + Metadata: []atc.MetadataField{{Name: "some", Value: "metadata"}}, })) }) - Context("when saving the build output fails", func() { - disaster := errors.New("nope") - - BeforeEach(func() { - fakeBuild.SaveOutputReturns(disaster) - }) - - 
It("returns the error", func() { - Expect(stepErr).To(Equal(disaster)) - }) - }) - Context("when performing the put exits unsuccessfully", func() { BeforeEach(func() { fakeResource.PutReturns(nil, resource.ErrResourceScriptFailed{ diff --git a/atc/exec/step.go b/atc/exec/step.go index eb380f8c2..70281564f 100644 --- a/atc/exec/step.go +++ b/atc/exec/step.go @@ -66,11 +66,5 @@ type ExitStatus int // special privileges (i.e. as an administrator user). type Privileged bool -// StepMetadata is used to inject metadata to make available to the step when -// it's running. -type StepMetadata interface { - Env() []string -} - type InputHandler func(io.ReadCloser) error type OutputHandler func(io.Writer) error diff --git a/atc/exec/step_metadata.go b/atc/exec/step_metadata.go new file mode 100644 index 000000000..b7ce07631 --- /dev/null +++ b/atc/exec/step_metadata.go @@ -0,0 +1,55 @@ +package exec + +import ( + "fmt" +) + +type StepMetadata struct { + BuildID int + BuildName string + TeamID int + TeamName string + JobID int + JobName string + PipelineID int + PipelineName string + ExternalURL string +} + +func (metadata StepMetadata) Env() []string { + env := []string{fmt.Sprintf("BUILD_ID=%d", metadata.BuildID)} + + if metadata.BuildName != "" { + env = append(env, "BUILD_NAME="+metadata.BuildName) + } + + if metadata.TeamID != 0 { + env = append(env, fmt.Sprintf("BUILD_TEAM_ID=%d", metadata.TeamID)) + } + + if metadata.TeamName != "" { + env = append(env, "BUILD_TEAM_NAME="+metadata.TeamName) + } + + if metadata.JobID != 0 { + env = append(env, fmt.Sprintf("BUILD_JOB_ID=%d", metadata.JobID)) + } + + if metadata.JobName != "" { + env = append(env, "BUILD_JOB_NAME="+metadata.JobName) + } + + if metadata.PipelineID != 0 { + env = append(env, fmt.Sprintf("BUILD_PIPELINE_ID=%d", metadata.PipelineID)) + } + + if metadata.PipelineName != "" { + env = append(env, "BUILD_PIPELINE_NAME="+metadata.PipelineName) + } + + if metadata.ExternalURL != "" { + env = append(env, 
"ATC_EXTERNAL_URL="+metadata.ExternalURL) + } + + return env +} diff --git a/atc/exec/step_metadata_test.go b/atc/exec/step_metadata_test.go new file mode 100644 index 000000000..b2a574a04 --- /dev/null +++ b/atc/exec/step_metadata_test.go @@ -0,0 +1,57 @@ +package exec_test + +import ( + "github.com/concourse/concourse/atc/exec" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("StepMetadata", func() { + var stepMetadata exec.StepMetadata + + Describe("Env", func() { + Context("when populating fields", func() { + BeforeEach(func() { + stepMetadata = exec.StepMetadata{ + BuildID: 1, + BuildName: "42", + TeamID: 2222, + TeamName: "some-team", + JobID: 3333, + JobName: "some-job-name", + PipelineID: 4444, + PipelineName: "some-pipeline-name", + ExternalURL: "http://www.example.com", + } + }) + + It("returns the specified values", func() { + Expect(stepMetadata.Env()).To(ConsistOf( + "BUILD_ID=1", + "BUILD_NAME=42", + "BUILD_TEAM_ID=2222", + "BUILD_TEAM_NAME=some-team", + "BUILD_JOB_ID=3333", + "BUILD_JOB_NAME=some-job-name", + "BUILD_PIPELINE_ID=4444", + "BUILD_PIPELINE_NAME=some-pipeline-name", + "ATC_EXTERNAL_URL=http://www.example.com", + )) + }) + }) + + Context("when fields are empty", func() { + BeforeEach(func() { + stepMetadata = exec.StepMetadata{ + BuildID: 1, + } + }) + It("does not include fields that are not set", func() { + Expect(stepMetadata.Env()).To(Equal([]string{ + "BUILD_ID=1", + })) + }) + }) + }) +}) diff --git a/atc/exec/task_step.go b/atc/exec/task_step.go index b86950858..ee5880006 100644 --- a/atc/exec/task_step.go +++ b/atc/exec/task_step.go @@ -13,7 +13,7 @@ import ( "code.cloudfoundry.org/garden" "code.cloudfoundry.org/lager" "code.cloudfoundry.org/lager/lagerctx" - boshtemplate "github.com/cloudfoundry/bosh-cli/director/template" + "github.com/cloudfoundry/bosh-cli/director/template" "github.com/concourse/concourse/atc" "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" @@ 
-66,73 +66,39 @@ type TaskDelegate interface { // TaskStep executes a TaskConfig, whose inputs will be fetched from the // artifact.Repository and outputs will be added to the artifact.Repository. type TaskStep struct { - privileged Privileged - configSource TaskConfigSource - tags atc.Tags - inputMapping map[string]string - outputMapping map[string]string - - artifactsRoot string - imageArtifactName string - - delegate TaskDelegate - - workerPool worker.Pool - teamID int - buildID int - jobID int - stepName string planID atc.PlanID + plan atc.TaskPlan + defaultLimits atc.ContainerLimits + metadata StepMetadata containerMetadata db.ContainerMetadata - - resourceTypes creds.VersionedResourceTypes - - defaultLimits atc.ContainerLimits - - succeeded bool - - strategy worker.ContainerPlacementStrategy + secrets creds.Secrets + strategy worker.ContainerPlacementStrategy + workerPool worker.Pool + delegate TaskDelegate + succeeded bool } func NewTaskStep( - privileged Privileged, - configSource TaskConfigSource, - tags atc.Tags, - inputMapping map[string]string, - outputMapping map[string]string, - artifactsRoot string, - imageArtifactName string, - delegate TaskDelegate, - workerPool worker.Pool, - teamID int, - buildID int, - jobID int, - stepName string, planID atc.PlanID, - containerMetadata db.ContainerMetadata, - resourceTypes creds.VersionedResourceTypes, + plan atc.TaskPlan, defaultLimits atc.ContainerLimits, + metadata StepMetadata, + containerMetadata db.ContainerMetadata, + secrets creds.Secrets, strategy worker.ContainerPlacementStrategy, + workerPool worker.Pool, + delegate TaskDelegate, ) Step { return &TaskStep{ - privileged: privileged, - configSource: configSource, - tags: tags, - inputMapping: inputMapping, - outputMapping: outputMapping, - artifactsRoot: artifactsRoot, - imageArtifactName: imageArtifactName, - delegate: delegate, - workerPool: workerPool, - teamID: teamID, - buildID: buildID, - jobID: jobID, - stepName: stepName, planID: planID, - 
containerMetadata: containerMetadata, - resourceTypes: resourceTypes, + plan: plan, defaultLimits: defaultLimits, + metadata: metadata, + containerMetadata: containerMetadata, + secrets: secrets, strategy: strategy, + workerPool: workerPool, + delegate: delegate, } } @@ -151,45 +117,88 @@ func NewTaskStep( // are registered with the artifact.Repository. If no outputs are specified, the // task's entire working directory is registered as an ArtifactSource under the // name of the task. -func (action *TaskStep) Run(ctx context.Context, state RunState) error { +func (step *TaskStep) Run(ctx context.Context, state RunState) error { logger := lagerctx.FromContext(ctx) logger = logger.Session("task-step", lager.Data{ - "step-name": action.stepName, - "job-id": action.jobID, + "step-name": step.plan.Name, + "job-id": step.metadata.JobID, }) + variables := creds.NewVariables(step.secrets, step.metadata.TeamName, step.metadata.PipelineName) + + resourceTypes, err := creds.NewVersionedResourceTypes(variables, step.plan.VersionedResourceTypes).Evaluate() + if err != nil { + return err + } + + var taskConfigSource TaskConfigSource + var taskVars []template.Variables + + if step.plan.ConfigPath != "" { + // external task - construct a source which reads it from file + taskConfigSource = FileConfigSource{ConfigPath: step.plan.ConfigPath} + + // for interpolation - use 'vars' from the pipeline, and then fill remaining with cred variables + taskVars = []template.Variables{template.StaticVariables(step.plan.Vars), variables} + } else { + // embedded task - first we take it + taskConfigSource = StaticConfigSource{Config: step.plan.Config} + + // for interpolation - use just cred variables + taskVars = []template.Variables{variables} + } + + // override params + taskConfigSource = &OverrideParamsConfigSource{ConfigSource: taskConfigSource, Params: step.plan.Params} + + // interpolate template vars + taskConfigSource = InterpolateTemplateConfigSource{ConfigSource: taskConfigSource, 
Vars: taskVars} + + // validate + taskConfigSource = ValidatingConfigSource{ConfigSource: taskConfigSource} + repository := state.Artifacts() - config, err := action.configSource.FetchConfig(logger, repository) + config, err := taskConfigSource.FetchConfig(logger, repository) - for _, warning := range action.configSource.Warnings() { - fmt.Fprintln(action.delegate.Stderr(), "[WARNING]", warning) + for _, warning := range taskConfigSource.Warnings() { + fmt.Fprintln(step.delegate.Stderr(), "[WARNING]", warning) } if err != nil { return err } + if config.Limits.CPU == nil { - config.Limits.CPU = action.defaultLimits.CPU + config.Limits.CPU = step.defaultLimits.CPU } if config.Limits.Memory == nil { - config.Limits.Memory = action.defaultLimits.Memory + config.Limits.Memory = step.defaultLimits.Memory } - action.delegate.Initializing(logger, config) + step.delegate.Initializing(logger, config) - containerSpec, err := action.containerSpec(logger, repository, config) + workerSpec, err := step.workerSpec(logger, resourceTypes, repository, config) if err != nil { return err } - workerSpec, err := action.workerSpec(logger, action.resourceTypes, repository, config) + containerSpec, err := step.containerSpec(logger, repository, config, step.containerMetadata) if err != nil { return err } - owner := db.NewBuildStepContainerOwner(action.buildID, action.planID, action.teamID) - chosenWorker, err := action.workerPool.FindOrChooseWorkerForContainer(logger, owner, containerSpec, workerSpec, action.strategy) + owner := db.NewBuildStepContainerOwner(step.metadata.BuildID, step.planID, step.metadata.TeamID) + + chosenWorker, err := step.workerPool.FindOrChooseWorkerForContainer( + ctx, + logger, + owner, + containerSpec, + step.containerMetadata, + workerSpec, + step.strategy, + ) if err != nil { return err } @@ -197,11 +206,10 @@ func (action *TaskStep) Run(ctx context.Context, state RunState) error { container, err := chosenWorker.FindOrCreateContainer( ctx, logger, - 
action.delegate, + step.delegate, owner, - action.containerMetadata, containerSpec, - action.resourceTypes, + resourceTypes, ) if err != nil { return err @@ -216,9 +224,9 @@ func (action *TaskStep) Run(ctx context.Context, state RunState) error { return err } - action.succeeded = (status == 0) + step.succeeded = (status == 0) - err = action.registerOutputs(logger, repository, config, container) + err = step.registerOutputs(logger, repository, config, container, step.containerMetadata) if err != nil { return err } @@ -227,8 +235,8 @@ func (action *TaskStep) Run(ctx context.Context, state RunState) error { } processIO := garden.ProcessIO{ - Stdout: action.delegate.Stdout(), - Stderr: action.delegate.Stderr(), + Stdout: step.delegate.Stdout(), + Stderr: step.delegate.Stderr(), } process, err := container.Attach(taskProcessID, processIO) @@ -237,7 +245,7 @@ func (action *TaskStep) Run(ctx context.Context, state RunState) error { } else { logger.Info("spawning") - action.delegate.Starting(logger, config) + step.delegate.Starting(logger, config) process, err = container.Run( garden.ProcessSpec{ @@ -246,7 +254,7 @@ func (action *TaskStep) Run(ctx context.Context, state RunState) error { Path: config.Run.Path, Args: config.Run.Args, - Dir: path.Join(action.artifactsRoot, config.Run.Dir), + Dir: path.Join(step.containerMetadata.WorkingDirectory, config.Run.Dir), // Guardian sets the default TTY window size to width: 80, height: 24, // which creates ANSI control sequences that do not work with other window sizes @@ -274,7 +282,7 @@ func (action *TaskStep) Run(ctx context.Context, state RunState) error { select { case <-ctx.Done(): - err = action.registerOutputs(logger, repository, config, container) + err = step.registerOutputs(logger, repository, config, container, step.containerMetadata) if err != nil { return err } @@ -293,39 +301,39 @@ func (action *TaskStep) Run(ctx context.Context, state RunState) error { return processErr } - err = action.registerOutputs(logger, 
repository, config, container) + err = step.registerOutputs(logger, repository, config, container, step.containerMetadata) if err != nil { return err } - action.delegate.Finished(logger, ExitStatus(processStatus)) + step.delegate.Finished(logger, ExitStatus(processStatus)) err = container.SetProperty(taskExitStatusPropertyName, fmt.Sprintf("%d", processStatus)) if err != nil { return err } - action.succeeded = processStatus == 0 + step.succeeded = processStatus == 0 return nil } } -func (action *TaskStep) Succeeded() bool { - return action.succeeded +func (step *TaskStep) Succeeded() bool { + return step.succeeded } -func (action *TaskStep) imageSpec(logger lager.Logger, repository *artifact.Repository, config atc.TaskConfig) (worker.ImageSpec, error) { +func (step *TaskStep) imageSpec(logger lager.Logger, repository *artifact.Repository, config atc.TaskConfig) (worker.ImageSpec, error) { imageSpec := worker.ImageSpec{ - Privileged: bool(action.privileged), + Privileged: bool(step.plan.Privileged), } // Determine the source of the container image // a reference to an artifact (get step, task output) ? 
- if action.imageArtifactName != "" { - source, found := repository.SourceFor(artifact.Name(action.imageArtifactName)) + if step.plan.ImageArtifactName != "" { + source, found := repository.SourceFor(artifact.Name(step.plan.ImageArtifactName)) if !found { - return worker.ImageSpec{}, MissingTaskImageSourceError{action.imageArtifactName} + return worker.ImageSpec{}, MissingTaskImageSourceError{step.plan.ImageArtifactName} } imageSpec.ImageArtifactSource = source @@ -334,7 +342,7 @@ func (action *TaskStep) imageSpec(logger lager.Logger, repository *artifact.Repo } else if config.ImageResource != nil { imageSpec.ImageResource = &worker.ImageResource{ Type: config.ImageResource.Type, - Source: creds.NewSource(boshtemplate.StaticVariables{}, config.ImageResource.Source), + Source: config.ImageResource.Source, Params: config.ImageResource.Params, Version: config.ImageResource.Version, } @@ -346,13 +354,13 @@ func (action *TaskStep) imageSpec(logger lager.Logger, repository *artifact.Repo return imageSpec, nil } -func (action *TaskStep) containerInputs(logger lager.Logger, repository *artifact.Repository, config atc.TaskConfig) ([]worker.InputSource, error) { +func (step *TaskStep) containerInputs(logger lager.Logger, repository *artifact.Repository, config atc.TaskConfig, metadata db.ContainerMetadata) ([]worker.InputSource, error) { inputs := []worker.InputSource{} var missingRequiredInputs []string for _, input := range config.Inputs { inputName := input.Name - if sourceName, ok := action.inputMapping[inputName]; ok { + if sourceName, ok := step.plan.InputMapping[inputName]; ok { inputName = sourceName } @@ -367,7 +375,7 @@ func (action *TaskStep) containerInputs(logger lager.Logger, repository *artifac inputs = append(inputs, &taskInputSource{ config: input, source: source, - artifactsRoot: action.artifactsRoot, + artifactsRoot: metadata.WorkingDirectory, }) } @@ -376,10 +384,10 @@ func (action *TaskStep) containerInputs(logger lager.Logger, repository *artifac } for 
_, cacheConfig := range config.Caches { - source := newTaskCacheSource(logger, action.teamID, action.jobID, action.stepName, cacheConfig.Path) + source := newTaskCacheSource(logger, step.metadata.TeamID, step.metadata.JobID, step.plan.Name, cacheConfig.Path) inputs = append(inputs, &taskCacheInputSource{ source: source, - artifactsRoot: action.artifactsRoot, + artifactsRoot: metadata.WorkingDirectory, cachePath: cacheConfig.Path, }) } @@ -387,48 +395,48 @@ func (action *TaskStep) containerInputs(logger lager.Logger, repository *artifac return inputs, nil } -func (action *TaskStep) containerSpec(logger lager.Logger, repository *artifact.Repository, config atc.TaskConfig) (worker.ContainerSpec, error) { - imageSpec, err := action.imageSpec(logger, repository, config) +func (step *TaskStep) containerSpec(logger lager.Logger, repository *artifact.Repository, config atc.TaskConfig, metadata db.ContainerMetadata) (worker.ContainerSpec, error) { + imageSpec, err := step.imageSpec(logger, repository, config) if err != nil { return worker.ContainerSpec{}, err } containerSpec := worker.ContainerSpec{ Platform: config.Platform, - Tags: action.tags, - TeamID: action.teamID, + Tags: step.plan.Tags, + TeamID: step.metadata.TeamID, ImageSpec: imageSpec, Limits: worker.ContainerLimits(config.Limits), User: config.Run.User, - Dir: action.artifactsRoot, - Env: action.envForParams(config.Params), + Dir: metadata.WorkingDirectory, + Env: step.envForParams(config.Params), Inputs: []worker.InputSource{}, Outputs: worker.OutputPaths{}, } - containerSpec.Inputs, err = action.containerInputs(logger, repository, config) + containerSpec.Inputs, err = step.containerInputs(logger, repository, config, metadata) if err != nil { return worker.ContainerSpec{}, err } for _, output := range config.Outputs { - path := artifactsPath(output, action.artifactsRoot) + path := artifactsPath(output, metadata.WorkingDirectory) containerSpec.Outputs[output.Name] = path } return containerSpec, nil } -func 
(action *TaskStep) workerSpec(logger lager.Logger, resourceTypes creds.VersionedResourceTypes, repository *artifact.Repository, config atc.TaskConfig) (worker.WorkerSpec, error) { +func (step *TaskStep) workerSpec(logger lager.Logger, resourceTypes atc.VersionedResourceTypes, repository *artifact.Repository, config atc.TaskConfig) (worker.WorkerSpec, error) { workerSpec := worker.WorkerSpec{ Platform: config.Platform, - Tags: action.tags, - TeamID: action.teamID, + Tags: step.plan.Tags, + TeamID: step.metadata.TeamID, ResourceTypes: resourceTypes, } - imageSpec, err := action.imageSpec(logger, repository, config) + imageSpec, err := step.imageSpec(logger, repository, config) if err != nil { return worker.WorkerSpec{}, err } @@ -440,18 +448,18 @@ func (action *TaskStep) workerSpec(logger lager.Logger, resourceTypes creds.Vers return workerSpec, nil } -func (action *TaskStep) registerOutputs(logger lager.Logger, repository *artifact.Repository, config atc.TaskConfig, container worker.Container) error { +func (step *TaskStep) registerOutputs(logger lager.Logger, repository *artifact.Repository, config atc.TaskConfig, container worker.Container, metadata db.ContainerMetadata) error { volumeMounts := container.VolumeMounts() logger.Debug("registering-outputs", lager.Data{"outputs": config.Outputs}) for _, output := range config.Outputs { outputName := output.Name - if destinationName, ok := action.outputMapping[output.Name]; ok { + if destinationName, ok := step.plan.OutputMapping[output.Name]; ok { outputName = destinationName } - outputPath := artifactsPath(output, action.artifactsRoot) + outputPath := artifactsPath(output, metadata.WorkingDirectory) for _, mount := range volumeMounts { if filepath.Clean(mount.MountPath) == filepath.Clean(outputPath) { @@ -462,20 +470,20 @@ func (action *TaskStep) registerOutputs(logger lager.Logger, repository *artifac } // Do not initialize caches for one-off builds - if action.jobID != 0 { + if step.metadata.JobID != 0 { 
logger.Debug("initializing-caches", lager.Data{"caches": config.Caches}) for _, cacheConfig := range config.Caches { for _, volumeMount := range volumeMounts { - if volumeMount.MountPath == filepath.Join(action.artifactsRoot, cacheConfig.Path) { + if volumeMount.MountPath == filepath.Join(metadata.WorkingDirectory, cacheConfig.Path) { logger.Debug("initializing-cache", lager.Data{"path": volumeMount.MountPath}) err := volumeMount.Volume.InitializeTaskCache( logger, - action.jobID, - action.stepName, + step.metadata.JobID, + step.plan.Name, cacheConfig.Path, - bool(action.privileged)) + bool(step.plan.Privileged)) if err != nil { return err } diff --git a/atc/exec/task_step_test.go b/atc/exec/task_step_test.go index b9d5e9c81..afe299095 100644 --- a/atc/exec/task_step_test.go +++ b/atc/exec/task_step_test.go @@ -13,9 +13,8 @@ import ( "code.cloudfoundry.org/garden/gardenfakes" "code.cloudfoundry.org/lager" "code.cloudfoundry.org/lager/lagertest" - "github.com/cloudfoundry/bosh-cli/director/template" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" + "github.com/concourse/concourse/atc/creds/credsfakes" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/exec" "github.com/concourse/concourse/atc/exec/artifact" @@ -30,124 +29,125 @@ import ( var _ = Describe("TaskStep", func() { var ( ctx context.Context + cancel func() logger *lagertest.TestLogger - cancel func() + stdoutBuf *gbytes.Buffer + stderrBuf *gbytes.Buffer fakePool *workerfakes.FakePool fakeWorker *workerfakes.FakeWorker fakeStrategy *workerfakes.FakeContainerPlacementStrategy - stdoutBuf *gbytes.Buffer - stderrBuf *gbytes.Buffer + fakeSecretManager *credsfakes.FakeSecrets + fakeDelegate *execfakes.FakeTaskDelegate + taskPlan *atc.TaskPlan - imageArtifactName string - containerMetadata db.ContainerMetadata - - fakeDelegate *execfakes.FakeTaskDelegate - - privileged exec.Privileged - tags []string - teamID int - buildID int - planID atc.PlanID - jobID 
int - configSource *execfakes.FakeTaskConfigSource - resourceTypes creds.VersionedResourceTypes - inputMapping map[string]string - outputMapping map[string]string + interpolatedResourceTypes atc.VersionedResourceTypes repo *artifact.Repository state *execfakes.FakeRunState taskStep exec.Step + stepErr error - stepErr error + containerMetadata = db.ContainerMetadata{ + WorkingDirectory: "some-artifact-root", + Type: db.ContainerTypeTask, + StepName: "some-step", + } + + stepMetadata = exec.StepMetadata{ + TeamID: 123, + BuildID: 1234, + JobID: 12345, + } + + planID = atc.PlanID(42) ) BeforeEach(func() { ctx, cancel = context.WithCancel(context.Background()) logger = lagertest.NewTestLogger("task-action-test") + stdoutBuf = gbytes.NewBuffer() + stderrBuf = gbytes.NewBuffer() + fakeWorker = new(workerfakes.FakeWorker) fakePool = new(workerfakes.FakePool) fakeStrategy = new(workerfakes.FakeContainerPlacementStrategy) - stdoutBuf = gbytes.NewBuffer() - stderrBuf = gbytes.NewBuffer() + fakeSecretManager = new(credsfakes.FakeSecrets) + fakeSecretManager.GetReturns("super-secret-source", nil, true, nil) fakeDelegate = new(execfakes.FakeTaskDelegate) fakeDelegate.StdoutReturns(stdoutBuf) fakeDelegate.StderrReturns(stderrBuf) - privileged = false - tags = []string{"step", "tags"} - teamID = 123 - planID = atc.PlanID(42) - buildID = 1234 - jobID = 12345 - configSource = new(execfakes.FakeTaskConfigSource) - repo = artifact.NewRepository() state = new(execfakes.FakeRunState) state.ArtifactsReturns(repo) - resourceTypes = creds.NewVersionedResourceTypes(template.StaticVariables{}, atc.VersionedResourceTypes{ + uninterpolatedResourceTypes := atc.VersionedResourceTypes{ { ResourceType: atc.ResourceType{ Name: "custom-resource", Type: "custom-type", - Source: atc.Source{"some-custom": "source"}, + Source: atc.Source{"some-custom": "((source-param))"}, Params: atc.Params{"some-custom": "param"}, }, Version: atc.Version{"some-custom": "version"}, }, - }) - - inputMapping = nil - 
outputMapping = nil - imageArtifactName = "" - - containerMetadata = db.ContainerMetadata{ - Type: db.ContainerTypeTask, - StepName: "some-step", } - stepErr = nil + interpolatedResourceTypes = atc.VersionedResourceTypes{ + { + ResourceType: atc.ResourceType{ + Name: "custom-resource", + Type: "custom-type", + Source: atc.Source{"some-custom": "super-secret-source"}, + Params: atc.Params{"some-custom": "param"}, + }, + Version: atc.Version{"some-custom": "version"}, + }, + } + + taskPlan = &atc.TaskPlan{ + Name: "some-task", + Privileged: false, + Tags: []string{"step", "tags"}, + VersionedResourceTypes: uninterpolatedResourceTypes, + } }) JustBeforeEach(func() { + plan := atc.Plan{ + ID: atc.PlanID(planID), + Task: taskPlan, + } + taskStep = exec.NewTaskStep( - privileged, - configSource, - tags, - inputMapping, - outputMapping, - "some-artifact-root", - imageArtifactName, - fakeDelegate, - fakePool, - teamID, - buildID, - jobID, - "some-task", - planID, - containerMetadata, - resourceTypes, + plan.ID, + *plan.Task, atc.ContainerLimits{}, + stepMetadata, + containerMetadata, + fakeSecretManager, fakeStrategy, + fakePool, + fakeDelegate, ) stepErr = taskStep.Run(ctx, state) }) - Context("when getting the config works", func() { - var fetchedConfig atc.TaskConfig + Context("when the plan has a config", func() { BeforeEach(func() { cpu := uint64(1024) memory := uint64(1024) - fetchedConfig = atc.TaskConfig{ + + taskPlan.Config = &atc.TaskConfig{ Platform: "some-platform", ImageResource: &atc.ImageResource{ Type: "docker", @@ -167,8 +167,6 @@ var _ = Describe("TaskStep", func() { Args: []string{"some", "args"}, }, } - - configSource.FetchConfigReturns(fetchedConfig, nil) }) Context("when the worker is either found or chosen", func() { @@ -182,19 +180,24 @@ var _ = Describe("TaskStep", func() { It("finds or chooses a worker", func() { Expect(fakePool.FindOrChooseWorkerForContainerCallCount()).To(Equal(1)) - _, owner, containerSpec, workerSpec, strategy := 
fakePool.FindOrChooseWorkerForContainerArgsForCall(0) - Expect(owner).To(Equal(db.NewBuildStepContainerOwner(buildID, planID, teamID))) + _, _, owner, containerSpec, createdMetadata, workerSpec, strategy := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) + Expect(owner).To(Equal(db.NewBuildStepContainerOwner(stepMetadata.BuildID, planID, stepMetadata.TeamID))) + Expect(createdMetadata).To(Equal(db.ContainerMetadata{ + WorkingDirectory: "some-artifact-root", + Type: db.ContainerTypeTask, + StepName: "some-step", + })) cpu := uint64(1024) memory := uint64(1024) Expect(containerSpec).To(Equal(worker.ContainerSpec{ Platform: "some-platform", Tags: []string{"step", "tags"}, - TeamID: teamID, + TeamID: stepMetadata.TeamID, ImageSpec: worker.ImageSpec{ ImageResource: &worker.ImageResource{ Type: "docker", - Source: creds.NewSource(template.StaticVariables{}, atc.Source{"some": "secret-source-param"}), + Source: atc.Source{"some": "secret-source-param"}, Params: &atc.Params{"some": "params"}, Version: &atc.Version{"some": "version"}, }, @@ -213,9 +216,9 @@ var _ = Describe("TaskStep", func() { Expect(workerSpec).To(Equal(worker.WorkerSpec{ Platform: "some-platform", Tags: []string{"step", "tags"}, - TeamID: teamID, + TeamID: stepMetadata.TeamID, ResourceType: "docker", - ResourceTypes: resourceTypes, + ResourceTypes: interpolatedResourceTypes, })) Expect(strategy).To(Equal(fakeStrategy)) }) @@ -246,14 +249,9 @@ var _ = Describe("TaskStep", func() { It("finds or creates a container", func() { Expect(fakeWorker.FindOrCreateContainerCallCount()).To(Equal(1)) - _, cancel, delegate, owner, createdMetadata, containerSpec, actualResourceTypes := fakeWorker.FindOrCreateContainerArgsForCall(0) + _, cancel, delegate, owner, containerSpec, actualResourceTypes := fakeWorker.FindOrCreateContainerArgsForCall(0) Expect(cancel).ToNot(BeNil()) - Expect(owner).To(Equal(db.NewBuildStepContainerOwner(buildID, planID, teamID))) - Expect(createdMetadata).To(Equal(db.ContainerMetadata{ - 
Type: db.ContainerTypeTask, - StepName: "some-step", - })) - + Expect(owner).To(Equal(db.NewBuildStepContainerOwner(stepMetadata.BuildID, planID, stepMetadata.TeamID))) Expect(delegate).To(Equal(fakeDelegate)) cpu := uint64(1024) @@ -261,11 +259,11 @@ var _ = Describe("TaskStep", func() { Expect(containerSpec).To(Equal(worker.ContainerSpec{ Platform: "some-platform", Tags: []string{"step", "tags"}, - TeamID: teamID, + TeamID: stepMetadata.TeamID, ImageSpec: worker.ImageSpec{ ImageResource: &worker.ImageResource{ Type: "docker", - Source: creds.NewSource(template.StaticVariables{}, atc.Source{"some": "secret-source-param"}), + Source: atc.Source{"some": "secret-source-param"}, Params: &atc.Params{"some": "params"}, Version: &atc.Version{"some": "version"}, }, @@ -280,12 +278,12 @@ var _ = Describe("TaskStep", func() { Inputs: []worker.InputSource{}, Outputs: worker.OutputPaths{}, })) - Expect(actualResourceTypes).To(Equal(resourceTypes)) + Expect(actualResourceTypes).To(Equal(interpolatedResourceTypes)) }) Context("when rootfs uri is set instead of image resource", func() { BeforeEach(func() { - fetchedConfig = atc.TaskConfig{ + taskPlan.Config = &atc.TaskConfig{ Platform: "some-platform", RootfsURI: "some-image", Params: map[string]string{"SOME": "params"}, @@ -294,26 +292,19 @@ var _ = Describe("TaskStep", func() { Args: []string{"some", "args"}, }, } - - configSource.FetchConfigReturns(fetchedConfig, nil) }) It("finds or creates a container", func() { Expect(fakeWorker.FindOrCreateContainerCallCount()).To(Equal(1)) - _, cancel, delegate, owner, createdMetadata, containerSpec, actualResourceTypes := fakeWorker.FindOrCreateContainerArgsForCall(0) + _, cancel, delegate, owner, containerSpec, actualResourceTypes := fakeWorker.FindOrCreateContainerArgsForCall(0) Expect(cancel).ToNot(BeNil()) - Expect(owner).To(Equal(db.NewBuildStepContainerOwner(buildID, planID, teamID))) - Expect(createdMetadata).To(Equal(db.ContainerMetadata{ - Type: db.ContainerTypeTask, - 
StepName: "some-step", - })) - + Expect(owner).To(Equal(db.NewBuildStepContainerOwner(stepMetadata.BuildID, planID, stepMetadata.TeamID))) Expect(delegate).To(Equal(fakeDelegate)) Expect(containerSpec).To(Equal(worker.ContainerSpec{ Platform: "some-platform", Tags: []string{"step", "tags"}, - TeamID: teamID, + TeamID: stepMetadata.TeamID, ImageSpec: worker.ImageSpec{ ImageURL: "some-image", Privileged: false, @@ -324,7 +315,7 @@ var _ = Describe("TaskStep", func() { Outputs: worker.OutputPaths{}, })) - Expect(actualResourceTypes).To(Equal(resourceTypes)) + Expect(actualResourceTypes).To(Equal(interpolatedResourceTypes)) }) }) @@ -370,7 +361,7 @@ var _ = Describe("TaskStep", func() { ) BeforeEach(func() { - configSource.FetchConfigReturns(atc.TaskConfig{ + taskPlan.Config = &atc.TaskConfig{ Platform: "some-platform", RootfsURI: "some-image", Params: map[string]string{"SOME": "params"}, @@ -383,7 +374,7 @@ var _ = Describe("TaskStep", func() { {Name: "some-other-output"}, {Name: "some-trailing-slash-output", Path: "some-output-configured-path-with-trailing-slash/"}, }, - }, nil) + } fakeNewlyCreatedVolume1 = new(workerfakes.FakeVolume) fakeNewlyCreatedVolume1.HandleReturns("some-handle-1") @@ -518,12 +509,12 @@ var _ = Describe("TaskStep", func() { Context("when privileged", func() { BeforeEach(func() { - privileged = true + taskPlan.Privileged = true }) It("creates the container privileged", func() { Expect(fakeWorker.FindOrCreateContainerCallCount()).To(Equal(1)) - _, _, _, _, _, containerSpec, _ := fakeWorker.FindOrCreateContainerArgsForCall(0) + _, _, _, _, containerSpec, _ := fakeWorker.FindOrCreateContainerArgsForCall(0) Expect(containerSpec.ImageSpec.Privileged).To(BeTrue()) }) @@ -549,7 +540,7 @@ var _ = Describe("TaskStep", func() { inputSource = new(workerfakes.FakeArtifactSource) otherInputSource = new(workerfakes.FakeArtifactSource) - configSource.FetchConfigReturns(atc.TaskConfig{ + taskPlan.Config = &atc.TaskConfig{ Platform: "some-platform", RootfsURI: 
"some-image", Params: map[string]string{"SOME": "params"}, @@ -561,7 +552,7 @@ var _ = Describe("TaskStep", func() { {Name: "some-input", Path: "some-input-configured-path"}, {Name: "some-other-input"}, }, - }, nil) + } }) Context("when all inputs are present", func() { @@ -571,7 +562,7 @@ var _ = Describe("TaskStep", func() { }) It("creates the container with the inputs configured correctly", func() { - _, _, _, _, _, containerSpec, _ := fakeWorker.FindOrCreateContainerArgsForCall(0) + _, _, _, _, containerSpec, _ := fakeWorker.FindOrCreateContainerArgsForCall(0) Expect(containerSpec.Inputs).To(HaveLen(2)) for _, input := range containerSpec.Inputs { switch input.DestinationPath() { @@ -603,16 +594,17 @@ var _ = Describe("TaskStep", func() { BeforeEach(func() { remappedInputSource = new(workerfakes.FakeArtifactSource) - inputMapping = map[string]string{"remapped-input": "remapped-input-src"} - configSource.FetchConfigReturns(atc.TaskConfig{ + taskPlan.InputMapping = map[string]string{"remapped-input": "remapped-input-src"} + taskPlan.Config = &atc.TaskConfig{ + Platform: "some-platform", Run: atc.TaskRunConfig{ Path: "ls", + Args: []string{"some", "args"}, }, Inputs: []atc.TaskInputConfig{ {Name: "remapped-input"}, }, - }, nil) - + } }) Context("when all inputs are present in the in source repository", func() { @@ -621,7 +613,8 @@ var _ = Describe("TaskStep", func() { }) It("uses remapped input", func() { - _, _, _, _, _, containerSpec, _ := fakeWorker.FindOrCreateContainerArgsForCall(0) + Expect(fakeWorker.FindOrCreateContainerCallCount()).To(Equal(1)) + _, _, _, _, containerSpec, _ := fakeWorker.FindOrCreateContainerArgsForCall(0) Expect(containerSpec.Inputs).To(HaveLen(1)) Expect(containerSpec.Inputs[0].Source()).To(Equal(remappedInputSource)) Expect(containerSpec.Inputs[0].DestinationPath()).To(Equal("some-artifact-root/remapped-input")) @@ -646,7 +639,8 @@ var _ = Describe("TaskStep", func() { optionalInputSource = new(workerfakes.FakeArtifactSource) 
optionalInput2Source = new(workerfakes.FakeArtifactSource) requiredInputSource = new(workerfakes.FakeArtifactSource) - configSource.FetchConfigReturns(atc.TaskConfig{ + taskPlan.Config = &atc.TaskConfig{ + Platform: "some-platform", Run: atc.TaskRunConfig{ Path: "ls", }, @@ -655,7 +649,7 @@ var _ = Describe("TaskStep", func() { {Name: "optional-input-2", Optional: true}, {Name: "required-input"}, }, - }, nil) + } }) Context("when an optional input is missing", func() { @@ -666,7 +660,7 @@ var _ = Describe("TaskStep", func() { It("runs successfully without the optional input", func() { Expect(stepErr).ToNot(HaveOccurred()) - _, _, _, _, _, containerSpec, _ := fakeWorker.FindOrCreateContainerArgsForCall(0) + _, _, _, _, containerSpec, _ := fakeWorker.FindOrCreateContainerArgsForCall(0) Expect(containerSpec.Inputs).To(HaveLen(2)) Expect(containerSpec.Inputs[0].Source()).To(Equal(optionalInput2Source)) Expect(containerSpec.Inputs[0].DestinationPath()).To(Equal("some-artifact-root/optional-input-2")) @@ -695,15 +689,17 @@ var _ = Describe("TaskStep", func() { ) BeforeEach(func() { - configSource.FetchConfigReturns(atc.TaskConfig{ + taskPlan.Config = &atc.TaskConfig{ Platform: "some-platform", RootfsURI: "some-image", - Run: atc.TaskRunConfig{}, + Run: atc.TaskRunConfig{ + Path: "ls", + }, Caches: []atc.CacheConfig{ {Path: "some-path-1"}, {Path: "some-path-2"}, }, - }, nil) + } fakeVolume1 = new(workerfakes.FakeVolume) fakeVolume2 = new(workerfakes.FakeVolume) @@ -720,7 +716,7 @@ var _ = Describe("TaskStep", func() { }) It("creates the container with the caches in the inputs", func() { - _, _, _, _, _, containerSpec, _ := fakeWorker.FindOrCreateContainerArgsForCall(0) + _, _, _, _, containerSpec, _ := fakeWorker.FindOrCreateContainerArgsForCall(0) Expect(containerSpec.Inputs).To(HaveLen(2)) Expect([]string{ containerSpec.Inputs[0].DestinationPath(), @@ -731,27 +727,33 @@ var _ = Describe("TaskStep", func() { )) }) - It("registers cache volumes as task caches", func() { - 
Expect(stepErr).ToNot(HaveOccurred()) + Context("when task belongs to a job", func() { + BeforeEach(func() { + stepMetadata.JobID = 12 + }) - Expect(fakeVolume1.InitializeTaskCacheCallCount()).To(Equal(1)) - _, jID, stepName, cachePath, p := fakeVolume1.InitializeTaskCacheArgsForCall(0) - Expect(jID).To(Equal(jobID)) - Expect(stepName).To(Equal("some-task")) - Expect(cachePath).To(Equal("some-path-1")) - Expect(p).To(Equal(bool(privileged))) + It("registers cache volumes as task caches", func() { + Expect(stepErr).ToNot(HaveOccurred()) - Expect(fakeVolume2.InitializeTaskCacheCallCount()).To(Equal(1)) - _, jID, stepName, cachePath, p = fakeVolume2.InitializeTaskCacheArgsForCall(0) - Expect(jID).To(Equal(jobID)) - Expect(stepName).To(Equal("some-task")) - Expect(cachePath).To(Equal("some-path-2")) - Expect(p).To(Equal(bool(privileged))) + Expect(fakeVolume1.InitializeTaskCacheCallCount()).To(Equal(1)) + _, jID, stepName, cachePath, p := fakeVolume1.InitializeTaskCacheArgsForCall(0) + Expect(jID).To(Equal(stepMetadata.JobID)) + Expect(stepName).To(Equal("some-task")) + Expect(cachePath).To(Equal("some-path-1")) + Expect(p).To(Equal(bool(taskPlan.Privileged))) + + Expect(fakeVolume2.InitializeTaskCacheCallCount()).To(Equal(1)) + _, jID, stepName, cachePath, p = fakeVolume2.InitializeTaskCacheArgsForCall(0) + Expect(jID).To(Equal(stepMetadata.JobID)) + Expect(stepName).To(Equal("some-task")) + Expect(cachePath).To(Equal("some-path-2")) + Expect(p).To(Equal(bool(taskPlan.Privileged))) + }) }) Context("when task does not belong to job (one-off build)", func() { BeforeEach(func() { - jobID = 0 + stepMetadata.JobID = 0 }) It("does not initialize caches", func() { @@ -764,7 +766,7 @@ var _ = Describe("TaskStep", func() { Context("when the configuration specifies paths for outputs", func() { BeforeEach(func() { - configSource.FetchConfigReturns(atc.TaskConfig{ + taskPlan.Config = &atc.TaskConfig{ Platform: "some-platform", RootfsURI: "some-image", Params: 
map[string]string{"SOME": "params"}, @@ -777,11 +779,11 @@ var _ = Describe("TaskStep", func() { {Name: "some-other-output"}, {Name: "some-trailing-slash-output", Path: "some-output-configured-path-with-trailing-slash/"}, }, - }, nil) + } }) It("configures them appropriately in the container spec", func() { - _, _, _, _, _, containerSpec, _ := fakeWorker.FindOrCreateContainerArgsForCall(0) + _, _, _, _, containerSpec, _ := fakeWorker.FindOrCreateContainerArgsForCall(0) Expect(containerSpec.Outputs).To(Equal(worker.OutputPaths{ "some-output": "some-artifact-root/some-output-configured-path/", "some-other-output": "some-artifact-root/some-other-output/", @@ -881,7 +883,7 @@ var _ = Describe("TaskStep", func() { }) It("passes existing output volumes to the resource", func() { - _, _, _, _, _, containerSpec, _ := fakeWorker.FindOrCreateContainerArgsForCall(0) + _, _, _, _, containerSpec, _ := fakeWorker.FindOrCreateContainerArgsForCall(0) Expect(containerSpec.Outputs).To(Equal(worker.OutputPaths{ "some-output": "some-artifact-root/some-output-configured-path/", "some-other-output": "some-artifact-root/some-other-output/", @@ -1142,15 +1144,16 @@ var _ = Describe("TaskStep", func() { ) BeforeEach(func() { - outputMapping = map[string]string{"generic-remapped-output": "specific-remapped-output"} - configSource.FetchConfigReturns(atc.TaskConfig{ + taskPlan.OutputMapping = map[string]string{"generic-remapped-output": "specific-remapped-output"} + taskPlan.Config = &atc.TaskConfig{ + Platform: "some-platform", Run: atc.TaskRunConfig{ Path: "ls", }, Outputs: []atc.TaskOutputConfig{ {Name: "generic-remapped-output"}, }, - }, nil) + } fakeProcess.WaitReturns(0, nil) @@ -1180,7 +1183,7 @@ var _ = Describe("TaskStep", func() { Context("when an image artifact name is specified", func() { BeforeEach(func() { - imageArtifactName = "some-image-artifact" + taskPlan.ImageArtifactName = "some-image-artifact" fakeProcess.WaitReturns(0, nil) }) @@ -1194,7 +1197,7 @@ var _ = 
Describe("TaskStep", func() { }) It("chooses a worker and creates the container with the image artifact source", func() { - _, _, containerSpec, workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) + _, _, _, containerSpec, _, workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) Expect(containerSpec.ImageSpec).To(Equal(worker.ImageSpec{ ImageArtifactSource: imageArtifactSource, })) @@ -1216,7 +1219,7 @@ var _ = Describe("TaskStep", func() { Context("when the task config also specifies image", func() { BeforeEach(func() { - configWithImage := atc.TaskConfig{ + taskPlan.Config = &atc.TaskConfig{ Platform: "some-platform", RootfsURI: "some-image", Params: map[string]string{"SOME": "params"}, @@ -1225,12 +1228,10 @@ var _ = Describe("TaskStep", func() { Args: []string{"some", "args"}, }, } - - configSource.FetchConfigReturns(configWithImage, nil) }) It("still chooses a worker and creates the container with the volume and a metadata stream", func() { - _, _, containerSpec, workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) + _, _, _, containerSpec, _, workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) Expect(containerSpec.ImageSpec).To(Equal(worker.ImageSpec{ ImageArtifactSource: imageArtifactSource, })) @@ -1241,7 +1242,7 @@ var _ = Describe("TaskStep", func() { Context("when the task config also specifies image_resource", func() { BeforeEach(func() { - configWithImageResource := atc.TaskConfig{ + taskPlan.Config = &atc.TaskConfig{ Platform: "some-platform", ImageResource: &atc.ImageResource{ Type: "docker", @@ -1255,12 +1256,10 @@ var _ = Describe("TaskStep", func() { Args: []string{"some", "args"}, }, } - - configSource.FetchConfigReturns(configWithImageResource, nil) }) It("still chooses a worker and creates the container with the volume and a metadata stream", func() { - _, _, containerSpec, workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) + _, _, _, containerSpec, _, 
workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) Expect(containerSpec.ImageSpec).To(Equal(worker.ImageSpec{ ImageArtifactSource: imageArtifactSource, })) @@ -1271,7 +1270,7 @@ var _ = Describe("TaskStep", func() { Context("when the task config also specifies image and image_resource", func() { BeforeEach(func() { - configWithImageAndImageResource := atc.TaskConfig{ + taskPlan.Config = &atc.TaskConfig{ Platform: "some-platform", RootfsURI: "some-image", ImageResource: &atc.ImageResource{ @@ -1286,12 +1285,10 @@ var _ = Describe("TaskStep", func() { Args: []string{"some", "args"}, }, } - - configSource.FetchConfigReturns(configWithImageAndImageResource, nil) }) It("still chooses a worker and creates the container with the volume and a metadata stream", func() { - _, _, containerSpec, workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) + _, _, _, containerSpec, _, workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) Expect(containerSpec.ImageSpec).To(Equal(worker.ImageSpec{ ImageArtifactSource: imageArtifactSource, })) @@ -1315,7 +1312,7 @@ var _ = Describe("TaskStep", func() { Context("when the image_resource is specified (even if RootfsURI is configured)", func() { BeforeEach(func() { - configWithImageResource := atc.TaskConfig{ + taskPlan.Config = &atc.TaskConfig{ Platform: "some-platform", RootfsURI: "some-image", ImageResource: &atc.ImageResource{ @@ -1330,15 +1327,13 @@ var _ = Describe("TaskStep", func() { Args: []string{"some", "args"}, }, } - - configSource.FetchConfigReturns(configWithImageResource, nil) }) It("creates the specs with the image resource", func() { - _, _, containerSpec, workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) + _, _, _, containerSpec, _, workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) Expect(containerSpec.ImageSpec.ImageResource).To(Equal(&worker.ImageResource{ Type: "docker", - Source: creds.NewSource(template.StaticVariables{}, 
atc.Source{"some": "super-secret-source"}), + Source: atc.Source{"some": "super-secret-source"}, Params: &atc.Params{"some": "params"}, Version: &atc.Version{"some": "version"}, })) @@ -1346,7 +1341,7 @@ var _ = Describe("TaskStep", func() { Expect(workerSpec).To(Equal(worker.WorkerSpec{ TeamID: 123, Platform: "some-platform", - ResourceTypes: resourceTypes, + ResourceTypes: interpolatedResourceTypes, Tags: []string{"step", "tags"}, ResourceType: "docker", })) @@ -1355,7 +1350,7 @@ var _ = Describe("TaskStep", func() { Context("when the RootfsURI is configured", func() { BeforeEach(func() { - configWithRootfs := atc.TaskConfig{ + taskPlan.Config = &atc.TaskConfig{ Platform: "some-platform", RootfsURI: "some-image", Params: map[string]string{"SOME": "params"}, @@ -1364,18 +1359,16 @@ var _ = Describe("TaskStep", func() { Args: []string{"some", "args"}, }, } - - configSource.FetchConfigReturns(configWithRootfs, nil) }) It("creates the specs with the image resource", func() { - _, _, containerSpec, workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) + _, _, _, containerSpec, _, workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) Expect(containerSpec.ImageSpec.ImageURL).To(Equal("some-image")) Expect(workerSpec).To(Equal(worker.WorkerSpec{ TeamID: 123, Platform: "some-platform", - ResourceTypes: resourceTypes, + ResourceTypes: interpolatedResourceTypes, Tags: []string{"step", "tags"}, })) }) @@ -1383,8 +1376,7 @@ var _ = Describe("TaskStep", func() { Context("when a run dir is specified", func() { BeforeEach(func() { - fetchedConfig.Run.Dir = "/some/dir" - configSource.FetchConfigReturns(fetchedConfig, nil) + taskPlan.Config.Run.Dir = "/some/dir" }) It("runs a process in the specified (custom) directory", func() { @@ -1395,12 +1387,11 @@ var _ = Describe("TaskStep", func() { Context("when a run user is specified", func() { BeforeEach(func() { - fetchedConfig.Run.User = "some-user" - configSource.FetchConfigReturns(fetchedConfig, 
nil) + taskPlan.Config.Run.User = "some-user" }) It("adds the user to the container spec", func() { - _, _, _, _, _, containerSpec, _ := fakeWorker.FindOrCreateContainerArgsForCall(0) + _, _, _, _, containerSpec, _ := fakeWorker.FindOrCreateContainerArgsForCall(0) Expect(containerSpec.User).To(Equal("some-user")) }) @@ -1656,15 +1647,29 @@ var _ = Describe("TaskStep", func() { }) }) - Context("when getting the config fails", func() { - disaster := errors.New("nope") + Context("when missing the platform", func() { BeforeEach(func() { - configSource.FetchConfigReturns(atc.TaskConfig{}, disaster) + taskPlan.Config.Platform = "" }) It("returns the error", func() { - Expect(stepErr).To(Equal(disaster)) + Expect(stepErr).To(HaveOccurred()) + }) + + It("is not successful", func() { + Expect(taskStep.Succeeded()).To(BeFalse()) + }) + }) + + Context("when missing the path to the executable", func() { + + BeforeEach(func() { + taskPlan.Config.Run.Path = "" + }) + + It("returns the error", func() { + Expect(stepErr).To(HaveOccurred()) }) It("is not successful", func() { diff --git a/atc/gc/resource_cache_collector_test.go b/atc/gc/resource_cache_collector_test.go index 37dd48699..7b5d34970 100644 --- a/atc/gc/resource_cache_collector_test.go +++ b/atc/gc/resource_cache_collector_test.go @@ -6,7 +6,6 @@ import ( sq "github.com/Masterminds/squirrel" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/db/algorithm" "github.com/concourse/concourse/atc/gc" @@ -43,7 +42,6 @@ var _ = Describe("ResourceCacheCollector", func() { Expect(err).ToNot(HaveOccurred()) oneOffCache, err = resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(oneOffBuild.ID()), "some-base-type", atc.Version{"some": "version"}, @@ -51,7 +49,7 @@ var _ = Describe("ResourceCacheCollector", func() { "some": "source", }, nil, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) 
Expect(err).NotTo(HaveOccurred()) @@ -59,7 +57,6 @@ var _ = Describe("ResourceCacheCollector", func() { Expect(err).ToNot(HaveOccurred()) jobCache, err = resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(jobBuild.ID()), "some-base-type", atc.Version{"some": "version"}, @@ -67,7 +64,7 @@ var _ = Describe("ResourceCacheCollector", func() { "some": "source", }, nil, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).NotTo(HaveOccurred()) @@ -77,11 +74,10 @@ var _ = Describe("ResourceCacheCollector", func() { Expect(found).To(BeTrue()) _, err = resource.SetResourceConfig( - logger, atc.Source{ "some": "source", }, - creds.VersionedResourceTypes{}) + atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) }) @@ -173,7 +169,6 @@ var _ = Describe("ResourceCacheCollector", func() { Expect(err).ToNot(HaveOccurred()) secondJobCache, err = resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(secondJobBuild.ID()), "some-base-type", atc.Version{"some": "new-version"}, @@ -181,7 +176,7 @@ var _ = Describe("ResourceCacheCollector", func() { "some": "source", }, nil, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).NotTo(HaveOccurred()) @@ -224,7 +219,6 @@ var _ = Describe("ResourceCacheCollector", func() { Expect(err).ToNot(HaveOccurred()) secondJobCache, err = resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(secondJobBuild.ID()), "some-base-type", atc.Version{"some": "new-version"}, @@ -232,7 +226,7 @@ var _ = Describe("ResourceCacheCollector", func() { "some": "source", }, nil, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).NotTo(HaveOccurred()) diff --git a/atc/gc/resource_cache_use_collector_test.go b/atc/gc/resource_cache_use_collector_test.go index 65f2506e3..75613b12d 100644 --- a/atc/gc/resource_cache_use_collector_test.go +++ b/atc/gc/resource_cache_use_collector_test.go @@ -3,9 +3,7 @@ package gc_test import ( 
"context" - "github.com/cloudfoundry/bosh-cli/director/template" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/gc" @@ -52,7 +50,7 @@ var _ = Describe("ResourceCacheUseCollector", func() { Name: "some-type", Type: "some-base-type", Source: atc.Source{ - "some-type": "((source-param))", + "some-type": "source-param", }, }, Version: atc.Version{"some-type": "version"}, @@ -62,7 +60,6 @@ var _ = Describe("ResourceCacheUseCollector", func() { Describe("for one-off builds", func() { BeforeEach(func() { _, err = resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(defaultBuild.ID()), "some-type", atc.Version{"some": "version"}, @@ -70,11 +67,9 @@ var _ = Describe("ResourceCacheUseCollector", func() { "some": "source", }, atc.Params{"some": "params"}, - creds.NewVersionedResourceTypes(template.StaticVariables{"source-param": "some-secret-sauce"}, - atc.VersionedResourceTypes{ - versionedResourceType, - }, - ), + atc.VersionedResourceTypes{ + versionedResourceType, + }, ) Expect(err).NotTo(HaveOccurred()) }) @@ -129,7 +124,6 @@ var _ = Describe("ResourceCacheUseCollector", func() { Expect(err).ToNot(HaveOccurred()) _, err = resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(jobBuild.ID()), "some-type", atc.Version{"some": "version"}, @@ -137,11 +131,9 @@ var _ = Describe("ResourceCacheUseCollector", func() { "some": "source", }, atc.Params{"some": "params"}, - creds.NewVersionedResourceTypes(template.StaticVariables{"source-param": "some-secret-sauce"}, - atc.VersionedResourceTypes{ - versionedResourceType, - }, - ), + atc.VersionedResourceTypes{ + versionedResourceType, + }, ) Expect(err).NotTo(HaveOccurred()) }) @@ -165,7 +157,6 @@ var _ = Describe("ResourceCacheUseCollector", func() { Expect(err).ToNot(HaveOccurred()) _, err = resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(secondJobBuild.ID()), 
"some-type", atc.Version{"some": "version"}, @@ -173,11 +164,9 @@ var _ = Describe("ResourceCacheUseCollector", func() { "some": "source", }, atc.Params{"some": "params"}, - creds.NewVersionedResourceTypes(template.StaticVariables{"source-param": "some-secret-sauce"}, - atc.VersionedResourceTypes{ - versionedResourceType, - }, - ), + atc.VersionedResourceTypes{ + versionedResourceType, + }, ) Expect(err).NotTo(HaveOccurred()) }) @@ -214,7 +203,6 @@ var _ = Describe("ResourceCacheUseCollector", func() { Expect(err).ToNot(HaveOccurred()) _, err = resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForContainer(container.ID()), "some-type", atc.Version{"some-type": "version"}, @@ -222,11 +210,9 @@ var _ = Describe("ResourceCacheUseCollector", func() { "cache": "source", }, atc.Params{"some": "params"}, - creds.NewVersionedResourceTypes(template.StaticVariables{"source-param": "some-secret-sauce"}, - atc.VersionedResourceTypes{ - versionedResourceType, - }, - ), + atc.VersionedResourceTypes{ + versionedResourceType, + }, ) Expect(err).NotTo(HaveOccurred()) }) diff --git a/atc/gc/resource_config_check_session_collector_test.go b/atc/gc/resource_config_check_session_collector_test.go index a18e22006..036c4901f 100644 --- a/atc/gc/resource_config_check_session_collector_test.go +++ b/atc/gc/resource_config_check_session_collector_test.go @@ -6,7 +6,6 @@ import ( sq "github.com/Masterminds/squirrel" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/gc" @@ -34,8 +33,8 @@ var _ = Describe("ResourceConfigCheckSessionCollector", func() { var owner db.ContainerOwner ownerExpiries = db.ContainerOwnerExpiries{ - Min: 10 * time.Second, - Max: 10 * time.Second, + Min: 10 * time.Second, + Max: 10 * time.Second, } BeforeEach(func() { @@ -46,11 +45,10 @@ var _ = Describe("ResourceConfigCheckSessionCollector", func() { Expect(found).To(BeTrue()) resourceConfigScope, err 
= resource.SetResourceConfig( - logger, atc.Source{ "some": "source", }, - creds.VersionedResourceTypes{}) + atc.VersionedResourceTypes{}) Expect(err).ToNot(HaveOccurred()) owner = db.NewResourceConfigCheckSessionContainerOwner(resourceConfigScope.ResourceConfig(), ownerExpiries) diff --git a/atc/gc/resource_config_collector_test.go b/atc/gc/resource_config_collector_test.go index 6cf4de1b2..92aba3bbf 100644 --- a/atc/gc/resource_config_collector_test.go +++ b/atc/gc/resource_config_collector_test.go @@ -5,7 +5,6 @@ import ( "time" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/gc" @@ -40,18 +39,17 @@ var _ = Describe("ResourceConfigCollector", func() { Context("when the config is referenced in resource config check sessions", func() { ownerExpiries := db.ContainerOwnerExpiries{ - Min: 5 * time.Minute, - Max: 10 * time.Minute, + Min: 5 * time.Minute, + Max: 10 * time.Minute, } BeforeEach(func() { resourceConfig, err := resourceConfigFactory.FindOrCreateResourceConfig( - logger, "some-base-type", atc.Source{ "some": "source", }, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).NotTo(HaveOccurred()) @@ -84,18 +82,17 @@ var _ = Describe("ResourceConfigCollector", func() { Context("when the config is no longer referenced in resource config check sessions", func() { ownerExpiries := db.ContainerOwnerExpiries{ - Min: 5 * time.Minute, - Max: 10 * time.Minute, + Min: 5 * time.Minute, + Max: 10 * time.Minute, } BeforeEach(func() { resourceConfig, err := resourceConfigFactory.FindOrCreateResourceConfig( - logger, "some-base-type", atc.Source{ "some": "source", }, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).NotTo(HaveOccurred()) @@ -137,7 +134,6 @@ var _ = Describe("ResourceConfigCollector", func() { Context("when config is referenced in resource caches", func() { BeforeEach(func() { _, err = 
resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(defaultBuild.ID()), "some-base-type", atc.Version{"some": "version"}, @@ -145,7 +141,7 @@ var _ = Describe("ResourceConfigCollector", func() { "some": "source", }, nil, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).NotTo(HaveOccurred()) }) @@ -160,7 +156,6 @@ var _ = Describe("ResourceConfigCollector", func() { Context("when config is not referenced in resource caches", func() { BeforeEach(func() { _, err = resourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForBuild(defaultBuild.ID()), "some-base-type", atc.Version{"some": "version"}, @@ -168,7 +163,7 @@ var _ = Describe("ResourceConfigCollector", func() { "some": "source", }, nil, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).NotTo(HaveOccurred()) @@ -193,9 +188,8 @@ var _ = Describe("ResourceConfigCollector", func() { Context("when config is referenced in resources", func() { BeforeEach(func() { _, err := usedResource.SetResourceConfig( - logger, atc.Source{"some": "source"}, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).NotTo(HaveOccurred()) }) @@ -210,10 +204,9 @@ var _ = Describe("ResourceConfigCollector", func() { Context("when config is not referenced in resources", func() { BeforeEach(func() { _, err := resourceConfigFactory.FindOrCreateResourceConfig( - logger, "some-base-type", atc.Source{"some": "source"}, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).NotTo(HaveOccurred()) _, err = usedResource.Reload() @@ -231,9 +224,8 @@ var _ = Describe("ResourceConfigCollector", func() { Context("when config is referenced in resource types", func() { BeforeEach(func() { _, err := usedResourceType.SetResourceConfig( - logger, atc.Source{"some": "source-type"}, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).NotTo(HaveOccurred()) }) @@ -248,10 +240,9 @@ var _ = 
Describe("ResourceConfigCollector", func() { Context("when config is not referenced in resource types", func() { BeforeEach(func() { _, err := resourceConfigFactory.FindOrCreateResourceConfig( - logger, "some-base-type", atc.Source{"some": "source-type"}, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).NotTo(HaveOccurred()) _, err = usedResourceType.Reload() diff --git a/atc/integration/configuration_test.go b/atc/integration/configuration_test.go index 734e1fb9c..c40884012 100644 --- a/atc/integration/configuration_test.go +++ b/atc/integration/configuration_test.go @@ -61,22 +61,6 @@ var _ = Describe("ATC Integration Test", func() { }) }) - Context("when cluster name is specified", func() { - BeforeEach(func() { - cmd.Server.ClusterName = "foobar" - }) - - It("renders cluster name into HTML template", func() { - resp, err := http.Get(atcURL) - Expect(err).NotTo(HaveOccurred()) - - bodyBytes, err := ioutil.ReadAll(resp.Body) - Expect(err).NotTo(HaveOccurred()) - Expect(resp.StatusCode).To(Equal(200)) - Expect(string(bodyBytes)).To(ContainSubstring("foobar")) - }) - }) - It("set default team and config auth for the main team", func() { client := webLogin(atcURL, "test", "test") diff --git a/atc/radar/resource_scanner.go b/atc/radar/resource_scanner.go index 8e62cd988..45d504f69 100644 --- a/atc/radar/resource_scanner.go +++ b/atc/radar/resource_scanner.go @@ -154,10 +154,15 @@ func (scanner *resourceScanner) scan(logger lager.Logger, resourceID int, fromVe return 0, err } - versionedResourceTypes := creds.NewVersionedResourceTypes( + versionedResourceTypes, err := creds.NewVersionedResourceTypes( scanner.variables, resourceTypes.Deserialize(), - ) + ).Evaluate() + if err != nil { + logger.Error("failed-to-evaluate-resource-types", err) + scanner.setResourceCheckError(logger, savedResource, err) + return 0, err + } source, err := creds.NewSource(scanner.variables, savedResource.Source()).Evaluate() if err != nil { @@ -167,7 +172,6 @@ 
func (scanner *resourceScanner) scan(logger lager.Logger, resourceID int, fromVe } resourceConfigScope, err := savedResource.SetResourceConfig( - logger, source, versionedResourceTypes, ) @@ -203,7 +207,6 @@ func (scanner *resourceScanner) scan(logger lager.Logger, resourceID int, fromVe for { lock, acquired, err := resourceConfigScope.AcquireResourceCheckingLock( logger, - interval, ) if err != nil { lockLogger.Error("failed-to-get-lock", err, lager.Data{ @@ -265,7 +268,7 @@ func (scanner *resourceScanner) check( savedResource db.Resource, resourceConfigScope db.ResourceConfigScope, fromVersion atc.Version, - resourceTypes creds.VersionedResourceTypes, + resourceTypes atc.VersionedResourceTypes, source atc.Source, saveGiven bool, timeout time.Duration, @@ -317,11 +320,18 @@ func (scanner *resourceScanner) check( } owner := db.NewResourceConfigCheckSessionContainerOwner(resourceConfigScope.ResourceConfig(), ContainerExpiries) - containerMetadata := db.ContainerMetadata{ - Type: db.ContainerTypeCheck, - } - chosenWorker, err := scanner.pool.FindOrChooseWorkerForContainer(logger, owner, containerSpec, workerSpec, scanner.strategy) + chosenWorker, err := scanner.pool.FindOrChooseWorkerForContainer( + context.Background(), + logger, + owner, + containerSpec, + db.ContainerMetadata{ + Type: db.ContainerTypeCheck, + }, + workerSpec, + scanner.strategy, + ) if err != nil { logger.Error("failed-to-choose-a-worker", err) chkErr := resourceConfigScope.SetCheckError(err) @@ -330,17 +340,22 @@ func (scanner *resourceScanner) check( } return err } - container, err := chosenWorker.FindOrCreateContainer( context.Background(), logger, worker.NoopImageFetchingDelegate{}, owner, - containerMetadata, containerSpec, resourceTypes, ) if err != nil { + // TODO: remove this after ephemeral check containers. 
+ // Sometimes we pass in a check session thats too close to + // expirey into FindOrCreateContainer such that the container + // gced before the call is completed + if err == worker.ResourceConfigCheckSessionExpiredError { + return nil + } logger.Error("failed-to-create-or-find-container", err) chkErr := resourceConfigScope.SetCheckError(err) if chkErr != nil { diff --git a/atc/radar/resource_scanner_test.go b/atc/radar/resource_scanner_test.go index c03e60cc1..10d14ec74 100644 --- a/atc/radar/resource_scanner_test.go +++ b/atc/radar/resource_scanner_test.go @@ -42,8 +42,8 @@ var _ = Describe("ResourceScanner", func() { interval time.Duration variables creds.Variables - fakeResourceType *dbfakes.FakeResourceType - versionedResourceType atc.VersionedResourceType + fakeResourceType *dbfakes.FakeResourceType + interpolatedResourceTypes atc.VersionedResourceTypes scanner Scanner @@ -73,13 +73,15 @@ var _ = Describe("ResourceScanner", func() { Tags: atc.Tags{"some-tag"}, } - versionedResourceType = atc.VersionedResourceType{ - ResourceType: atc.ResourceType{ - Name: "some-custom-resource", - Type: "registry-image", - Source: atc.Source{"custom": "((source-params))"}, + interpolatedResourceTypes = atc.VersionedResourceTypes{ + { + ResourceType: atc.ResourceType{ + Name: "some-custom-resource", + Type: "registry-image", + Source: atc.Source{"custom": "some-secret-sauce"}, + }, + Version: atc.Version{"custom": "version"}, }, - Version: atc.Version{"custom": "version"}, } fakeContainer = new(workerfakes.FakeContainer) @@ -165,7 +167,7 @@ var _ = Describe("ResourceScanner", func() { results <- true close(results) - fakeResourceConfigScope.AcquireResourceCheckingLockStub = func(logger lager.Logger, interval time.Duration) (lock.Lock, bool, error) { + fakeResourceConfigScope.AcquireResourceCheckingLockStub = func(logger lager.Logger) (lock.Lock, bool, error) { if <-results { return fakeLock, true, nil } else { @@ -178,16 +180,6 @@ var _ = Describe("ResourceScanner", func() { 
It("retries every second until it is", func() { Expect(fakeResourceConfigScope.AcquireResourceCheckingLockCallCount()).To(Equal(3)) - - _, leaseInterval := fakeResourceConfigScope.AcquireResourceCheckingLockArgsForCall(0) - Expect(leaseInterval).To(Equal(interval)) - - _, leaseInterval = fakeResourceConfigScope.AcquireResourceCheckingLockArgsForCall(1) - Expect(leaseInterval).To(Equal(interval)) - - _, leaseInterval = fakeResourceConfigScope.AcquireResourceCheckingLockArgsForCall(2) - Expect(leaseInterval).To(Equal(interval)) - Expect(fakeLock.ReleaseCallCount()).To(Equal(1)) }) }) @@ -238,17 +230,15 @@ var _ = Describe("ResourceScanner", func() { It("constructs the resource of the correct type", func() { Expect(fakeDBResource.SetResourceConfigCallCount()).To(Equal(1)) - _, resourceSource, resourceTypes := fakeDBResource.SetResourceConfigArgsForCall(0) + resourceSource, resourceTypes := fakeDBResource.SetResourceConfigArgsForCall(0) Expect(resourceSource).To(Equal(atc.Source{"uri": "some-secret-sauce"})) - Expect(resourceTypes).To(Equal(creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{ - versionedResourceType, - }))) + Expect(resourceTypes).To(Equal(interpolatedResourceTypes)) Expect(fakeDBResource.SetCheckSetupErrorCallCount()).To(Equal(1)) err := fakeDBResource.SetCheckSetupErrorArgsForCall(0) Expect(err).To(BeNil()) - _, owner, containerSpec, workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) + _, _, owner, containerSpec, metadata, workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) Expect(owner).To(Equal(db.NewResourceConfigCheckSessionContainerOwner(fakeResourceConfig, radar.ContainerExpiries))) Expect(containerSpec.ImageSpec).To(Equal(worker.ImageSpec{ ResourceType: "git", @@ -260,19 +250,19 @@ var _ = Describe("ResourceScanner", func() { "RESOURCE_PIPELINE_NAME=some-pipeline", "RESOURCE_NAME=some-resource", })) + Expect(metadata).To(Equal(db.ContainerMetadata{ + Type: db.ContainerTypeCheck, + })) 
Expect(workerSpec).To(Equal(worker.WorkerSpec{ ResourceType: "git", Tags: atc.Tags{"some-tag"}, - ResourceTypes: creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{versionedResourceType}), + ResourceTypes: interpolatedResourceTypes, TeamID: 123, })) Expect(fakeWorker.FindOrCreateContainerCallCount()).To(Equal(1)) - _, _, _, owner, metadata, containerSpec, resourceTypes := fakeWorker.FindOrCreateContainerArgsForCall(0) + _, _, _, owner, containerSpec, resourceTypes = fakeWorker.FindOrCreateContainerArgsForCall(0) Expect(owner).To(Equal(db.NewResourceConfigCheckSessionContainerOwner(fakeResourceConfig, radar.ContainerExpiries))) - Expect(metadata).To(Equal(db.ContainerMetadata{ - Type: db.ContainerTypeCheck, - })) Expect(containerSpec.ImageSpec).To(Equal(worker.ImageSpec{ ResourceType: "git", })) @@ -283,9 +273,7 @@ var _ = Describe("ResourceScanner", func() { "RESOURCE_PIPELINE_NAME=some-pipeline", "RESOURCE_NAME=some-resource", })) - Expect(resourceTypes).To(Equal(creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{ - versionedResourceType, - }))) + Expect(resourceTypes).To(Equal(interpolatedResourceTypes)) }) Context("when the resource config has a specified check interval", func() { @@ -298,9 +286,6 @@ var _ = Describe("ResourceScanner", func() { Expect(fakeResourceConfigScope.AcquireResourceCheckingLockCallCount()).To(Equal(1)) Expect(fakeResourceConfigScope.UpdateLastCheckStartTimeCallCount()).To(Equal(1)) - _, leaseInterval := fakeResourceConfigScope.AcquireResourceCheckingLockArgsForCall(0) - Expect(leaseInterval).To(Equal(10 * time.Millisecond)) - leaseInterval, immediate := fakeResourceConfigScope.UpdateLastCheckStartTimeArgsForCall(0) Expect(leaseInterval).To(Equal(10 * time.Millisecond)) Expect(immediate).To(BeFalse()) @@ -335,9 +320,6 @@ var _ = Describe("ResourceScanner", func() { Expect(fakeResourceConfigScope.AcquireResourceCheckingLockCallCount()).To(Equal(1)) 
Expect(fakeResourceConfigScope.UpdateLastCheckStartTimeCallCount()).To(Equal(1)) - _, leaseInterval := fakeResourceConfigScope.AcquireResourceCheckingLockArgsForCall(0) - Expect(leaseInterval).To(Equal(interval)) - leaseInterval, immediate := fakeResourceConfigScope.UpdateLastCheckStartTimeArgsForCall(0) Expect(leaseInterval).To(Equal(interval)) Expect(immediate).To(BeFalse()) @@ -670,17 +652,15 @@ var _ = Describe("ResourceScanner", func() { It("constructs the resource of the correct type", func() { Expect(fakeDBResource.SetResourceConfigCallCount()).To(Equal(1)) - _, resourceSource, resourceTypes := fakeDBResource.SetResourceConfigArgsForCall(0) + resourceSource, resourceTypes := fakeDBResource.SetResourceConfigArgsForCall(0) Expect(resourceSource).To(Equal(atc.Source{"uri": "some-secret-sauce"})) - Expect(resourceTypes).To(Equal(creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{ - versionedResourceType, - }))) + Expect(resourceTypes).To(Equal(interpolatedResourceTypes)) Expect(fakeDBResource.SetCheckSetupErrorCallCount()).To(Equal(1)) err := fakeDBResource.SetCheckSetupErrorArgsForCall(0) Expect(err).To(BeNil()) - _, owner, containerSpec, workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) + _, _, owner, containerSpec, metadata, workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) Expect(owner).To(Equal(db.NewResourceConfigCheckSessionContainerOwner(fakeResourceConfig, radar.ContainerExpiries))) Expect(containerSpec.ImageSpec).To(Equal(worker.ImageSpec{ ResourceType: "git", @@ -692,18 +672,18 @@ var _ = Describe("ResourceScanner", func() { "RESOURCE_PIPELINE_NAME=some-pipeline", "RESOURCE_NAME=some-resource", })) + Expect(metadata).To(Equal(db.ContainerMetadata{ + Type: db.ContainerTypeCheck, + })) Expect(workerSpec).To(Equal(worker.WorkerSpec{ ResourceType: "git", Tags: atc.Tags{"some-tag"}, - ResourceTypes: creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{versionedResourceType}), + 
ResourceTypes: interpolatedResourceTypes, TeamID: 123, })) - _, _, _, owner, metadata, containerSpec, resourceTypes := fakeWorker.FindOrCreateContainerArgsForCall(0) + _, _, _, owner, containerSpec, resourceTypes = fakeWorker.FindOrCreateContainerArgsForCall(0) Expect(owner).To(Equal(db.NewResourceConfigCheckSessionContainerOwner(fakeResourceConfig, radar.ContainerExpiries))) - Expect(metadata).To(Equal(db.ContainerMetadata{ - Type: db.ContainerTypeCheck, - })) Expect(containerSpec.ImageSpec).To(Equal(worker.ImageSpec{ ResourceType: "git", })) @@ -714,18 +694,13 @@ var _ = Describe("ResourceScanner", func() { "RESOURCE_PIPELINE_NAME=some-pipeline", "RESOURCE_NAME=some-resource", })) - Expect(resourceTypes).To(Equal(creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{ - versionedResourceType, - }))) + Expect(resourceTypes).To(Equal(interpolatedResourceTypes)) }) It("grabs an immediate resource checking lock before checking, breaks lock after done", func() { Expect(fakeResourceConfigScope.AcquireResourceCheckingLockCallCount()).To(Equal(1)) Expect(fakeResourceConfigScope.UpdateLastCheckStartTimeCallCount()).To(Equal(1)) - _, leaseInterval := fakeResourceConfigScope.AcquireResourceCheckingLockArgsForCall(0) - Expect(leaseInterval).To(Equal(interval)) - leaseInterval, immediate := fakeResourceConfigScope.UpdateLastCheckStartTimeArgsForCall(0) Expect(leaseInterval).To(Equal(interval)) Expect(immediate).To(BeTrue()) @@ -785,9 +760,6 @@ var _ = Describe("ResourceScanner", func() { Expect(fakeResourceConfigScope.AcquireResourceCheckingLockCallCount()).To(Equal(1)) Expect(fakeResourceConfigScope.UpdateLastCheckStartTimeCallCount()).To(Equal(1)) - _, leaseInterval := fakeResourceConfigScope.AcquireResourceCheckingLockArgsForCall(0) - Expect(leaseInterval).To(Equal(10 * time.Millisecond)) - leaseInterval, immediate := fakeResourceConfigScope.UpdateLastCheckStartTimeArgsForCall(0) Expect(leaseInterval).To(Equal(10 * time.Millisecond)) 
Expect(immediate).To(BeTrue()) diff --git a/atc/radar/resource_type_scanner.go b/atc/radar/resource_type_scanner.go index 7ee3f16e3..dd9ba7837 100644 --- a/atc/radar/resource_type_scanner.go +++ b/atc/radar/resource_type_scanner.go @@ -115,10 +115,15 @@ func (scanner *resourceTypeScanner) scan(logger lager.Logger, resourceTypeID int return 0, err } - versionedResourceTypes := creds.NewVersionedResourceTypes( + versionedResourceTypes, err := creds.NewVersionedResourceTypes( scanner.variables, resourceTypes.Deserialize(), - ) + ).Evaluate() + if err != nil { + logger.Error("failed-to-evaluate-resource-types", err) + scanner.setCheckError(logger, savedResourceType, err) + return 0, err + } source, err := creds.NewSource(scanner.variables, savedResourceType.Source()).Evaluate() if err != nil { @@ -128,7 +133,6 @@ func (scanner *resourceTypeScanner) scan(logger lager.Logger, resourceTypeID int } resourceConfigScope, err := savedResourceType.SetResourceConfig( - logger, source, versionedResourceTypes.Without(savedResourceType.Name()), ) @@ -146,7 +150,6 @@ func (scanner *resourceTypeScanner) scan(logger lager.Logger, resourceTypeID int reattempt = mustComplete lock, acquired, err := resourceConfigScope.AcquireResourceCheckingLock( logger, - interval, ) if err != nil { lockLogger.Error("failed-to-get-lock", err, lager.Data{ @@ -218,7 +221,7 @@ func (scanner *resourceTypeScanner) check( savedResourceType db.ResourceType, resourceConfigScope db.ResourceConfigScope, fromVersion atc.Version, - versionedResourceTypes creds.VersionedResourceTypes, + versionedResourceTypes atc.VersionedResourceTypes, source atc.Source, saveGiven bool, ) error { @@ -253,7 +256,17 @@ func (scanner *resourceTypeScanner) check( owner := db.NewResourceConfigCheckSessionContainerOwner(resourceConfigScope.ResourceConfig(), ContainerExpiries) - chosenWorker, err := scanner.pool.FindOrChooseWorkerForContainer(logger, owner, containerSpec, workerSpec, scanner.strategy) + chosenWorker, err := 
scanner.pool.FindOrChooseWorkerForContainer( + context.Background(), + logger, + owner, + containerSpec, + db.ContainerMetadata{ + Type: db.ContainerTypeCheck, + }, + workerSpec, + scanner.strategy, + ) if err != nil { chkErr := resourceConfigScope.SetCheckError(err) if chkErr != nil { @@ -268,9 +281,6 @@ func (scanner *resourceTypeScanner) check( logger, worker.NoopImageFetchingDelegate{}, db.NewResourceConfigCheckSessionContainerOwner(resourceConfigScope.ResourceConfig(), ContainerExpiries), - db.ContainerMetadata{ - Type: db.ContainerTypeCheck, - }, containerSpec, versionedResourceTypes.Without(savedResourceType.Name()), ) diff --git a/atc/radar/resource_type_scanner_test.go b/atc/radar/resource_type_scanner_test.go index 5d26ea957..6ae581848 100644 --- a/atc/radar/resource_type_scanner_test.go +++ b/atc/radar/resource_type_scanner_test.go @@ -42,8 +42,8 @@ var _ = Describe("ResourceTypeScanner", func() { interval time.Duration variables creds.Variables - fakeResourceType *dbfakes.FakeResourceType - versionedResourceType atc.VersionedResourceType + fakeResourceType *dbfakes.FakeResourceType + interpolatedResourceTypes atc.VersionedResourceTypes scanner Scanner @@ -58,14 +58,16 @@ var _ = Describe("ResourceTypeScanner", func() { "source-params": "some-secret-sauce", } - versionedResourceType = atc.VersionedResourceType{ - ResourceType: atc.ResourceType{ - Name: "some-custom-resource", - Type: "registry-image", - Source: atc.Source{"custom": "((source-params))"}, - Tags: atc.Tags{"some-tag"}, + interpolatedResourceTypes = atc.VersionedResourceTypes{ + { + ResourceType: atc.ResourceType{ + Name: "some-custom-resource", + Type: "registry-image", + Source: atc.Source{"custom": "some-secret-sauce"}, + Tags: atc.Tags{"some-tag"}, + }, + Version: atc.Version{"custom": "version"}, }, - Version: atc.Version{"custom": "version"}, } fakeClock = fakeclock.NewFakeClock(epoch) @@ -180,36 +182,36 @@ var _ = Describe("ResourceTypeScanner", func() { It("constructs the resource of 
the correct type", func() { Expect(fakeResourceType.SetResourceConfigCallCount()).To(Equal(1)) - _, resourceSource, resourceTypes := fakeResourceType.SetResourceConfigArgsForCall(0) + resourceSource, resourceTypes := fakeResourceType.SetResourceConfigArgsForCall(0) Expect(resourceSource).To(Equal(atc.Source{"custom": "some-secret-sauce"})) - Expect(resourceTypes).To(Equal(creds.VersionedResourceTypes{})) + Expect(resourceTypes).To(Equal(atc.VersionedResourceTypes{})) - _, owner, containerSpec, workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) + _, _, owner, containerSpec, metadata, workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) Expect(owner).To(Equal(db.NewResourceConfigCheckSessionContainerOwner(fakeResourceConfig, ContainerExpiries))) Expect(containerSpec.ImageSpec).To(Equal(worker.ImageSpec{ ResourceType: "registry-image", })) Expect(containerSpec.Tags).To(Equal([]string{"some-tag"})) Expect(containerSpec.TeamID).To(Equal(123)) + Expect(metadata).To(Equal(db.ContainerMetadata{ + Type: db.ContainerTypeCheck, + })) Expect(workerSpec).To(Equal(worker.WorkerSpec{ ResourceType: "registry-image", Tags: []string{"some-tag"}, - ResourceTypes: creds.VersionedResourceTypes{}, + ResourceTypes: atc.VersionedResourceTypes{}, TeamID: 123, })) Expect(fakeWorker.FindOrCreateContainerCallCount()).To(Equal(1)) - _, _, _, owner, metadata, containerSpec, resourceTypes := fakeWorker.FindOrCreateContainerArgsForCall(0) + _, _, _, owner, containerSpec, resourceTypes = fakeWorker.FindOrCreateContainerArgsForCall(0) Expect(owner).To(Equal(db.NewResourceConfigCheckSessionContainerOwner(fakeResourceConfig, ContainerExpiries))) - Expect(metadata).To(Equal(db.ContainerMetadata{ - Type: db.ContainerTypeCheck, - })) Expect(containerSpec.ImageSpec).To(Equal(worker.ImageSpec{ ResourceType: "registry-image", })) Expect(containerSpec.Tags).To(Equal([]string{"some-tag"})) Expect(containerSpec.TeamID).To(Equal(123)) - 
Expect(resourceTypes).To(Equal(creds.VersionedResourceTypes{})) + Expect(resourceTypes).To(Equal(atc.VersionedResourceTypes{})) }) Context("when the resource type overrides a base resource type", func() { @@ -233,41 +235,37 @@ var _ = Describe("ResourceTypeScanner", func() { It("constructs the resource of the correct type", func() { Expect(fakeResourceType.SetResourceConfigCallCount()).To(Equal(1)) - _, resourceSource, resourceTypes := fakeResourceType.SetResourceConfigArgsForCall(0) + resourceSource, resourceTypes := fakeResourceType.SetResourceConfigArgsForCall(0) Expect(resourceSource).To(Equal(atc.Source{"custom": "some-secret-sauce"})) - Expect(resourceTypes).To(Equal(creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{ - versionedResourceType, - }))) + Expect(resourceTypes).To(Equal(interpolatedResourceTypes)) Expect(fakeResourceType.SetCheckSetupErrorCallCount()).To(Equal(1)) err := fakeResourceType.SetCheckSetupErrorArgsForCall(0) Expect(err).To(BeNil()) - _, owner, containerSpec, workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) + _, _, owner, containerSpec, metadata, workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) Expect(owner).To(Equal(db.NewResourceConfigCheckSessionContainerOwner(fakeResourceConfig, ContainerExpiries))) Expect(containerSpec.ImageSpec).To(Equal(worker.ImageSpec{ ResourceType: "registry-image", })) Expect(containerSpec.TeamID).To(Equal(123)) + Expect(metadata).To(Equal(db.ContainerMetadata{ + Type: db.ContainerTypeCheck, + })) Expect(workerSpec).To(Equal(worker.WorkerSpec{ ResourceType: "registry-image", - ResourceTypes: creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{versionedResourceType}), + ResourceTypes: interpolatedResourceTypes, TeamID: 123, })) Expect(fakeWorker.FindOrCreateContainerCallCount()).To(Equal(1)) - _, _, _, owner, metadata, containerSpec, resourceTypes := fakeWorker.FindOrCreateContainerArgsForCall(0) + _, _, _, owner, containerSpec, 
resourceTypes = fakeWorker.FindOrCreateContainerArgsForCall(0) Expect(owner).To(Equal(db.NewResourceConfigCheckSessionContainerOwner(fakeResourceConfig, ContainerExpiries))) - Expect(metadata).To(Equal(db.ContainerMetadata{ - Type: db.ContainerTypeCheck, - })) Expect(containerSpec.ImageSpec).To(Equal(worker.ImageSpec{ ResourceType: "registry-image", })) Expect(containerSpec.TeamID).To(Equal(123)) - Expect(resourceTypes).To(Equal(creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{ - versionedResourceType, - }))) + Expect(resourceTypes).To(Equal(interpolatedResourceTypes)) }) }) @@ -281,9 +279,6 @@ var _ = Describe("ResourceTypeScanner", func() { Expect(fakeResourceConfigScope.AcquireResourceCheckingLockCallCount()).To(Equal(1)) Expect(fakeResourceConfigScope.UpdateLastCheckStartTimeCallCount()).To(Equal(1)) - _, leaseInterval := fakeResourceConfigScope.AcquireResourceCheckingLockArgsForCall(0) - Expect(leaseInterval).To(Equal(10 * time.Millisecond)) - leaseInterval, immediate := fakeResourceConfigScope.UpdateLastCheckStartTimeArgsForCall(0) Expect(leaseInterval).To(Equal(10 * time.Millisecond)) Expect(immediate).To(BeFalse()) @@ -318,9 +313,6 @@ var _ = Describe("ResourceTypeScanner", func() { Expect(fakeResourceConfigScope.AcquireResourceCheckingLockCallCount()).To(Equal(1)) Expect(fakeResourceConfigScope.UpdateLastCheckStartTimeCallCount()).To(Equal(1)) - _, leaseInterval := fakeResourceConfigScope.AcquireResourceCheckingLockArgsForCall(0) - Expect(leaseInterval).To(Equal(interval)) - leaseInterval, immediate := fakeResourceConfigScope.UpdateLastCheckStartTimeArgsForCall(0) Expect(leaseInterval).To(Equal(interval)) Expect(immediate).To(BeFalse()) @@ -469,40 +461,40 @@ var _ = Describe("ResourceTypeScanner", func() { It("constructs the resource of the correct type", func() { Expect(fakeResourceType.SetResourceConfigCallCount()).To(Equal(1)) - _, resourceSource, resourceTypes := fakeResourceType.SetResourceConfigArgsForCall(0) + resourceSource, 
resourceTypes := fakeResourceType.SetResourceConfigArgsForCall(0) Expect(resourceSource).To(Equal(atc.Source{"custom": "some-secret-sauce"})) - Expect(resourceTypes).To(Equal(creds.VersionedResourceTypes{})) + Expect(resourceTypes).To(Equal(atc.VersionedResourceTypes{})) Expect(fakeResourceType.SetCheckSetupErrorCallCount()).To(Equal(1)) err := fakeResourceType.SetCheckSetupErrorArgsForCall(0) Expect(err).To(BeNil()) - _, owner, containerSpec, workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) + _, _, owner, containerSpec, metadata, workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) Expect(owner).To(Equal(db.NewResourceConfigCheckSessionContainerOwner(fakeResourceConfig, ContainerExpiries))) Expect(containerSpec.ImageSpec).To(Equal(worker.ImageSpec{ ResourceType: "registry-image", })) Expect(containerSpec.Tags).To(Equal([]string{"some-tag"})) Expect(containerSpec.TeamID).To(Equal(123)) + Expect(metadata).To(Equal(db.ContainerMetadata{ + Type: db.ContainerTypeCheck, + })) Expect(workerSpec).To(Equal(worker.WorkerSpec{ ResourceType: "registry-image", Tags: []string{"some-tag"}, - ResourceTypes: creds.VersionedResourceTypes{}, + ResourceTypes: atc.VersionedResourceTypes{}, TeamID: 123, })) Expect(fakeWorker.FindOrCreateContainerCallCount()).To(Equal(1)) - _, _, _, owner, metadata, containerSpec, resourceTypes := fakeWorker.FindOrCreateContainerArgsForCall(0) + _, _, _, owner, containerSpec, resourceTypes = fakeWorker.FindOrCreateContainerArgsForCall(0) Expect(owner).To(Equal(db.NewResourceConfigCheckSessionContainerOwner(fakeResourceConfig, ContainerExpiries))) - Expect(metadata).To(Equal(db.ContainerMetadata{ - Type: db.ContainerTypeCheck, - })) Expect(containerSpec.ImageSpec).To(Equal(worker.ImageSpec{ ResourceType: "registry-image", })) Expect(containerSpec.Tags).To(Equal([]string{"some-tag"})) Expect(containerSpec.TeamID).To(Equal(123)) - Expect(resourceTypes).To(Equal(creds.VersionedResourceTypes{})) + 
Expect(resourceTypes).To(Equal(atc.VersionedResourceTypes{})) }) Context("when the resource type depends on another custom type", func() { @@ -597,13 +589,11 @@ var _ = Describe("ResourceTypeScanner", func() { It("constructs the resource of the correct type", func() { Expect(fakeResourceType.SetResourceConfigCallCount()).To(Equal(1)) - _, resourceSource, resourceTypes := fakeResourceType.SetResourceConfigArgsForCall(0) + resourceSource, resourceTypes := fakeResourceType.SetResourceConfigArgsForCall(0) Expect(resourceSource).To(Equal(atc.Source{"custom": "some-secret-sauce"})) - Expect(resourceTypes).To(Equal(creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{ - versionedResourceType, - }))) + Expect(resourceTypes).To(Equal(interpolatedResourceTypes)) - _, owner, containerSpec, workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) + _, _, owner, containerSpec, metadata, workerSpec, _ := fakePool.FindOrChooseWorkerForContainerArgsForCall(0) Expect(owner).To(Equal(db.NewResourceConfigCheckSessionContainerOwner(fakeResourceConfig, ContainerExpiries))) Expect(containerSpec.ImageSpec).To(Equal(worker.ImageSpec{ ResourceType: "registry-image", @@ -611,23 +601,21 @@ var _ = Describe("ResourceTypeScanner", func() { Expect(containerSpec.TeamID).To(Equal(123)) Expect(workerSpec).To(Equal(worker.WorkerSpec{ ResourceType: "registry-image", - ResourceTypes: creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{versionedResourceType}), + ResourceTypes: interpolatedResourceTypes, TeamID: 123, })) - - Expect(fakeWorker.FindOrCreateContainerCallCount()).To(Equal(1)) - _, _, _, owner, metadata, containerSpec, resourceTypes := fakeWorker.FindOrCreateContainerArgsForCall(0) - Expect(owner).To(Equal(db.NewResourceConfigCheckSessionContainerOwner(fakeResourceConfig, ContainerExpiries))) Expect(metadata).To(Equal(db.ContainerMetadata{ Type: db.ContainerTypeCheck, })) + + Expect(fakeWorker.FindOrCreateContainerCallCount()).To(Equal(1)) + 
_, _, _, owner, containerSpec, resourceTypes = fakeWorker.FindOrCreateContainerArgsForCall(0) + Expect(owner).To(Equal(db.NewResourceConfigCheckSessionContainerOwner(fakeResourceConfig, ContainerExpiries))) Expect(containerSpec.ImageSpec).To(Equal(worker.ImageSpec{ ResourceType: "registry-image", })) Expect(containerSpec.TeamID).To(Equal(123)) - Expect(resourceTypes).To(Equal(creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{ - versionedResourceType, - }))) + Expect(resourceTypes).To(Equal(interpolatedResourceTypes)) }) }) @@ -635,9 +623,6 @@ var _ = Describe("ResourceTypeScanner", func() { Expect(fakeResourceConfigScope.AcquireResourceCheckingLockCallCount()).To(Equal(1)) Expect(fakeResourceConfigScope.UpdateLastCheckStartTimeCallCount()).To(Equal(1)) - _, leaseInterval := fakeResourceConfigScope.AcquireResourceCheckingLockArgsForCall(0) - Expect(leaseInterval).To(Equal(interval)) - leaseInterval, immediate := fakeResourceConfigScope.UpdateLastCheckStartTimeArgsForCall(0) Expect(leaseInterval).To(Equal(interval)) Expect(immediate).To(BeTrue()) @@ -776,7 +761,7 @@ var _ = Describe("ResourceTypeScanner", func() { results <- true close(results) - fakeResourceConfigScope.AcquireResourceCheckingLockStub = func(logger lager.Logger, interval time.Duration) (lock.Lock, bool, error) { + fakeResourceConfigScope.AcquireResourceCheckingLockStub = func(logger lager.Logger) (lock.Lock, bool, error) { if <-results { return fakeLock, true, nil } else { @@ -789,16 +774,6 @@ var _ = Describe("ResourceTypeScanner", func() { It("retries every second until it is", func() { Expect(fakeResourceConfigScope.AcquireResourceCheckingLockCallCount()).To(Equal(3)) - - _, leaseInterval := fakeResourceConfigScope.AcquireResourceCheckingLockArgsForCall(0) - Expect(leaseInterval).To(Equal(interval)) - - _, leaseInterval = fakeResourceConfigScope.AcquireResourceCheckingLockArgsForCall(1) - Expect(leaseInterval).To(Equal(interval)) - - _, leaseInterval = 
fakeResourceConfigScope.AcquireResourceCheckingLockArgsForCall(2) - Expect(leaseInterval).To(Equal(interval)) - Expect(fakeLock.ReleaseCallCount()).To(Equal(1)) }) }) diff --git a/atc/resource/fetcher.go b/atc/resource/fetcher.go index 82916c7e3..9ff332b0d 100644 --- a/atc/resource/fetcher.go +++ b/atc/resource/fetcher.go @@ -8,7 +8,7 @@ import ( "code.cloudfoundry.org/clock" "code.cloudfoundry.org/lager" - "github.com/concourse/concourse/atc/creds" + "github.com/concourse/concourse/atc" "github.com/concourse/concourse/atc/db/lock" "github.com/concourse/concourse/atc/worker" ) @@ -27,7 +27,7 @@ type Fetcher interface { session Session, gardenWorker worker.Worker, containerSpec worker.ContainerSpec, - resourceTypes creds.VersionedResourceTypes, + resourceTypes atc.VersionedResourceTypes, resourceInstance ResourceInstance, imageFetchingDelegate worker.ImageFetchingDelegate, ) (VersionedSource, error) @@ -57,7 +57,7 @@ func (f *fetcher) Fetch( session Session, gardenWorker worker.Worker, containerSpec worker.ContainerSpec, - resourceTypes creds.VersionedResourceTypes, + resourceTypes atc.VersionedResourceTypes, resourceInstance ResourceInstance, imageFetchingDelegate worker.ImageFetchingDelegate, ) (VersionedSource, error) { diff --git a/atc/resource/fetcher_test.go b/atc/resource/fetcher_test.go index a4189b6a6..e2ab21710 100644 --- a/atc/resource/fetcher_test.go +++ b/atc/resource/fetcher_test.go @@ -8,7 +8,7 @@ import ( "code.cloudfoundry.org/clock/fakeclock" "code.cloudfoundry.org/lager" "code.cloudfoundry.org/lager/lagertest" - "github.com/concourse/concourse/atc/creds" + "github.com/concourse/concourse/atc" "github.com/concourse/concourse/atc/db/lock" "github.com/concourse/concourse/atc/db/lock/lockfakes" "github.com/concourse/concourse/atc/resource" @@ -67,7 +67,7 @@ var _ = Describe("Fetcher", func() { worker.ContainerSpec{ TeamID: teamID, }, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, new(resourcefakes.FakeResourceInstance), 
fakeBuildStepDelegate, ) diff --git a/atc/resource/resource_instance.go b/atc/resource/resource_instance.go index b214aa040..90e360345 100644 --- a/atc/resource/resource_instance.go +++ b/atc/resource/resource_instance.go @@ -7,7 +7,6 @@ import ( "code.cloudfoundry.org/lager" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/worker" ) @@ -34,7 +33,7 @@ type resourceInstance struct { version atc.Version source atc.Source params atc.Params - resourceTypes creds.VersionedResourceTypes + resourceTypes atc.VersionedResourceTypes resourceCache db.UsedResourceCache containerOwner db.ContainerOwner @@ -45,7 +44,7 @@ func NewResourceInstance( version atc.Version, source atc.Source, params atc.Params, - resourceTypes creds.VersionedResourceTypes, + resourceTypes atc.VersionedResourceTypes, resourceCache db.UsedResourceCache, containerOwner db.ContainerOwner, diff --git a/atc/resource/resource_instance_fetch_source.go b/atc/resource/resource_instance_fetch_source.go index 449732e33..77c2aa95e 100644 --- a/atc/resource/resource_instance_fetch_source.go +++ b/atc/resource/resource_instance_fetch_source.go @@ -4,7 +4,7 @@ import ( "context" "code.cloudfoundry.org/lager" - "github.com/concourse/concourse/atc/creds" + "github.com/concourse/concourse/atc" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/worker" ) @@ -24,7 +24,7 @@ type FetchSourceFactory interface { logger lager.Logger, worker worker.Worker, resourceInstance ResourceInstance, - resourceTypes creds.VersionedResourceTypes, + resourceTypes atc.VersionedResourceTypes, containerSpec worker.ContainerSpec, session Session, imageFetchingDelegate worker.ImageFetchingDelegate, @@ -50,7 +50,7 @@ func (r *fetchSourceFactory) NewFetchSource( logger lager.Logger, worker worker.Worker, resourceInstance ResourceInstance, - resourceTypes creds.VersionedResourceTypes, + resourceTypes 
atc.VersionedResourceTypes, containerSpec worker.ContainerSpec, session Session, imageFetchingDelegate worker.ImageFetchingDelegate, @@ -72,7 +72,7 @@ type resourceInstanceFetchSource struct { logger lager.Logger worker worker.Worker resourceInstance ResourceInstance - resourceTypes creds.VersionedResourceTypes + resourceTypes atc.VersionedResourceTypes containerSpec worker.ContainerSpec session Session imageFetchingDelegate worker.ImageFetchingDelegate @@ -130,12 +130,21 @@ func (s *resourceInstanceFetchSource) Create(ctx context.Context) (VersionedSour &worker.CertsVolumeMount{Logger: s.logger}, } + err = s.worker.EnsureDBContainerExists( + ctx, + s.logger, + s.resourceInstance.ContainerOwner(), + s.session.Metadata, + ) + if err != nil { + return nil, err + } + container, err := s.worker.FindOrCreateContainer( ctx, s.logger, s.imageFetchingDelegate, s.resourceInstance.ContainerOwner(), - s.session.Metadata, s.containerSpec, s.resourceTypes, ) diff --git a/atc/resource/resource_instance_fetch_source_test.go b/atc/resource/resource_instance_fetch_source_test.go index faf8292b9..40158902a 100644 --- a/atc/resource/resource_instance_fetch_source_test.go +++ b/atc/resource/resource_instance_fetch_source_test.go @@ -7,9 +7,7 @@ import ( "code.cloudfoundry.org/garden" "code.cloudfoundry.org/garden/gardenfakes" "code.cloudfoundry.org/lager/lagertest" - "github.com/cloudfoundry/bosh-cli/director/template" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/db/dbfakes" "github.com/concourse/concourse/atc/resource" @@ -33,7 +31,8 @@ var _ = Describe("ResourceInstanceFetchSource", func() { fakeResourceCacheFactory *dbfakes.FakeResourceCacheFactory fakeUsedResourceCache *dbfakes.FakeUsedResourceCache fakeDelegate *workerfakes.FakeImageFetchingDelegate - resourceTypes creds.VersionedResourceTypes + resourceTypes atc.VersionedResourceTypes + metadata db.ContainerMetadata 
ctx context.Context cancel func() @@ -88,23 +87,20 @@ var _ = Describe("ResourceInstanceFetchSource", func() { fakeDelegate = new(workerfakes.FakeImageFetchingDelegate) - variables := template.StaticVariables{ - "secret-custom": "source", - } - - resourceTypes = creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{ + resourceTypes = atc.VersionedResourceTypes{ { ResourceType: atc.ResourceType{ Name: "custom-resource", Type: "custom-type", - Source: atc.Source{"some-custom": "((secret-custom))"}, + Source: atc.Source{"some-custom": "source"}, }, Version: atc.Version{"some-custom": "version"}, }, - }) + } resourceFactory := resource.NewResourceFactory() fetchSourceFactory = resource.NewFetchSourceFactory(fakeResourceCacheFactory, resourceFactory) + metadata = db.ContainerMetadata{Type: db.ContainerTypeGet} fetchSource = fetchSourceFactory.NewFetchSource( logger, fakeWorker, @@ -120,7 +116,9 @@ var _ = Describe("ResourceInstanceFetchSource", func() { "resource": resource.ResourcesDir("get"), }, }, - resource.Session{}, + resource.Session{ + Metadata: metadata, + }, fakeDelegate, ) }) @@ -204,11 +202,15 @@ var _ = Describe("ResourceInstanceFetchSource", func() { It("creates container with volume and worker", func() { Expect(initErr).NotTo(HaveOccurred()) + Expect(fakeWorker.EnsureDBContainerExistsCallCount()).To(Equal(1)) + _, _, owner, actualMetadata := fakeWorker.EnsureDBContainerExistsArgsForCall(0) + Expect(owner).To(Equal(db.NewBuildStepContainerOwner(43, atc.PlanID("some-plan-id"), 42))) + Expect(actualMetadata).To(Equal(metadata)) + Expect(fakeWorker.FindOrCreateContainerCallCount()).To(Equal(1)) - _, logger, delegate, owner, metadata, containerSpec, types := fakeWorker.FindOrCreateContainerArgsForCall(0) + _, logger, delegate, owner, containerSpec, types := fakeWorker.FindOrCreateContainerArgsForCall(0) Expect(delegate).To(Equal(fakeDelegate)) Expect(owner).To(Equal(db.NewBuildStepContainerOwner(43, atc.PlanID("some-plan-id"), 42))) - 
Expect(metadata).To(BeZero()) Expect(containerSpec).To(Equal(worker.ContainerSpec{ TeamID: 42, Tags: []string{}, diff --git a/atc/resource/resource_instance_test.go b/atc/resource/resource_instance_test.go index c2f0b2f50..dbca9d101 100644 --- a/atc/resource/resource_instance_test.go +++ b/atc/resource/resource_instance_test.go @@ -6,7 +6,6 @@ import ( "code.cloudfoundry.org/lager" "code.cloudfoundry.org/lager/lagertest" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/db/dbfakes" . "github.com/concourse/concourse/atc/resource" @@ -36,7 +35,7 @@ var _ = Describe("ResourceInstance", func() { atc.Version{"some": "version"}, atc.Source{"some": "source"}, atc.Params{"some": "params"}, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, fakeResourceCache, db.NewBuildStepContainerOwner(42, atc.PlanID("some-plan-id"), 1), ) diff --git a/atc/resource/resourcefakes/fake_fetch_source_factory.go b/atc/resource/resourcefakes/fake_fetch_source_factory.go index 896b12011..1172a8082 100644 --- a/atc/resource/resourcefakes/fake_fetch_source_factory.go +++ b/atc/resource/resourcefakes/fake_fetch_source_factory.go @@ -5,19 +5,19 @@ import ( "sync" "code.cloudfoundry.org/lager" - "github.com/concourse/concourse/atc/creds" + "github.com/concourse/concourse/atc" "github.com/concourse/concourse/atc/resource" "github.com/concourse/concourse/atc/worker" ) type FakeFetchSourceFactory struct { - NewFetchSourceStub func(lager.Logger, worker.Worker, resource.ResourceInstance, creds.VersionedResourceTypes, worker.ContainerSpec, resource.Session, worker.ImageFetchingDelegate) resource.FetchSource + NewFetchSourceStub func(lager.Logger, worker.Worker, resource.ResourceInstance, atc.VersionedResourceTypes, worker.ContainerSpec, resource.Session, worker.ImageFetchingDelegate) resource.FetchSource newFetchSourceMutex sync.RWMutex newFetchSourceArgsForCall []struct { arg1 
lager.Logger arg2 worker.Worker arg3 resource.ResourceInstance - arg4 creds.VersionedResourceTypes + arg4 atc.VersionedResourceTypes arg5 worker.ContainerSpec arg6 resource.Session arg7 worker.ImageFetchingDelegate @@ -32,14 +32,14 @@ type FakeFetchSourceFactory struct { invocationsMutex sync.RWMutex } -func (fake *FakeFetchSourceFactory) NewFetchSource(arg1 lager.Logger, arg2 worker.Worker, arg3 resource.ResourceInstance, arg4 creds.VersionedResourceTypes, arg5 worker.ContainerSpec, arg6 resource.Session, arg7 worker.ImageFetchingDelegate) resource.FetchSource { +func (fake *FakeFetchSourceFactory) NewFetchSource(arg1 lager.Logger, arg2 worker.Worker, arg3 resource.ResourceInstance, arg4 atc.VersionedResourceTypes, arg5 worker.ContainerSpec, arg6 resource.Session, arg7 worker.ImageFetchingDelegate) resource.FetchSource { fake.newFetchSourceMutex.Lock() ret, specificReturn := fake.newFetchSourceReturnsOnCall[len(fake.newFetchSourceArgsForCall)] fake.newFetchSourceArgsForCall = append(fake.newFetchSourceArgsForCall, struct { arg1 lager.Logger arg2 worker.Worker arg3 resource.ResourceInstance - arg4 creds.VersionedResourceTypes + arg4 atc.VersionedResourceTypes arg5 worker.ContainerSpec arg6 resource.Session arg7 worker.ImageFetchingDelegate @@ -62,13 +62,13 @@ func (fake *FakeFetchSourceFactory) NewFetchSourceCallCount() int { return len(fake.newFetchSourceArgsForCall) } -func (fake *FakeFetchSourceFactory) NewFetchSourceCalls(stub func(lager.Logger, worker.Worker, resource.ResourceInstance, creds.VersionedResourceTypes, worker.ContainerSpec, resource.Session, worker.ImageFetchingDelegate) resource.FetchSource) { +func (fake *FakeFetchSourceFactory) NewFetchSourceCalls(stub func(lager.Logger, worker.Worker, resource.ResourceInstance, atc.VersionedResourceTypes, worker.ContainerSpec, resource.Session, worker.ImageFetchingDelegate) resource.FetchSource) { fake.newFetchSourceMutex.Lock() defer fake.newFetchSourceMutex.Unlock() fake.NewFetchSourceStub = stub } -func 
(fake *FakeFetchSourceFactory) NewFetchSourceArgsForCall(i int) (lager.Logger, worker.Worker, resource.ResourceInstance, creds.VersionedResourceTypes, worker.ContainerSpec, resource.Session, worker.ImageFetchingDelegate) { +func (fake *FakeFetchSourceFactory) NewFetchSourceArgsForCall(i int) (lager.Logger, worker.Worker, resource.ResourceInstance, atc.VersionedResourceTypes, worker.ContainerSpec, resource.Session, worker.ImageFetchingDelegate) { fake.newFetchSourceMutex.RLock() defer fake.newFetchSourceMutex.RUnlock() argsForCall := fake.newFetchSourceArgsForCall[i] diff --git a/atc/resource/resourcefakes/fake_fetcher.go b/atc/resource/resourcefakes/fake_fetcher.go index a258c775d..6a0069b1e 100644 --- a/atc/resource/resourcefakes/fake_fetcher.go +++ b/atc/resource/resourcefakes/fake_fetcher.go @@ -6,13 +6,13 @@ import ( "sync" "code.cloudfoundry.org/lager" - "github.com/concourse/concourse/atc/creds" + "github.com/concourse/concourse/atc" "github.com/concourse/concourse/atc/resource" "github.com/concourse/concourse/atc/worker" ) type FakeFetcher struct { - FetchStub func(context.Context, lager.Logger, resource.Session, worker.Worker, worker.ContainerSpec, creds.VersionedResourceTypes, resource.ResourceInstance, worker.ImageFetchingDelegate) (resource.VersionedSource, error) + FetchStub func(context.Context, lager.Logger, resource.Session, worker.Worker, worker.ContainerSpec, atc.VersionedResourceTypes, resource.ResourceInstance, worker.ImageFetchingDelegate) (resource.VersionedSource, error) fetchMutex sync.RWMutex fetchArgsForCall []struct { arg1 context.Context @@ -20,7 +20,7 @@ type FakeFetcher struct { arg3 resource.Session arg4 worker.Worker arg5 worker.ContainerSpec - arg6 creds.VersionedResourceTypes + arg6 atc.VersionedResourceTypes arg7 resource.ResourceInstance arg8 worker.ImageFetchingDelegate } @@ -36,7 +36,7 @@ type FakeFetcher struct { invocationsMutex sync.RWMutex } -func (fake *FakeFetcher) Fetch(arg1 context.Context, arg2 lager.Logger, arg3 
resource.Session, arg4 worker.Worker, arg5 worker.ContainerSpec, arg6 creds.VersionedResourceTypes, arg7 resource.ResourceInstance, arg8 worker.ImageFetchingDelegate) (resource.VersionedSource, error) { +func (fake *FakeFetcher) Fetch(arg1 context.Context, arg2 lager.Logger, arg3 resource.Session, arg4 worker.Worker, arg5 worker.ContainerSpec, arg6 atc.VersionedResourceTypes, arg7 resource.ResourceInstance, arg8 worker.ImageFetchingDelegate) (resource.VersionedSource, error) { fake.fetchMutex.Lock() ret, specificReturn := fake.fetchReturnsOnCall[len(fake.fetchArgsForCall)] fake.fetchArgsForCall = append(fake.fetchArgsForCall, struct { @@ -45,7 +45,7 @@ func (fake *FakeFetcher) Fetch(arg1 context.Context, arg2 lager.Logger, arg3 res arg3 resource.Session arg4 worker.Worker arg5 worker.ContainerSpec - arg6 creds.VersionedResourceTypes + arg6 atc.VersionedResourceTypes arg7 resource.ResourceInstance arg8 worker.ImageFetchingDelegate }{arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8}) @@ -67,13 +67,13 @@ func (fake *FakeFetcher) FetchCallCount() int { return len(fake.fetchArgsForCall) } -func (fake *FakeFetcher) FetchCalls(stub func(context.Context, lager.Logger, resource.Session, worker.Worker, worker.ContainerSpec, creds.VersionedResourceTypes, resource.ResourceInstance, worker.ImageFetchingDelegate) (resource.VersionedSource, error)) { +func (fake *FakeFetcher) FetchCalls(stub func(context.Context, lager.Logger, resource.Session, worker.Worker, worker.ContainerSpec, atc.VersionedResourceTypes, resource.ResourceInstance, worker.ImageFetchingDelegate) (resource.VersionedSource, error)) { fake.fetchMutex.Lock() defer fake.fetchMutex.Unlock() fake.FetchStub = stub } -func (fake *FakeFetcher) FetchArgsForCall(i int) (context.Context, lager.Logger, resource.Session, worker.Worker, worker.ContainerSpec, creds.VersionedResourceTypes, resource.ResourceInstance, worker.ImageFetchingDelegate) { +func (fake *FakeFetcher) FetchArgsForCall(i int) (context.Context, lager.Logger, 
resource.Session, worker.Worker, worker.ContainerSpec, atc.VersionedResourceTypes, resource.ResourceInstance, worker.ImageFetchingDelegate) { fake.fetchMutex.RLock() defer fake.fetchMutex.RUnlock() argsForCall := fake.fetchArgsForCall[i] diff --git a/atc/scheduler/buildstarter.go b/atc/scheduler/buildstarter.go index 59a4622b1..e93f4a574 100644 --- a/atc/scheduler/buildstarter.go +++ b/atc/scheduler/buildstarter.go @@ -80,6 +80,16 @@ func (s *buildStarter) tryStartNextPendingBuild( "build-name": nextPendingBuild.Name(), }) + if nextPendingBuild.IsAborted() { + logger.Debug("cancel-aborted-pending-build") + err := nextPendingBuild.Finish(db.BuildStatusAborted) + if err != nil { + return false, err + } + + return true, nil + } + reachedMaxInFlight, err := s.maxInFlightUpdater.UpdateMaxInFlightReached(logger, job, nextPendingBuild.ID()) if err != nil { return false, err diff --git a/atc/scheduler/buildstarter_test.go b/atc/scheduler/buildstarter_test.go index 396124cc3..c902e7a92 100644 --- a/atc/scheduler/buildstarter_test.go +++ b/atc/scheduler/buildstarter_test.go @@ -69,6 +69,48 @@ var _ = Describe("BuildStarter", func() { resource.NameReturns("some-resource") }) + Context("when one pending build is aborted before start", func() { + var abortedBuild *dbfakes.FakeBuild + + BeforeEach(func() { + job = new(dbfakes.FakeJob) + job.NameReturns("some-job") + job.ConfigReturns(atc.JobConfig{Plan: atc.PlanSequence{{Get: "input-1", Resource: "some-resource"}, {Get: "input-2", Resource: "some-resource"}}}) + + abortedBuild = new(dbfakes.FakeBuild) + abortedBuild.IDReturns(42) + abortedBuild.IsAbortedReturns(true) + + // make sure pending build can be started after another pending build is aborted + pendingBuilds = append([]db.Build{abortedBuild}, pendingBuilds...) 
+ resources = db.Resources{resource} + }) + + JustBeforeEach(func() { + tryStartErr = buildStarter.TryStartPendingBuildsForJob( + lagertest.NewTestLogger("test"), + job, + resources, + versionedResourceTypes, + pendingBuilds, + ) + }) + + It("won't try to start the aborted pending build", func() { + Expect(abortedBuild.FinishCallCount()).To(Equal(1)) + }) + + It("will try to start the next non aborted pending build", func() { + Expect(fakeUpdater.UpdateMaxInFlightReachedCallCount()).To(Equal(1)) + _, _, buildID := fakeUpdater.UpdateMaxInFlightReachedArgsForCall(0) + Expect(buildID).To(Equal(66)) + }) + + It("returns without error", func() { + Expect(tryStartErr).NotTo(HaveOccurred()) + }) + }) + Context("when manually triggered", func() { BeforeEach(func() { job = new(dbfakes.FakeJob) @@ -274,7 +316,6 @@ var _ = Describe("BuildStarter", func() { }})) }) }) - }) }) }) diff --git a/atc/worker/container_spec.go b/atc/worker/container_spec.go index c3770d9ad..e83032ba5 100644 --- a/atc/worker/container_spec.go +++ b/atc/worker/container_spec.go @@ -6,7 +6,6 @@ import ( "code.cloudfoundry.org/garden" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" ) type WorkerSpec struct { @@ -14,7 +13,7 @@ type WorkerSpec struct { ResourceType string Tags []string TeamID int - ResourceTypes creds.VersionedResourceTypes + ResourceTypes atc.VersionedResourceTypes } type ContainerSpec struct { @@ -71,7 +70,7 @@ type ImageSpec struct { type ImageResource struct { Type string - Source creds.Source + Source atc.Source Params *atc.Params Version *atc.Version } diff --git a/atc/worker/db_worker_provider_test.go b/atc/worker/db_worker_provider_test.go index 04fcf896e..41a7697bb 100644 --- a/atc/worker/db_worker_provider_test.go +++ b/atc/worker/db_worker_provider_test.go @@ -424,7 +424,14 @@ var _ = Describe("DBProvider", func() { By("connecting to the worker") fakeDBWorkerFactory.GetWorkerReturns(fakeWorker1, true, nil) - container, err := 
workers[0].FindOrCreateContainer(context.TODO(), logger, fakeImageFetchingDelegate, db.NewBuildStepContainerOwner(42, atc.PlanID("some-plan-id"), 1), db.ContainerMetadata{}, containerSpec, nil) + container, err := workers[0].FindOrCreateContainer( + context.TODO(), + logger, + fakeImageFetchingDelegate, + db.NewBuildStepContainerOwner(42, atc.PlanID("some-plan-id"), 1), + containerSpec, + nil, + ) Expect(err).NotTo(HaveOccurred()) err = container.Destroy() @@ -463,6 +470,7 @@ var _ = Describe("DBProvider", func() { fakeCreatedContainer = new(dbfakes.FakeCreatedContainer) fakeCreatingContainer.CreatedReturns(fakeCreatedContainer, nil) fakeWorker1.CreateContainerReturns(fakeCreatingContainer, nil) + fakeWorker1.FindContainerReturns(fakeCreatingContainer, nil, nil) workerBaseResourceType := &db.UsedWorkerBaseResourceType{ID: 42} fakeDBWorkerBaseResourceTypeFactory.FindReturns(workerBaseResourceType, true, nil) @@ -481,7 +489,22 @@ var _ = Describe("DBProvider", func() { fakeGardenBackend.CreateReturns(fakeContainer, nil) fakeGardenBackend.LookupReturns(fakeContainer, nil) - container, err := workers[0].FindOrCreateContainer(context.TODO(), logger, fakeImageFetchingDelegate, db.NewBuildStepContainerOwner(42, atc.PlanID("some-plan-id"), 1), db.ContainerMetadata{}, containerSpec, nil) + err := workers[0].EnsureDBContainerExists( + context.TODO(), + logger, + db.NewBuildStepContainerOwner(42, atc.PlanID("some-plan-id"), 1), + db.ContainerMetadata{}, + ) + Expect(err).NotTo(HaveOccurred()) + + container, err := workers[0].FindOrCreateContainer( + context.TODO(), + logger, + fakeImageFetchingDelegate, + db.NewBuildStepContainerOwner(42, atc.PlanID("some-plan-id"), 1), + containerSpec, + nil, + ) Expect(err).NotTo(HaveOccurred()) Expect(container.Handle()).To(Equal("created-handle")) diff --git a/atc/worker/image/image_factory.go b/atc/worker/image/image_factory.go index d207f032c..f522567a4 100644 --- a/atc/worker/image/image_factory.go +++ 
b/atc/worker/image/image_factory.go @@ -5,7 +5,6 @@ import ( "code.cloudfoundry.org/lager" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/worker" w "github.com/concourse/concourse/atc/worker" ) @@ -31,7 +30,7 @@ func (f *imageFactory) GetImage( imageSpec worker.ImageSpec, teamID int, delegate worker.ImageFetchingDelegate, - resourceTypes creds.VersionedResourceTypes, + resourceTypes atc.VersionedResourceTypes, ) (worker.Image, error) { if imageSpec.ImageArtifactSource != nil { artifactVolume, existsOnWorker, err := imageSpec.ImageArtifactSource.VolumeOn(logger, worker) diff --git a/atc/worker/image/image_resource_fetcher.go b/atc/worker/image/image_resource_fetcher.go index 9ee5f577b..a58c7b6fd 100644 --- a/atc/worker/image/image_resource_fetcher.go +++ b/atc/worker/image/image_resource_fetcher.go @@ -10,7 +10,6 @@ import ( "code.cloudfoundry.org/lager" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/resource" "github.com/concourse/concourse/atc/worker" @@ -32,7 +31,7 @@ type ImageResourceFetcherFactory interface { worker.ImageResource, atc.Version, int, - creds.VersionedResourceTypes, + atc.VersionedResourceTypes, worker.ImageFetchingDelegate, ) ImageResourceFetcher } @@ -74,7 +73,7 @@ func (f *imageResourceFetcherFactory) NewImageResourceFetcher( imageResource worker.ImageResource, version atc.Version, teamID int, - customTypes creds.VersionedResourceTypes, + customTypes atc.VersionedResourceTypes, imageFetchingDelegate worker.ImageFetchingDelegate, ) ImageResourceFetcher { return &imageResourceFetcher{ @@ -102,7 +101,7 @@ type imageResourceFetcher struct { imageResource worker.ImageResource version atc.Version teamID int - customTypes creds.VersionedResourceTypes + customTypes atc.VersionedResourceTypes imageFetchingDelegate worker.ImageFetchingDelegate } @@ -122,22 +121,16 @@ func (i 
*imageResourceFetcher) Fetch( } } - source, err := i.imageResource.Source.Evaluate() - if err != nil { - return nil, nil, nil, err - } - var params atc.Params if i.imageResource.Params != nil { params = *i.imageResource.Params } resourceCache, err := i.dbResourceCacheFactory.FindOrCreateResourceCache( - logger, db.ForContainer(container.ID()), i.imageResource.Type, version, - source, + i.imageResource.Source, params, i.customTypes, ) @@ -149,7 +142,7 @@ func (i *imageResourceFetcher) Fetch( resourceInstance := resource.NewResourceInstance( resource.ResourceType(i.imageResource.Type), version, - source, + i.imageResource.Source, params, i.customTypes, resourceCache, @@ -225,7 +218,7 @@ func (i *imageResourceFetcher) ensureVersionOfType( ctx context.Context, logger lager.Logger, container db.CreatingContainer, - resourceType creds.VersionedResourceType, + resourceType atc.VersionedResourceType, ) error { containerSpec := worker.ContainerSpec{ ImageSpec: worker.ImageSpec{ @@ -237,14 +230,24 @@ func (i *imageResourceFetcher) ensureVersionOfType( }, } + owner := db.NewImageCheckContainerOwner(container, i.teamID) + err := i.worker.EnsureDBContainerExists( + ctx, + logger, + owner, + db.ContainerMetadata{ + Type: db.ContainerTypeCheck, + }, + ) + if err != nil { + return err + } + resourceTypeContainer, err := i.worker.FindOrCreateContainer( ctx, logger, worker.NoopImageFetchingDelegate{}, - db.NewImageCheckContainerOwner(container, i.teamID), - db.ContainerMetadata{ - Type: db.ContainerTypeCheck, - }, + owner, containerSpec, i.customTypes, ) @@ -252,13 +255,8 @@ func (i *imageResourceFetcher) ensureVersionOfType( return err } - source, err := resourceType.Source.Evaluate() - if err != nil { - return err - } - checkResourceType := i.resourceFactory.NewResourceForContainer(resourceTypeContainer) - versions, err := checkResourceType.Check(context.TODO(), source, nil) + versions, err := checkResourceType.Check(context.TODO(), resourceType.Source, nil) if err != nil { return 
err } @@ -298,7 +296,15 @@ func (i *imageResourceFetcher) getLatestVersion( }, } - source, err := i.imageResource.Source.Evaluate() + owner := db.NewImageCheckContainerOwner(container, i.teamID) + err := i.worker.EnsureDBContainerExists( + ctx, + logger, + owner, + db.ContainerMetadata{ + Type: db.ContainerTypeCheck, + }, + ) if err != nil { return nil, err } @@ -307,10 +313,7 @@ func (i *imageResourceFetcher) getLatestVersion( ctx, logger, i.imageFetchingDelegate, - db.NewImageCheckContainerOwner(container, i.teamID), - db.ContainerMetadata{ - Type: db.ContainerTypeCheck, - }, + owner, resourceSpec, i.customTypes, ) @@ -319,7 +322,7 @@ func (i *imageResourceFetcher) getLatestVersion( } checkingResource := i.resourceFactory.NewResourceForContainer(imageContainer) - versions, err := checkingResource.Check(context.TODO(), source, nil) + versions, err := checkingResource.Check(context.TODO(), i.imageResource.Source, nil) if err != nil { return nil, err } diff --git a/atc/worker/image/image_resource_fetcher_test.go b/atc/worker/image/image_resource_fetcher_test.go index e4bbd6758..a436fd3c9 100644 --- a/atc/worker/image/image_resource_fetcher_test.go +++ b/atc/worker/image/image_resource_fetcher_test.go @@ -12,9 +12,7 @@ import ( "code.cloudfoundry.org/lager" "code.cloudfoundry.org/lager/lagertest" - "github.com/cloudfoundry/bosh-cli/director/template" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/db/dbfakes" "github.com/concourse/concourse/atc/resource" @@ -46,7 +44,7 @@ var _ = Describe("Image", func() { var fakeImageFetchingDelegate *workerfakes.FakeImageFetchingDelegate var fakeWorker *workerfakes.FakeWorker - var customTypes creds.VersionedResourceTypes + var customTypes atc.VersionedResourceTypes var privileged bool var fetchedVolume worker.Volume @@ -54,7 +52,6 @@ var _ = Describe("Image", func() { var fetchedVersion atc.Version var fetchErr error var 
teamID int - var variables template.StaticVariables BeforeEach(func() { fakeResourceFactory = new(resourcefakes.FakeResourceFactory) @@ -63,16 +60,10 @@ var _ = Describe("Image", func() { fakeCreatingContainer = new(dbfakes.FakeCreatingContainer) stderrBuf = gbytes.NewBuffer() - variables = template.StaticVariables{ - "source-param": "super-secret-sauce", - "a-source-param": "super-secret-a-source", - "b-source-param": "super-secret-b-source", - } - logger = lagertest.NewTestLogger("test") imageResource = worker.ImageResource{ Type: "docker", - Source: creds.NewSource(variables, atc.Source{"some": "((source-param))"}), + Source: atc.Source{"some": "super-secret-sauce"}, Params: &atc.Params{"some": "params"}, } version = nil @@ -84,12 +75,12 @@ var _ = Describe("Image", func() { fakeWorker.TagsReturns(atc.Tags{"worker", "tags"}) teamID = 123 - customTypes = creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{ + customTypes = atc.VersionedResourceTypes{ { ResourceType: atc.ResourceType{ Name: "custom-type-a", Type: "base-type", - Source: atc.Source{"some": "((a-source-param))"}, + Source: atc.Source{"some": "a-source-param"}, }, Version: atc.Version{"some": "a-version"}, }, @@ -97,11 +88,11 @@ var _ = Describe("Image", func() { ResourceType: atc.ResourceType{ Name: "custom-type-b", Type: "custom-type-a", - Source: atc.Source{"some": "((b-source-param))"}, + Source: atc.Source{"some": "b-source-param"}, }, Version: atc.Version{"some": "b-version"}, }, - }) + } fakeResourceCacheFactory = new(dbfakes.FakeResourceCacheFactory) }) @@ -158,7 +149,7 @@ var _ = Describe("Image", func() { BeforeEach(func() { imageResource = worker.ImageResource{ Type: customResourceTypeName, - Source: creds.NewSource(variables, atc.Source{"some": "((source-param))"}), + Source: atc.Source{"some": "source-param"}, Params: &atc.Params{"some": "params"}, } @@ -172,7 +163,7 @@ var _ = Describe("Image", func() { Context("and the custom type does not have a version", func() { 
BeforeEach(func() { - customTypes = creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{ + customTypes = atc.VersionedResourceTypes{ { ResourceType: atc.ResourceType{ Name: "custom-type-a", @@ -181,7 +172,7 @@ var _ = Describe("Image", func() { }, Version: nil, }, - }) + } fakeCheckResourceType = new(resourcefakes.FakeResource) fakeWorker.FindOrCreateContainerReturns(fakeContainer, nil) @@ -192,7 +183,7 @@ var _ = Describe("Image", func() { It("checks for the latest version of the resource type", func() { By("find or create a resource container") - _, _, _, _, _, containerSpec, _ := fakeWorker.FindOrCreateContainerArgsForCall(0) + _, _, _, _, containerSpec, _ := fakeWorker.FindOrCreateContainerArgsForCall(0) Expect(containerSpec.ImageSpec.ResourceType).To(Equal("custom-type-a")) By("calling the resource type's check script") @@ -205,8 +196,9 @@ var _ = Describe("Image", func() { }) It("uses the version of the custom type when checking for the original resource", func() { + Expect(fakeWorker.EnsureDBContainerExistsCallCount()).To(Equal(2)) Expect(fakeWorker.FindOrCreateContainerCallCount()).To(Equal(2)) - _, _, _, _, _, containerSpec, customTypes := fakeWorker.FindOrCreateContainerArgsForCall(1) + _, _, _, _, containerSpec, customTypes := fakeWorker.FindOrCreateContainerArgsForCall(1) Expect(containerSpec.ImageSpec.ResourceType).To(Equal("custom-type-a")) Expect(customTypes[0].Version).To(Equal(atc.Version{"some": "version"})) }) @@ -277,13 +269,17 @@ var _ = Describe("Image", func() { }) It("created the 'check' resource with the correct session, with the currently fetching type removed from the set", func() { - Expect(fakeWorker.FindOrCreateContainerCallCount()).To(Equal(1)) - cctx, _, delegate, owner, metadata, containerSpec, actualCustomTypes := fakeWorker.FindOrCreateContainerArgsForCall(0) - Expect(cctx).To(Equal(ctx)) - Expect(owner).To(Equal(db.NewImageCheckContainerOwner(fakeCreatingContainer, 123))) + 
Expect(fakeWorker.EnsureDBContainerExistsCallCount()).To(Equal(1)) + _, _, owner, metadata := fakeWorker.EnsureDBContainerExistsArgsForCall(0) Expect(metadata).To(Equal(db.ContainerMetadata{ Type: db.ContainerTypeCheck, })) + Expect(owner).To(Equal(db.NewImageCheckContainerOwner(fakeCreatingContainer, 123))) + + Expect(fakeWorker.FindOrCreateContainerCallCount()).To(Equal(1)) + cctx, _, delegate, owner, containerSpec, actualCustomTypes := fakeWorker.FindOrCreateContainerArgsForCall(0) + Expect(cctx).To(Equal(ctx)) + Expect(owner).To(Equal(db.NewImageCheckContainerOwner(fakeCreatingContainer, 123))) Expect(containerSpec.ImageSpec).To(Equal(worker.ImageSpec{ ResourceType: "docker", })) @@ -318,13 +314,17 @@ var _ = Describe("Image", func() { }) It("created the 'check' resource with the correct session, with the currently fetching type removed from the set", func() { - Expect(fakeWorker.FindOrCreateContainerCallCount()).To(Equal(1)) - cctx, _, delegate, owner, metadata, containerSpec, actualCustomTypes := fakeWorker.FindOrCreateContainerArgsForCall(0) - Expect(cctx).To(Equal(ctx)) - Expect(owner).To(Equal(db.NewImageCheckContainerOwner(fakeCreatingContainer, 123))) + Expect(fakeWorker.EnsureDBContainerExistsCallCount()).To(Equal(1)) + _, _, owner, metadata := fakeWorker.EnsureDBContainerExistsArgsForCall(0) Expect(metadata).To(Equal(db.ContainerMetadata{ Type: db.ContainerTypeCheck, })) + Expect(owner).To(Equal(db.NewImageCheckContainerOwner(fakeCreatingContainer, 123))) + + Expect(fakeWorker.FindOrCreateContainerCallCount()).To(Equal(1)) + cctx, _, delegate, owner, containerSpec, actualCustomTypes := fakeWorker.FindOrCreateContainerArgsForCall(0) + Expect(cctx).To(Equal(ctx)) + Expect(owner).To(Equal(db.NewImageCheckContainerOwner(fakeCreatingContainer, 123))) Expect(containerSpec.ImageSpec).To(Equal(worker.ImageSpec{ ResourceType: "docker", })) @@ -537,6 +537,7 @@ var _ = Describe("Image", func() { }) It("does not construct a new resource for checking", func() { + 
Expect(fakeWorker.EnsureDBContainerExistsCallCount()).To(BeZero()) Expect(fakeWorker.FindOrCreateContainerCallCount()).To(BeZero()) Expect(fakeResourceFactory.NewResourceForContainerCallCount()).To(BeZero()) }) diff --git a/atc/worker/image/image_test.go b/atc/worker/image/image_test.go index 212ece3d0..8c0f54671 100644 --- a/atc/worker/image/image_test.go +++ b/atc/worker/image/image_test.go @@ -6,11 +6,9 @@ import ( "strings" "code.cloudfoundry.org/lager/lagertest" - "github.com/cloudfoundry/bosh-cli/director/template" "github.com/concourse/baggageclaim" "github.com/concourse/baggageclaim/baggageclaimfakes" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db/dbfakes" "github.com/concourse/concourse/atc/worker" "github.com/concourse/concourse/atc/worker/image" @@ -32,7 +30,6 @@ var _ = Describe("Image", func() { fakeImageFetchingDelegate *workerfakes.FakeImageFetchingDelegate fakeImageResourceFetcherFactory *imagefakes.FakeImageResourceFetcherFactory fakeImageResourceFetcher *imagefakes.FakeImageResourceFetcher - variables creds.Variables ) BeforeEach(func() { @@ -49,10 +46,6 @@ var _ = Describe("Image", func() { fakeImageResourceFetcher = new(imagefakes.FakeImageResourceFetcher) fakeImageResourceFetcherFactory.NewImageResourceFetcherReturns(fakeImageResourceFetcher) imageFactory = image.NewImageFactory(fakeImageResourceFetcherFactory) - - variables = template.StaticVariables{ - "source-secret": "super-secret-sauce", - } }) Describe("imageProvidedByPreviousStepOnSameWorker", func() { @@ -88,7 +81,7 @@ var _ = Describe("Image", func() { }, 42, fakeImageFetchingDelegate, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).NotTo(HaveOccurred()) }) @@ -154,7 +147,7 @@ var _ = Describe("Image", func() { }, 42, fakeImageFetchingDelegate, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).NotTo(HaveOccurred()) }) @@ -237,13 +230,13 @@ var _ = 
Describe("Image", func() { worker.ImageSpec{ ImageResource: &worker.ImageResource{ Type: "some-image-resource-type", - Source: creds.NewSource(variables, atc.Source{"some": "source"}), + Source: atc.Source{"some": "source"}, }, Privileged: true, }, 42, fakeImageFetchingDelegate, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).NotTo(HaveOccurred()) }) @@ -252,10 +245,10 @@ var _ = Describe("Image", func() { worker, imageResource, version, teamID, resourceTypes, delegate := fakeImageResourceFetcherFactory.NewImageResourceFetcherArgsForCall(0) Expect(worker).To(Equal(fakeWorker)) Expect(imageResource.Type).To(Equal("some-image-resource-type")) - Expect(imageResource.Source).To(Equal(creds.NewSource(variables, atc.Source{"some": "source"}))) + Expect(imageResource.Source).To(Equal(atc.Source{"some": "source"})) Expect(version).To(BeNil()) Expect(teamID).To(Equal(42)) - Expect(resourceTypes).To(Equal(creds.VersionedResourceTypes{})) + Expect(resourceTypes).To(Equal(atc.VersionedResourceTypes{})) Expect(delegate).To(Equal(fakeImageFetchingDelegate)) }) @@ -302,7 +295,7 @@ var _ = Describe("Image", func() { }, 42, fakeImageFetchingDelegate, - creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{ + atc.VersionedResourceTypes{ { ResourceType: atc.ResourceType{ Name: "some-custom-resource-type", @@ -324,7 +317,7 @@ var _ = Describe("Image", func() { }, Version: atc.Version{"some": "custom-image-resource-type-version"}, }, - }), + }, ) Expect(err).NotTo(HaveOccurred()) }) @@ -333,12 +326,12 @@ var _ = Describe("Image", func() { worker, imageResource, version, teamID, resourceTypes, delegate := fakeImageResourceFetcherFactory.NewImageResourceFetcherArgsForCall(0) Expect(worker).To(Equal(fakeWorker)) Expect(imageResource.Type).To(Equal("some-base-resource-type")) - Expect(imageResource.Source).To(Equal(creds.NewSource(variables, atc.Source{ + Expect(imageResource.Source).To(Equal(atc.Source{ "some": "custom-resource-type-source", 
- }))) + })) Expect(version).To(Equal(atc.Version{"some": "custom-resource-type-version"})) Expect(teamID).To(Equal(42)) - Expect(resourceTypes).To(Equal(creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{ + Expect(resourceTypes).To(Equal(atc.VersionedResourceTypes{ { ResourceType: atc.ResourceType{ Name: "some-custom-image-resource-type", @@ -350,7 +343,7 @@ var _ = Describe("Image", func() { }, Version: atc.Version{"some": "custom-image-resource-type-version"}, }, - }))) + })) Expect(delegate).To(Equal(fakeImageFetchingDelegate)) }) @@ -397,7 +390,7 @@ var _ = Describe("Image", func() { }, 42, fakeImageFetchingDelegate, - creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{ + atc.VersionedResourceTypes{ { ResourceType: atc.ResourceType{ Name: "some-custom-resource-type", @@ -419,7 +412,7 @@ var _ = Describe("Image", func() { }, Version: atc.Version{"some": "custom-image-resource-type-version"}, }, - }), + }, ) Expect(err).NotTo(HaveOccurred()) }) @@ -428,12 +421,12 @@ var _ = Describe("Image", func() { worker, imageResource, version, teamID, resourceTypes, delegate := fakeImageResourceFetcherFactory.NewImageResourceFetcherArgsForCall(0) Expect(worker).To(Equal(fakeWorker)) Expect(imageResource.Type).To(Equal("some-base-image-resource-type")) - Expect(imageResource.Source).To(Equal(creds.NewSource(variables, atc.Source{ + Expect(imageResource.Source).To(Equal(atc.Source{ "some": "custom-image-resource-type-source", - }))) + })) Expect(version).To(Equal(atc.Version{"some": "custom-image-resource-type-version"})) Expect(teamID).To(Equal(42)) - Expect(resourceTypes).To(Equal(creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{ + Expect(resourceTypes).To(Equal(atc.VersionedResourceTypes{ { ResourceType: atc.ResourceType{ Name: "some-custom-resource-type", @@ -444,7 +437,7 @@ var _ = Describe("Image", func() { }, Version: atc.Version{"some": "custom-resource-type-version"}, }, - }))) + })) 
Expect(delegate).To(Equal(fakeImageFetchingDelegate)) }) @@ -519,7 +512,7 @@ var _ = Describe("Image", func() { }, 42, fakeImageFetchingDelegate, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).NotTo(HaveOccurred()) }) @@ -628,7 +621,7 @@ var _ = Describe("Image", func() { }, 42, fakeImageFetchingDelegate, - creds.VersionedResourceTypes{}, + atc.VersionedResourceTypes{}, ) Expect(err).NotTo(HaveOccurred()) }) diff --git a/atc/worker/image/imagefakes/fake_image_resource_fetcher_factory.go b/atc/worker/image/imagefakes/fake_image_resource_fetcher_factory.go index b116b666e..3b56db54a 100644 --- a/atc/worker/image/imagefakes/fake_image_resource_fetcher_factory.go +++ b/atc/worker/image/imagefakes/fake_image_resource_fetcher_factory.go @@ -5,20 +5,19 @@ import ( "sync" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/worker" "github.com/concourse/concourse/atc/worker/image" ) type FakeImageResourceFetcherFactory struct { - NewImageResourceFetcherStub func(worker.Worker, worker.ImageResource, atc.Version, int, creds.VersionedResourceTypes, worker.ImageFetchingDelegate) image.ImageResourceFetcher + NewImageResourceFetcherStub func(worker.Worker, worker.ImageResource, atc.Version, int, atc.VersionedResourceTypes, worker.ImageFetchingDelegate) image.ImageResourceFetcher newImageResourceFetcherMutex sync.RWMutex newImageResourceFetcherArgsForCall []struct { arg1 worker.Worker arg2 worker.ImageResource arg3 atc.Version arg4 int - arg5 creds.VersionedResourceTypes + arg5 atc.VersionedResourceTypes arg6 worker.ImageFetchingDelegate } newImageResourceFetcherReturns struct { @@ -31,7 +30,7 @@ type FakeImageResourceFetcherFactory struct { invocationsMutex sync.RWMutex } -func (fake *FakeImageResourceFetcherFactory) NewImageResourceFetcher(arg1 worker.Worker, arg2 worker.ImageResource, arg3 atc.Version, arg4 int, arg5 creds.VersionedResourceTypes, arg6 worker.ImageFetchingDelegate) 
image.ImageResourceFetcher { +func (fake *FakeImageResourceFetcherFactory) NewImageResourceFetcher(arg1 worker.Worker, arg2 worker.ImageResource, arg3 atc.Version, arg4 int, arg5 atc.VersionedResourceTypes, arg6 worker.ImageFetchingDelegate) image.ImageResourceFetcher { fake.newImageResourceFetcherMutex.Lock() ret, specificReturn := fake.newImageResourceFetcherReturnsOnCall[len(fake.newImageResourceFetcherArgsForCall)] fake.newImageResourceFetcherArgsForCall = append(fake.newImageResourceFetcherArgsForCall, struct { @@ -39,7 +38,7 @@ func (fake *FakeImageResourceFetcherFactory) NewImageResourceFetcher(arg1 worker arg2 worker.ImageResource arg3 atc.Version arg4 int - arg5 creds.VersionedResourceTypes + arg5 atc.VersionedResourceTypes arg6 worker.ImageFetchingDelegate }{arg1, arg2, arg3, arg4, arg5, arg6}) fake.recordInvocation("NewImageResourceFetcher", []interface{}{arg1, arg2, arg3, arg4, arg5, arg6}) @@ -60,13 +59,13 @@ func (fake *FakeImageResourceFetcherFactory) NewImageResourceFetcherCallCount() return len(fake.newImageResourceFetcherArgsForCall) } -func (fake *FakeImageResourceFetcherFactory) NewImageResourceFetcherCalls(stub func(worker.Worker, worker.ImageResource, atc.Version, int, creds.VersionedResourceTypes, worker.ImageFetchingDelegate) image.ImageResourceFetcher) { +func (fake *FakeImageResourceFetcherFactory) NewImageResourceFetcherCalls(stub func(worker.Worker, worker.ImageResource, atc.Version, int, atc.VersionedResourceTypes, worker.ImageFetchingDelegate) image.ImageResourceFetcher) { fake.newImageResourceFetcherMutex.Lock() defer fake.newImageResourceFetcherMutex.Unlock() fake.NewImageResourceFetcherStub = stub } -func (fake *FakeImageResourceFetcherFactory) NewImageResourceFetcherArgsForCall(i int) (worker.Worker, worker.ImageResource, atc.Version, int, creds.VersionedResourceTypes, worker.ImageFetchingDelegate) { +func (fake *FakeImageResourceFetcherFactory) NewImageResourceFetcherArgsForCall(i int) (worker.Worker, worker.ImageResource, 
atc.Version, int, atc.VersionedResourceTypes, worker.ImageFetchingDelegate) { fake.newImageResourceFetcherMutex.RLock() defer fake.newImageResourceFetcherMutex.RUnlock() argsForCall := fake.newImageResourceFetcherArgsForCall[i] diff --git a/atc/worker/image_factory.go b/atc/worker/image_factory.go index c915307f0..3e23ed24e 100644 --- a/atc/worker/image_factory.go +++ b/atc/worker/image_factory.go @@ -7,7 +7,6 @@ import ( "code.cloudfoundry.org/lager" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" ) @@ -21,7 +20,7 @@ type ImageFactory interface { imageSpec ImageSpec, teamID int, delegate ImageFetchingDelegate, - resourceTypes creds.VersionedResourceTypes, + resourceTypes atc.VersionedResourceTypes, ) (Image, error) } diff --git a/atc/worker/pool.go b/atc/worker/pool.go index cc91287a8..059663fd6 100644 --- a/atc/worker/pool.go +++ b/atc/worker/pool.go @@ -1,6 +1,7 @@ package worker import ( + "context" "errors" "fmt" "math/rand" @@ -9,6 +10,7 @@ import ( "code.cloudfoundry.org/clock" "code.cloudfoundry.org/lager" "github.com/concourse/concourse/atc/db" + "github.com/concourse/concourse/atc/db/lock" ) //go:generate counterfeiter . 
WorkerProvider @@ -42,7 +44,8 @@ type WorkerProvider interface { } var ( - ErrNoWorkers = errors.New("no workers") + ErrNoWorkers = errors.New("no workers") + ErrFailedAcquirePoolLock = errors.New("failed to acquire pool lock") ) type NoCompatibleWorkersError struct { @@ -57,9 +60,11 @@ func (err NoCompatibleWorkersError) Error() string { type Pool interface { FindOrChooseWorkerForContainer( + context.Context, lager.Logger, db.ContainerOwner, ContainerSpec, + db.ContainerMetadata, WorkerSpec, ContainerPlacementStrategy, ) (Worker, error) @@ -68,18 +73,30 @@ type Pool interface { lager.Logger, WorkerSpec, ) (Worker, error) + + AcquireContainerCreatingLock( + logger lager.Logger, + ) (lock.Lock, bool, error) } type pool struct { - provider WorkerProvider + clock clock.Clock + lockFactory lock.LockFactory + provider WorkerProvider rand *rand.Rand } -func NewPool(provider WorkerProvider) Pool { +func NewPool( + clock clock.Clock, + lockFactory lock.LockFactory, + provider WorkerProvider, +) Pool { return &pool{ - provider: provider, - rand: rand.New(rand.NewSource(time.Now().UnixNano())), + clock: clock, + lockFactory: lockFactory, + provider: provider, + rand: rand.New(rand.NewSource(time.Now().UnixNano())), } } @@ -120,9 +137,11 @@ func (pool *pool) allSatisfying(logger lager.Logger, spec WorkerSpec) ([]Worker, } func (pool *pool) FindOrChooseWorkerForContainer( + ctx context.Context, logger lager.Logger, owner db.ContainerOwner, containerSpec ContainerSpec, + metadata db.ContainerMetadata, workerSpec WorkerSpec, strategy ContainerPlacementStrategy, ) (Worker, error) { @@ -150,16 +169,43 @@ dance: } } - if worker == nil { - worker, err = strategy.Choose(logger, compatibleWorkers, containerSpec) + // pool is shared by all steps running in the system, + // lock around worker placement strategies so decisions + // are serialized and valid at the time of creating + // containers in garden + for { + lock, acquired, err := pool.AcquireContainerCreatingLock(logger) + if err 
!= nil { + return nil, ErrFailedAcquirePoolLock + } + + if !acquired { + pool.clock.Sleep(time.Second) + continue + } + defer lock.Release() + + if worker == nil { + worker, err = strategy.Choose(logger, compatibleWorkers, containerSpec) + if err != nil { + return nil, err + } + } + + err = worker.EnsureDBContainerExists(nil, logger, owner, metadata) if err != nil { return nil, err } + break } return worker, nil } +func (pool *pool) AcquireContainerCreatingLock(logger lager.Logger) (lock.Lock, bool, error) { + return pool.lockFactory.Acquire(logger, lock.NewContainerCreatingLockID()) +} + func (pool *pool) FindOrChooseWorker( logger lager.Logger, workerSpec WorkerSpec, diff --git a/atc/worker/pool_test.go b/atc/worker/pool_test.go index 5c520cb73..917a13bc5 100644 --- a/atc/worker/pool_test.go +++ b/atc/worker/pool_test.go @@ -1,14 +1,18 @@ package worker_test import ( + "context" "errors" + "time" + "code.cloudfoundry.org/clock/fakeclock" "code.cloudfoundry.org/lager" "code.cloudfoundry.org/lager/lagertest" - "github.com/cloudfoundry/bosh-cli/director/template" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" + "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/db/dbfakes" + "github.com/concourse/concourse/atc/db/lock" + "github.com/concourse/concourse/atc/db/lock/lockfakes" . "github.com/concourse/concourse/atc/worker" "github.com/concourse/concourse/atc/worker/workerfakes" . 
"github.com/onsi/ginkgo" @@ -17,24 +21,29 @@ import ( var _ = Describe("Pool", func() { var ( - logger *lagertest.TestLogger - fakeProvider *workerfakes.FakeWorkerProvider - pool Pool + fakeClock *fakeclock.FakeClock + logger *lagertest.TestLogger + fakeProvider *workerfakes.FakeWorkerProvider + fakeLockFactory *lockfakes.FakeLockFactory + pool Pool ) BeforeEach(func() { logger = lagertest.NewTestLogger("test") fakeProvider = new(workerfakes.FakeWorkerProvider) + fakeLockFactory = new(lockfakes.FakeLockFactory) + fakeClock = fakeclock.NewFakeClock(time.Unix(123, 456)) - pool = NewPool(fakeProvider) + pool = NewPool(fakeClock, fakeLockFactory, fakeProvider) }) Describe("FindOrChooseWorkerForContainer", func() { var ( spec ContainerSpec workerSpec WorkerSpec - resourceTypes creds.VersionedResourceTypes + resourceTypes atc.VersionedResourceTypes fakeOwner *dbfakes.FakeContainerOwner + fakeLock *lockfakes.FakeLock chosenWorker Worker chooseErr error @@ -84,20 +93,16 @@ var _ = Describe("Pool", func() { }, } - variables := template.StaticVariables{ - "secret-source": "super-secret-source", - } - - resourceTypes = creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{ + resourceTypes = atc.VersionedResourceTypes{ { ResourceType: atc.ResourceType{ Name: "custom-type-b", Type: "custom-type-a", - Source: atc.Source{"some": "((secret-source))"}, + Source: atc.Source{"some": "super-secret-source"}, }, Version: atc.Version{"some": "version"}, }, - }) + } workerSpec = WorkerSpec{ ResourceType: "some-type", @@ -111,18 +116,84 @@ var _ = Describe("Pool", func() { compatibleWorker = new(workerfakes.FakeWorker) compatibleWorker.SatisfiesReturns(true) + + fakeLock = new(lockfakes.FakeLock) + fakeLockFactory.AcquireReturns(fakeLock, true, nil) }) JustBeforeEach(func() { chosenWorker, chooseErr = pool.FindOrChooseWorkerForContainer( + context.TODO(), logger, fakeOwner, spec, + db.ContainerMetadata{}, workerSpec, fakeStrategy, ) }) + Context("selects a worker in 
serial", func() { + var ( + workerA *workerfakes.FakeWorker + ) + + BeforeEach(func() { + workerA = new(workerfakes.FakeWorker) + workerA.NameReturns("workerA") + workerA.SatisfiesReturns(true) + + fakeProvider.FindWorkersForContainerByOwnerReturns([]Worker{workerA}, nil) + fakeProvider.RunningWorkersReturns([]Worker{workerA}, nil) + fakeStrategy.ChooseReturns(workerA, nil) + }) + + Context("fails to acquire the pool lock", func() { + BeforeEach(func() { + fakeLockFactory.AcquireReturns(nil, false, ErrFailedAcquirePoolLock) + }) + + It("returns an error", func() { + Expect(fakeLockFactory.AcquireCallCount()).To(Equal(1)) + fakeLockFactory.AcquireReturns(nil, false, ErrFailedAcquirePoolLock) + Expect(chooseErr).To(HaveOccurred()) + Expect(chooseErr.Error()).To(Equal("failed to acquire pool lock")) + }) + }) + + Context("lock is held by another", func() { + BeforeEach(func() { + callCount := 0 + fakeLockFactory.AcquireStub = func(logger lager.Logger, lockID lock.LockID) (lock.Lock, bool, error) { + callCount++ + go fakeClock.WaitForWatcherAndIncrement(time.Second) + + if callCount < 3 { + return nil, false, nil + } + + return fakeLock, true, nil + } + }) + + It("retries every second until it is", func() { + Expect(fakeLockFactory.AcquireCallCount()).To(Equal(3)) + Expect(fakeLock.ReleaseCallCount()).To(Equal(1)) + }) + }) + + Context("lock is not held by anyone", func() { + BeforeEach(func() { + fakeLockFactory.AcquireReturns(fakeLock, true, nil) + }) + + It("acquires the lock", func() { + Expect(fakeLockFactory.AcquireCallCount()).To(Equal(1)) + Expect(chooseErr).ToNot(HaveOccurred()) + }) + }) + }) + Context("when workers are found with the container", func() { var ( workerA *workerfakes.FakeWorker @@ -133,6 +204,7 @@ var _ = Describe("Pool", func() { BeforeEach(func() { workerA = new(workerfakes.FakeWorker) workerA.NameReturns("workerA") + workerA.SatisfiesReturns(true) workerB = new(workerfakes.FakeWorker) workerB.NameReturns("workerB") workerC = 
new(workerfakes.FakeWorker) @@ -143,6 +215,10 @@ var _ = Describe("Pool", func() { fakeStrategy.ChooseReturns(workerA, nil) }) + It("ensures a db container exists", func() { + Expect(workerA.EnsureDBContainerExistsCallCount()).To(Equal(1)) + }) + Context("when one of the workers satisfy the spec", func() { BeforeEach(func() { workerA.SatisfiesReturns(true) @@ -235,6 +311,7 @@ var _ = Describe("Pool", func() { workerA = new(workerfakes.FakeWorker) workerB = new(workerfakes.FakeWorker) workerC = new(workerfakes.FakeWorker) + workerA.NameReturns("workerA") workerA.SatisfiesReturns(true) workerB.SatisfiesReturns(true) @@ -244,6 +321,10 @@ var _ = Describe("Pool", func() { fakeStrategy.ChooseReturns(workerA, nil) }) + It("ensures a db container exists", func() { + Expect(workerA.EnsureDBContainerExistsCallCount()).To(Equal(1)) + }) + It("checks that the workers satisfy the given worker spec", func() { Expect(workerA.SatisfiesCallCount()).To(Equal(1)) _, actualSpec := workerA.SatisfiesArgsForCall(0) @@ -390,6 +471,9 @@ var _ = Describe("Pool", func() { fakeStrategy.ChooseReturns(compatibleWorker, nil) }) + It("ensures a db container exists", func() { + Expect(compatibleWorker.EnsureDBContainerExistsCallCount()).To(Equal(1)) + }) It("chooses a worker", func() { Expect(chooseErr).ToNot(HaveOccurred()) Expect(fakeStrategy.ChooseCallCount()).To(Equal(1)) diff --git a/atc/worker/worker.go b/atc/worker/worker.go index e1d2bcfbe..4192a79fa 100644 --- a/atc/worker/worker.go +++ b/atc/worker/worker.go @@ -2,6 +2,7 @@ package worker import ( "context" + "errors" "fmt" "path/filepath" "sort" @@ -14,13 +15,14 @@ import ( "code.cloudfoundry.org/garden" "code.cloudfoundry.org/lager" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" "github.com/cppforlife/go-semi-semantic/version" ) const userPropertyName = "user" +var ResourceConfigCheckSessionExpiredError = errors.New("no db container was found for owner") + 
//go:generate counterfeiter . Worker type Worker interface { @@ -36,15 +38,21 @@ type Worker interface { IsVersionCompatible(lager.Logger, version.Version) bool Satisfies(lager.Logger, WorkerSpec) bool + EnsureDBContainerExists( + context.Context, + lager.Logger, + db.ContainerOwner, + db.ContainerMetadata, + ) error + FindContainerByHandle(lager.Logger, int, string) (Container, bool, error) FindOrCreateContainer( context.Context, lager.Logger, ImageFetchingDelegate, db.ContainerOwner, - db.ContainerMetadata, ContainerSpec, - creds.VersionedResourceTypes, + atc.VersionedResourceTypes, ) (Container, error) FindVolumeForResourceCache(logger lager.Logger, resourceCache db.UsedResourceCache) (Volume, bool, error) @@ -170,9 +178,8 @@ func (worker *gardenWorker) FindOrCreateContainer( logger lager.Logger, delegate ImageFetchingDelegate, owner db.ContainerOwner, - metadata db.ContainerMetadata, containerSpec ContainerSpec, - resourceTypes creds.VersionedResourceTypes, + resourceTypes atc.VersionedResourceTypes, ) (Container, error) { var ( @@ -183,11 +190,18 @@ func (worker *gardenWorker) FindOrCreateContainer( err error ) - creatingContainer, createdContainer, containerHandle, err = worker.helper.findOrInitializeContainer(logger, owner, metadata) + creatingContainer, createdContainer, err = worker.dbWorker.FindContainer(owner) if err != nil { logger.Error("failed-to-find-container-in-db", err) return nil, err } + if creatingContainer != nil { + containerHandle = creatingContainer.Handle() + } else if createdContainer != nil { + containerHandle = createdContainer.Handle() + } else { + return nil, ResourceConfigCheckSessionExpiredError + } gardenContainer, err = worker.gardenClient.Lookup(containerHandle) if err != nil { @@ -202,7 +216,7 @@ func (worker *gardenWorker) FindOrCreateContainer( logger.Debug("found-created-container-in-db") if gardenContainer == nil { - return nil, garden.ContainerNotFoundError{containerHandle} + return nil, 
garden.ContainerNotFoundError{Handle: containerHandle} } return worker.helper.constructGardenWorkerContainer( logger, @@ -212,8 +226,15 @@ func (worker *gardenWorker) FindOrCreateContainer( } if gardenContainer == nil { - - fetchedImage, err := worker.fetchImageForContainer(ctx, logger, containerSpec.ImageSpec, containerSpec.TeamID, delegate, resourceTypes, creatingContainer) + fetchedImage, err := worker.fetchImageForContainer( + ctx, + logger, + containerSpec.ImageSpec, + containerSpec.TeamID, + delegate, + resourceTypes, + creatingContainer, + ) if err != nil { creatingContainer.Failed() logger.Error("failed-to-fetch-image-for-container", err) @@ -294,13 +315,45 @@ func (worker *gardenWorker) getBindMounts(volumeMounts []VolumeMount, bindMountS return bindMounts, nil } +func (worker *gardenWorker) EnsureDBContainerExists( + ctx context.Context, + logger lager.Logger, + owner db.ContainerOwner, + metadata db.ContainerMetadata, +) error { + + creatingContainer, createdContainer, err := worker.dbWorker.FindContainer(owner) + if err != nil { + return err + } + + if creatingContainer != nil || createdContainer != nil { + return nil + } + + logger.Debug("creating-container-in-db") + creatingContainer, err = worker.dbWorker.CreateContainer( + owner, + metadata, + ) + if err != nil { + logger.Error("failed-to-create-container-in-db", err) + return err + } + + logger = logger.WithData(lager.Data{"container": creatingContainer.Handle()}) + logger.Debug("created-creating-container-in-db") + + return nil +} + func (worker *gardenWorker) fetchImageForContainer( ctx context.Context, logger lager.Logger, spec ImageSpec, teamID int, delegate ImageFetchingDelegate, - resourceTypes creds.VersionedResourceTypes, + resourceTypes atc.VersionedResourceTypes, creatingContainer db.CreatingContainer, ) (FetchedImage, error) { image, err := worker.imageFactory.GetImage( @@ -320,7 +373,12 @@ func (worker *gardenWorker) fetchImageForContainer( return image.FetchForContainer(ctx, logger, 
creatingContainer) } -func (worker *gardenWorker) createVolumes(logger lager.Logger, isPrivileged bool, creatingContainer db.CreatingContainer, spec ContainerSpec) ([]VolumeMount, error) { +func (worker *gardenWorker) createVolumes( + logger lager.Logger, + isPrivileged bool, + creatingContainer db.CreatingContainer, + spec ContainerSpec, +) ([]VolumeMount, error) { var volumeMounts []VolumeMount var ioVolumeMounts []VolumeMount @@ -550,8 +608,8 @@ func (worker *gardenWorker) Satisfies(logger lager.Logger, spec WorkerSpec) bool return true } -func determineUnderlyingTypeName(typeName string, resourceTypes creds.VersionedResourceTypes) string { - resourceTypesMap := make(map[string]creds.VersionedResourceType) +func determineUnderlyingTypeName(typeName string, resourceTypes atc.VersionedResourceTypes) string { + resourceTypesMap := make(map[string]atc.VersionedResourceType) for _, resourceType := range resourceTypes { resourceTypesMap[resourceType.Name] = resourceType } diff --git a/atc/worker/worker_helper.go b/atc/worker/worker_helper.go index 5878a3872..eebc92b38 100644 --- a/atc/worker/worker_helper.go +++ b/atc/worker/worker_helper.go @@ -17,49 +17,6 @@ type workerHelper struct { dbWorker db.Worker } -func (w workerHelper) findOrInitializeContainer( - logger lager.Logger, - owner db.ContainerOwner, - metadata db.ContainerMetadata, -) (db.CreatingContainer, db.CreatedContainer, string, error) { - - creatingContainer, createdContainer, err := w.dbWorker.FindContainerOnWorker(owner) - if err != nil { - return nil, nil, "", err - } - - var foundHandle string - switch { - case creatingContainer != nil: - foundHandle = creatingContainer.Handle() - case createdContainer != nil: - foundHandle = createdContainer.Handle() - } - - if foundHandle != "" { - logger = logger.WithData(lager.Data{"container": foundHandle}) - logger.Debug("found-container-in-db") - return creatingContainer, createdContainer, foundHandle, nil - } - - // No foundHandle means no container in db - 
logger.Debug("creating-container-in-db") - creatingContainer, err = w.dbWorker.CreateContainer( - owner, - metadata, - ) - if err != nil { - logger.Error("failed-to-create-container-in-db", err) - return nil, nil, "", err - } - - foundHandle = creatingContainer.Handle() - logger = logger.WithData(lager.Data{"container": foundHandle}) - logger.Debug("created-creating-container-in-db") - - return creatingContainer, nil, foundHandle, nil -} - func (w workerHelper) createGardenContainer( containerSpec ContainerSpec, fetchedImage FetchedImage, @@ -100,7 +57,6 @@ func (w workerHelper) createGardenContainer( }) } - func (w workerHelper) constructGardenWorkerContainer( logger lager.Logger, createdContainer db.CreatedContainer, @@ -143,7 +99,7 @@ func getDestinationPathsFromInputs(inputs []InputSource) []string { } func getDestinationPathsFromOutputs(outputs OutputPaths) []string { - idx := 0 + idx := 0 destinationPaths := make([]string, len(outputs)) for _, destinationPath := range outputs { diff --git a/atc/worker/worker_test.go b/atc/worker/worker_test.go index 4bb19ba0f..d635177fd 100644 --- a/atc/worker/worker_test.go +++ b/atc/worker/worker_test.go @@ -2,21 +2,20 @@ package worker_test import ( "bytes" - "code.cloudfoundry.org/garden" - "code.cloudfoundry.org/lager" - "fmt" - "github.com/concourse/baggageclaim" - "github.com/concourse/baggageclaim/baggageclaimfakes" - "io/ioutil" - "time" "context" "errors" + "fmt" + "io/ioutil" + "time" + + "code.cloudfoundry.org/garden" + "code.cloudfoundry.org/lager" + "github.com/concourse/baggageclaim" + "github.com/concourse/baggageclaim/baggageclaimfakes" "code.cloudfoundry.org/garden/gardenfakes" "code.cloudfoundry.org/lager/lagertest" - "github.com/cloudfoundry/bosh-cli/director/template" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/db/dbfakes" . 
"github.com/concourse/concourse/atc/worker" @@ -28,7 +27,7 @@ import ( var _ = Describe("Worker", func() { var ( - logger *lagertest.TestLogger + logger *lagertest.TestLogger fakeVolumeClient *workerfakes.FakeVolumeClient activeContainers int resourceTypes []atc.WorkerResourceType @@ -43,7 +42,7 @@ var _ = Describe("Worker", func() { fakeGardenClient *gardenfakes.FakeClient fakeImageFactory *workerfakes.FakeImageFactory fakeImage *workerfakes.FakeImage - dbWorker *dbfakes.FakeWorker + fakeDBWorker *dbfakes.FakeWorker fakeDBVolumeRepository *dbfakes.FakeVolumeRepository fakeDBTeamFactory *dbfakes.FakeTeamFactory fakeDBTeam *dbfakes.FakeTeam @@ -69,9 +68,9 @@ var _ = Describe("Worker", func() { fakeContainerOwner *dbfakes.FakeContainerOwner containerMetadata db.ContainerMetadata - stubbedVolumes map[string]*workerfakes.FakeVolume - volumeSpecs map[string]VolumeSpec - credsResourceTypes creds.VersionedResourceTypes + stubbedVolumes map[string]*workerfakes.FakeVolume + volumeSpecs map[string]VolumeSpec + atcResourceTypes atc.VersionedResourceTypes findOrCreateErr error findOrCreateContainer Container @@ -95,7 +94,7 @@ var _ = Describe("Worker", func() { workerName = "some-worker" workerStartTime = time.Now().Unix() workerVersion = "1.2.3" - dbWorker = new(dbfakes.FakeWorker) + fakeDBWorker = new(dbfakes.FakeWorker) fakeGardenClient = new(gardenfakes.FakeClient) fakeImageFactory = new(workerfakes.FakeImageFactory) @@ -109,7 +108,6 @@ var _ = Describe("Worker", func() { fakeDBVolumeRepository = new(dbfakes.FakeVolumeRepository) - fakeDBTeamFactory = new(dbfakes.FakeTeamFactory) fakeDBTeam = new(dbfakes.FakeTeam) fakeDBTeamFactory.GetByIDReturns(fakeDBTeam) @@ -204,11 +202,6 @@ var _ = Describe("Worker", func() { StepName: "some-step", } - variables := template.StaticVariables{ - "secret-image": "super-secret-image", - "secret-source": "super-secret-source", - } - cpu := uint64(1024) memory := uint64(1024) containerSpec = ContainerSpec{ @@ -217,7 +210,7 @@ var _ = 
Describe("Worker", func() { ImageSpec: ImageSpec{ ImageResource: &ImageResource{ Type: "registry-image", - Source: creds.NewSource(variables, atc.Source{"some": "((secret-image))"}), + Source: atc.Source{"some": "super-secret-image"}, }, }, @@ -243,34 +236,34 @@ var _ = Describe("Worker", func() { }, } - credsResourceTypes = creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{ + atcResourceTypes = atc.VersionedResourceTypes{ { ResourceType: atc.ResourceType{ Type: "some-type", - Source: atc.Source{"some": "((secret-source))"}, + Source: atc.Source{"some": "super-secret-source"}, }, Version: atc.Version{"some": "version"}, }, - }) + } fakeGardenContainer = new(gardenfakes.FakeContainer) fakeGardenClient.CreateReturns(fakeGardenContainer, nil) }) JustBeforeEach(func() { - dbWorker.ActiveContainersReturns(activeContainers) - dbWorker.ResourceTypesReturns(resourceTypes) - dbWorker.PlatformReturns(platform) - dbWorker.TagsReturns(tags) - dbWorker.EphemeralReturns(ephemeral) - dbWorker.TeamIDReturns(teamID) - dbWorker.NameReturns(workerName) - dbWorker.StartTimeReturns(workerStartTime) - dbWorker.VersionReturns(&workerVersion) - dbWorker.HTTPProxyURLReturns("http://proxy.com") - dbWorker.HTTPSProxyURLReturns("https://proxy.com") - dbWorker.NoProxyReturns("http://noproxy.com") - dbWorker.CreateContainerReturns(fakeCreatingContainer, nil) + fakeDBWorker.ActiveContainersReturns(activeContainers) + fakeDBWorker.ResourceTypesReturns(resourceTypes) + fakeDBWorker.PlatformReturns(platform) + fakeDBWorker.TagsReturns(tags) + fakeDBWorker.EphemeralReturns(ephemeral) + fakeDBWorker.TeamIDReturns(teamID) + fakeDBWorker.NameReturns(workerName) + fakeDBWorker.StartTimeReturns(workerStartTime) + fakeDBWorker.VersionReturns(&workerVersion) + fakeDBWorker.HTTPProxyURLReturns("http://proxy.com") + fakeDBWorker.HTTPSProxyURLReturns("https://proxy.com") + fakeDBWorker.NoProxyReturns("http://noproxy.com") + fakeDBWorker.CreateContainerReturns(fakeCreatingContainer, nil) 
gardenWorker = NewGardenWorker( fakeGardenClient, @@ -278,7 +271,7 @@ var _ = Describe("Worker", func() { fakeVolumeClient, fakeImageFactory, fakeDBTeamFactory, - dbWorker, + fakeDBWorker, 0, ) }) @@ -590,13 +583,12 @@ var _ = Describe("Worker", func() { satisfies bool - customTypes creds.VersionedResourceTypes + customTypes atc.VersionedResourceTypes ) BeforeEach(func() { - variables := template.StaticVariables{} - customTypes = creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{ + customTypes = atc.VersionedResourceTypes{ { ResourceType: atc.ResourceType{ Name: "custom-type-b", @@ -637,7 +629,7 @@ var _ = Describe("Worker", func() { }, Version: atc.Version{"some": "version"}, }, - }) + } spec = WorkerSpec{ Tags: []string{"some", "tags"}, @@ -765,9 +757,8 @@ var _ = Describe("Worker", func() { Context("when the resource type is a custom type that overrides one supported by the worker", func() { BeforeEach(func() { - variables := template.StaticVariables{} - customTypes = creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{ + customTypes = atc.VersionedResourceTypes{ { ResourceType: atc.ResourceType{ Name: "some-resource", @@ -776,7 +767,7 @@ var _ = Describe("Worker", func() { }, Version: atc.Version{"some": "version"}, }, - }) + } spec.ResourceType = "some-resource" }) @@ -788,9 +779,8 @@ var _ = Describe("Worker", func() { Context("when the resource type is a custom type that results in a circular dependency", func() { BeforeEach(func() { - variables := template.StaticVariables{} - customTypes = creds.NewVersionedResourceTypes(variables, atc.VersionedResourceTypes{ + customTypes = atc.VersionedResourceTypes{ atc.VersionedResourceType{ ResourceType: atc.ResourceType{ Name: "circle-a", @@ -813,7 +803,7 @@ var _ = Describe("Worker", func() { }, Version: atc.Version{"some": "version"}, }, - }) + } spec.ResourceType = "circle-a" }) @@ -895,6 +885,52 @@ var _ = Describe("Worker", func() { }) }) + Describe("EnsureDBContainerExists", 
func() { + var err error + + JustBeforeEach(func() { + err = gardenWorker.EnsureDBContainerExists( + ctx, + logger, + fakeContainerOwner, + containerMetadata, + ) + Expect(err).ToNot(HaveOccurred()) + }) + Context("when neither a creating/created container exists", func() { + BeforeEach(func() { + fakeDBWorker.FindContainerReturns(nil, nil, nil) + fakeDBWorker.CreateContainerReturns(fakeCreatingContainer, nil) + }) + It("creates a creating container", func() { + Expect(fakeDBWorker.CreateContainerCallCount()).To(Equal(1)) + owner, metadata := fakeDBWorker.CreateContainerArgsForCall(0) + Expect(owner).To(Equal(fakeContainerOwner)) + Expect(metadata).To(Equal(containerMetadata)) + }) + + }) + + Context("when a creating container exists", func() { + BeforeEach(func() { + fakeDBWorker.FindContainerReturns(fakeCreatingContainer, nil, nil) + }) + It("does not create a new db container", func() { + Expect(fakeDBWorker.CreateContainerCallCount()).To(Equal(0)) + }) + }) + + Context("when a created container exists", func() { + BeforeEach(func() { + fakeDBWorker.FindContainerReturns(nil, fakeCreatedContainer, nil) + }) + It("does not create a new db container", func() { + Expect(fakeDBWorker.CreateContainerCallCount()).To(Equal(0)) + }) + + }) + }) + Describe("FindOrCreateContainer", func() { CertsVolumeExists := func() { fakeCertsVolume := new(baggageclaimfakes.FakeVolume) @@ -907,16 +943,15 @@ var _ = Describe("Worker", func() { logger, fakeImageFetchingDelegate, fakeContainerOwner, - containerMetadata, containerSpec, - credsResourceTypes, + atcResourceTypes, ) }) disasterErr := errors.New("disaster") Context("when container exists in database in creating state", func() { BeforeEach(func() { - dbWorker.FindContainerOnWorkerReturns(fakeCreatingContainer, nil, nil) + fakeDBWorker.FindContainerReturns(fakeCreatingContainer, nil, nil) }) Context("when container exists in garden", func() { @@ -951,6 +986,532 @@ var _ = Describe("Worker", func() { 
Expect(findOrCreateContainer).ToNot(BeNil()) }) + It("creates the container in garden with the input and output volumes in alphabetical order", func() { + Expect(fakeGardenClient.CreateCallCount()).To(Equal(1)) + + actualSpec := fakeGardenClient.CreateArgsForCall(0) + Expect(actualSpec).To(Equal(garden.ContainerSpec{ + Handle: "some-handle", + RootFSPath: "some-image-url", + Properties: garden.Properties{"user": "some-user"}, + BindMounts: []garden.BindMount{ + { + SrcPath: "some/source", + DstPath: "some/destination", + Mode: garden.BindMountModeRO, + }, + { + SrcPath: "/fake/scratch/volume", + DstPath: "/scratch", + Mode: garden.BindMountModeRW, + }, + { + SrcPath: "/fake/work-dir/volume", + DstPath: "/some/work-dir", + Mode: garden.BindMountModeRW, + }, + { + SrcPath: "/fake/local/cow/volume", + DstPath: "/some/work-dir/local-input", + Mode: garden.BindMountModeRW, + }, + { + SrcPath: "/fake/output/volume", + DstPath: "/some/work-dir/output", + Mode: garden.BindMountModeRW, + }, + { + SrcPath: "/fake/remote/input/container/volume", + DstPath: "/some/work-dir/remote-input", + Mode: garden.BindMountModeRW, + }, + }, + Limits: garden.Limits{ + CPU: garden.CPULimits{LimitInShares: 1024}, + Memory: garden.MemoryLimits{LimitInBytes: 1024}, + }, + Env: []string{ + "IMAGE=ENV", + "SOME=ENV", + "http_proxy=http://proxy.com", + "https_proxy=https://proxy.com", + "no_proxy=http://noproxy.com", + }, + })) + }) + + Context("when the input and output destination paths overlap", func() { + var ( + fakeRemoteInputUnderInput *workerfakes.FakeInputSource + fakeRemoteInputUnderInputAS *workerfakes.FakeArtifactSource + fakeRemoteInputUnderOutput *workerfakes.FakeInputSource + fakeRemoteInputUnderOutputAS *workerfakes.FakeArtifactSource + + fakeOutputUnderInputVolume *workerfakes.FakeVolume + fakeOutputUnderOutputVolume *workerfakes.FakeVolume + fakeRemoteInputUnderInputContainerVolume *workerfakes.FakeVolume + fakeRemoteInputUnderOutputContainerVolume *workerfakes.FakeVolume + ) + 
+ BeforeEach(func() { + fakeRemoteInputUnderInput = new(workerfakes.FakeInputSource) + fakeRemoteInputUnderInput.DestinationPathReturns("/some/work-dir/remote-input/other-input") + fakeRemoteInputUnderInputAS = new(workerfakes.FakeArtifactSource) + fakeRemoteInputUnderInputAS.VolumeOnReturns(nil, false, nil) + fakeRemoteInputUnderInput.SourceReturns(fakeRemoteInputUnderInputAS) + + fakeRemoteInputUnderOutput = new(workerfakes.FakeInputSource) + fakeRemoteInputUnderOutput.DestinationPathReturns("/some/work-dir/output/input") + fakeRemoteInputUnderOutputAS = new(workerfakes.FakeArtifactSource) + fakeRemoteInputUnderOutputAS.VolumeOnReturns(nil, false, nil) + fakeRemoteInputUnderOutput.SourceReturns(fakeRemoteInputUnderOutputAS) + + fakeOutputUnderInputVolume = new(workerfakes.FakeVolume) + fakeOutputUnderInputVolume.PathReturns("/fake/output/under/input/volume") + fakeOutputUnderOutputVolume = new(workerfakes.FakeVolume) + fakeOutputUnderOutputVolume.PathReturns("/fake/output/other-output/volume") + + fakeRemoteInputUnderInputContainerVolume = new(workerfakes.FakeVolume) + fakeRemoteInputUnderInputContainerVolume.PathReturns("/fake/remote/input/other-input/container/volume") + fakeRemoteInputUnderOutputContainerVolume = new(workerfakes.FakeVolume) + fakeRemoteInputUnderOutputContainerVolume.PathReturns("/fake/output/input/container/volume") + + stubbedVolumes["/some/work-dir/remote-input/other-input"] = fakeRemoteInputUnderInputContainerVolume + stubbedVolumes["/some/work-dir/output/input"] = fakeRemoteInputUnderOutputContainerVolume + stubbedVolumes["/some/work-dir/output/other-output"] = fakeOutputUnderOutputVolume + stubbedVolumes["/some/work-dir/local-input/output"] = fakeOutputUnderInputVolume + }) + + Context("outputs are nested under inputs", func() { + BeforeEach(func() { + containerSpec.Inputs = []InputSource{ + fakeLocalInput, + } + containerSpec.Outputs = OutputPaths{ + "some-output-under-input": "/some/work-dir/local-input/output", + } + }) + + 
It("creates the container with correct bind mounts", func() { + Expect(fakeGardenClient.CreateCallCount()).To(Equal(1)) + + actualSpec := fakeGardenClient.CreateArgsForCall(0) + Expect(actualSpec).To(Equal(garden.ContainerSpec{ + Handle: "some-handle", + RootFSPath: "some-image-url", + Properties: garden.Properties{"user": "some-user"}, + BindMounts: []garden.BindMount{ + { + SrcPath: "some/source", + DstPath: "some/destination", + Mode: garden.BindMountModeRO, + }, + { + SrcPath: "/fake/scratch/volume", + DstPath: "/scratch", + Mode: garden.BindMountModeRW, + }, + { + SrcPath: "/fake/work-dir/volume", + DstPath: "/some/work-dir", + Mode: garden.BindMountModeRW, + }, + { + SrcPath: "/fake/local/cow/volume", + DstPath: "/some/work-dir/local-input", + Mode: garden.BindMountModeRW, + }, + { + SrcPath: "/fake/output/under/input/volume", + DstPath: "/some/work-dir/local-input/output", + Mode: garden.BindMountModeRW, + }, + }, + Limits: garden.Limits{ + CPU: garden.CPULimits{LimitInShares: 1024}, + Memory: garden.MemoryLimits{LimitInBytes: 1024}, + }, + Env: []string{ + "IMAGE=ENV", + "SOME=ENV", + "http_proxy=http://proxy.com", + "https_proxy=https://proxy.com", + "no_proxy=http://noproxy.com", + }, + })) + }) + }) + + Context("inputs are nested under inputs", func() { + BeforeEach(func() { + containerSpec.Inputs = []InputSource{ + fakeRemoteInput, + fakeRemoteInputUnderInput, + } + containerSpec.Outputs = OutputPaths{} + }) + + It("creates the container with correct bind mounts", func() { + Expect(fakeGardenClient.CreateCallCount()).To(Equal(1)) + + actualSpec := fakeGardenClient.CreateArgsForCall(0) + Expect(actualSpec).To(Equal(garden.ContainerSpec{ + Handle: "some-handle", + RootFSPath: "some-image-url", + Properties: garden.Properties{"user": "some-user"}, + BindMounts: []garden.BindMount{ + { + SrcPath: "some/source", + DstPath: "some/destination", + Mode: garden.BindMountModeRO, + }, + { + SrcPath: "/fake/scratch/volume", + DstPath: "/scratch", + Mode: 
garden.BindMountModeRW, + }, + { + SrcPath: "/fake/work-dir/volume", + DstPath: "/some/work-dir", + Mode: garden.BindMountModeRW, + }, + { + SrcPath: "/fake/remote/input/container/volume", + DstPath: "/some/work-dir/remote-input", + Mode: garden.BindMountModeRW, + }, + { + SrcPath: "/fake/remote/input/other-input/container/volume", + DstPath: "/some/work-dir/remote-input/other-input", + Mode: garden.BindMountModeRW, + }, + }, + Limits: garden.Limits{ + CPU: garden.CPULimits{LimitInShares: 1024}, + Memory: garden.MemoryLimits{LimitInBytes: 1024}, + }, + Env: []string{ + "IMAGE=ENV", + "SOME=ENV", + "http_proxy=http://proxy.com", + "https_proxy=https://proxy.com", + "no_proxy=http://noproxy.com", + }, + })) + }) + }) + + Context("outputs are nested under outputs", func() { + BeforeEach(func() { + containerSpec.Inputs = []InputSource{} + containerSpec.Outputs = OutputPaths{ + "some-output": "/some/work-dir/output", + "some-output-under-output": "/some/work-dir/output/other-output", + } + }) + + It("creates the container with correct bind mounts", func() { + Expect(fakeGardenClient.CreateCallCount()).To(Equal(1)) + + actualSpec := fakeGardenClient.CreateArgsForCall(0) + Expect(actualSpec).To(Equal(garden.ContainerSpec{ + Handle: "some-handle", + RootFSPath: "some-image-url", + Properties: garden.Properties{"user": "some-user"}, + BindMounts: []garden.BindMount{ + { + SrcPath: "some/source", + DstPath: "some/destination", + Mode: garden.BindMountModeRO, + }, + { + SrcPath: "/fake/scratch/volume", + DstPath: "/scratch", + Mode: garden.BindMountModeRW, + }, + { + SrcPath: "/fake/work-dir/volume", + DstPath: "/some/work-dir", + Mode: garden.BindMountModeRW, + }, + { + SrcPath: "/fake/output/volume", + DstPath: "/some/work-dir/output", + Mode: garden.BindMountModeRW, + }, + { + SrcPath: "/fake/output/other-output/volume", + DstPath: "/some/work-dir/output/other-output", + Mode: garden.BindMountModeRW, + }, + }, + Limits: garden.Limits{ + CPU: garden.CPULimits{LimitInShares: 
1024}, + Memory: garden.MemoryLimits{LimitInBytes: 1024}, + }, + Env: []string{ + "IMAGE=ENV", + "SOME=ENV", + "http_proxy=http://proxy.com", + "https_proxy=https://proxy.com", + "no_proxy=http://noproxy.com", + }, + })) + }) + }) + + Context("inputs are nested under outputs", func() { + BeforeEach(func() { + containerSpec.Inputs = []InputSource{ + fakeRemoteInputUnderOutput, + } + containerSpec.Outputs = OutputPaths{ + "some-output": "/some/work-dir/output", + } + }) + + It("creates the container with correct bind mounts", func() { + Expect(fakeGardenClient.CreateCallCount()).To(Equal(1)) + + actualSpec := fakeGardenClient.CreateArgsForCall(0) + Expect(actualSpec).To(Equal(garden.ContainerSpec{ + Handle: "some-handle", + RootFSPath: "some-image-url", + Properties: garden.Properties{"user": "some-user"}, + BindMounts: []garden.BindMount{ + { + SrcPath: "some/source", + DstPath: "some/destination", + Mode: garden.BindMountModeRO, + }, + { + SrcPath: "/fake/scratch/volume", + DstPath: "/scratch", + Mode: garden.BindMountModeRW, + }, + { + SrcPath: "/fake/work-dir/volume", + DstPath: "/some/work-dir", + Mode: garden.BindMountModeRW, + }, + { + SrcPath: "/fake/output/volume", + DstPath: "/some/work-dir/output", + Mode: garden.BindMountModeRW, + }, + { + SrcPath: "/fake/output/input/container/volume", + DstPath: "/some/work-dir/output/input", + Mode: garden.BindMountModeRW, + }, + }, + Limits: garden.Limits{ + CPU: garden.CPULimits{LimitInShares: 1024}, + Memory: garden.MemoryLimits{LimitInBytes: 1024}, + }, + Env: []string{ + "IMAGE=ENV", + "SOME=ENV", + "http_proxy=http://proxy.com", + "https_proxy=https://proxy.com", + "no_proxy=http://noproxy.com", + }, + })) + + }) + }) + + Context("input and output share the same destination path", func() { + BeforeEach(func() { + containerSpec.Inputs = []InputSource{ + fakeRemoteInput, + } + containerSpec.Outputs = OutputPaths{ + "some-output": "/some/work-dir/remote-input", + } + }) + + It("creates the container with correct 
bind mounts", func() { + Expect(fakeGardenClient.CreateCallCount()).To(Equal(1)) + + actualSpec := fakeGardenClient.CreateArgsForCall(0) + Expect(actualSpec).To(Equal(garden.ContainerSpec{ + Handle: "some-handle", + RootFSPath: "some-image-url", + Properties: garden.Properties{"user": "some-user"}, + BindMounts: []garden.BindMount{ + { + SrcPath: "some/source", + DstPath: "some/destination", + Mode: garden.BindMountModeRO, + }, + { + SrcPath: "/fake/scratch/volume", + DstPath: "/scratch", + Mode: garden.BindMountModeRW, + }, + { + SrcPath: "/fake/work-dir/volume", + DstPath: "/some/work-dir", + Mode: garden.BindMountModeRW, + }, + { + SrcPath: "/fake/remote/input/container/volume", + DstPath: "/some/work-dir/remote-input", + Mode: garden.BindMountModeRW, + }, + }, + Limits: garden.Limits{ + CPU: garden.CPULimits{LimitInShares: 1024}, + Memory: garden.MemoryLimits{LimitInBytes: 1024}, + }, + Env: []string{ + "IMAGE=ENV", + "SOME=ENV", + "http_proxy=http://proxy.com", + "https_proxy=https://proxy.com", + "no_proxy=http://noproxy.com", + }, + })) + }) + + }) + }) + + Context("when the certs volume does not exist on the worker", func() { + BeforeEach(func() { + fakeBaggageclaimClient.LookupVolumeReturns(nil, false, nil) + }) + It("creates the container in garden, but does not bind mount any certs", func() { + Expect(fakeGardenClient.CreateCallCount()).To(Equal(1)) + actualSpec := fakeGardenClient.CreateArgsForCall(0) + Expect(actualSpec.BindMounts).ToNot(ContainElement( + garden.BindMount{ + SrcPath: "/the/certs/volume/path", + DstPath: "/etc/ssl/certs", + Mode: garden.BindMountModeRO, + }, + )) + }) + }) + + It("creates each volume unprivileged", func() { + Expect(volumeSpecs).To(Equal(map[string]VolumeSpec{ + "/scratch": VolumeSpec{Strategy: baggageclaim.EmptyStrategy{}}, + "/some/work-dir": VolumeSpec{Strategy: baggageclaim.EmptyStrategy{}}, + "/some/work-dir/output": VolumeSpec{Strategy: baggageclaim.EmptyStrategy{}}, + "/some/work-dir/local-input": 
VolumeSpec{Strategy: fakeLocalVolume.COWStrategy()}, + "/some/work-dir/remote-input": VolumeSpec{Strategy: baggageclaim.EmptyStrategy{}}, + })) + }) + + It("streams remote inputs into newly created container volumes", func() { + Expect(fakeRemoteInputAS.StreamToCallCount()).To(Equal(1)) + _, ad := fakeRemoteInputAS.StreamToArgsForCall(0) + + err := ad.StreamIn(".", bytes.NewBufferString("some-stream")) + Expect(err).ToNot(HaveOccurred()) + + Expect(fakeRemoteInputContainerVolume.StreamInCallCount()).To(Equal(1)) + + dst, from := fakeRemoteInputContainerVolume.StreamInArgsForCall(0) + Expect(dst).To(Equal(".")) + Expect(ioutil.ReadAll(from)).To(Equal([]byte("some-stream"))) + }) + + It("marks container as created", func() { + Expect(fakeCreatingContainer.CreatedCallCount()).To(Equal(1)) + }) + + Context("when the fetched image was privileged", func() { + BeforeEach(func() { + fakeImage.FetchForContainerReturns(FetchedImage{ + Privileged: true, + Metadata: ImageMetadata{ + Env: []string{"IMAGE=ENV"}, + }, + URL: "some-image-url", + }, nil) + }) + + It("creates the container privileged", func() { + Expect(fakeGardenClient.CreateCallCount()).To(Equal(1)) + + actualSpec := fakeGardenClient.CreateArgsForCall(0) + Expect(actualSpec.Privileged).To(BeTrue()) + }) + + It("creates each volume privileged", func() { + Expect(volumeSpecs).To(Equal(map[string]VolumeSpec{ + "/scratch": VolumeSpec{Privileged: true, Strategy: baggageclaim.EmptyStrategy{}}, + "/some/work-dir": VolumeSpec{Privileged: true, Strategy: baggageclaim.EmptyStrategy{}}, + "/some/work-dir/output": VolumeSpec{Privileged: true, Strategy: baggageclaim.EmptyStrategy{}}, + "/some/work-dir/local-input": VolumeSpec{Privileged: true, Strategy: fakeLocalVolume.COWStrategy()}, + "/some/work-dir/remote-input": VolumeSpec{Privileged: true, Strategy: baggageclaim.EmptyStrategy{}}, + })) + }) + + }) + + Context("when an input has the path set to the workdir itself", func() { + BeforeEach(func() { + 
fakeLocalInput.DestinationPathReturns("/some/work-dir") + delete(stubbedVolumes, "/some/work-dir/local-input") + stubbedVolumes["/some/work-dir"] = fakeLocalCOWVolume + }) + + It("does not create or mount a work-dir, as we support this for backwards-compatibility", func() { + Expect(fakeGardenClient.CreateCallCount()).To(Equal(1)) + + actualSpec := fakeGardenClient.CreateArgsForCall(0) + Expect(actualSpec.BindMounts).To(Equal([]garden.BindMount{ + { + SrcPath: "some/source", + DstPath: "some/destination", + Mode: garden.BindMountModeRO, + }, + { + SrcPath: "/fake/scratch/volume", + DstPath: "/scratch", + Mode: garden.BindMountModeRW, + }, + { + SrcPath: "/fake/local/cow/volume", + DstPath: "/some/work-dir", + Mode: garden.BindMountModeRW, + }, + { + SrcPath: "/fake/output/volume", + DstPath: "/some/work-dir/output", + Mode: garden.BindMountModeRW, + }, + { + SrcPath: "/fake/remote/input/container/volume", + DstPath: "/some/work-dir/remote-input", + Mode: garden.BindMountModeRW, + }, + })) + }) + }) + + Context("when failing to create container in garden", func() { + BeforeEach(func() { + fakeGardenClient.CreateReturns(nil, disasterErr) + }) + + It("returns an error", func() { + Expect(findOrCreateErr).To(Equal(disasterErr)) + }) + + It("does not mark container as created", func() { + Expect(fakeCreatingContainer.CreatedCallCount()).To(Equal(0)) + }) + + It("marks the container as failed", func() { + Expect(fakeCreatingContainer.FailedCallCount()).To(Equal(1)) + }) + }) + Context("when failing to create container in garden", func() { BeforeEach(func() { fakeGardenClient.CreateReturns(nil, disasterErr) @@ -970,7 +1531,7 @@ var _ = Describe("Worker", func() { Context("when container exists in database in created state", func() { BeforeEach(func() { - dbWorker.FindContainerOnWorkerReturns(nil, fakeCreatedContainer, nil) + fakeDBWorker.FindContainerReturns(nil, fakeCreatedContainer, nil) }) Context("when container exists in garden", func() { @@ -1000,544 +1561,20 @@ var 
_ = Describe("Worker", func() { Context("when container does not exist in database", func() { BeforeEach(func() { - dbWorker.FindContainerOnWorkerReturns(nil, nil, nil) + fakeDBWorker.FindContainerReturns(nil, nil, nil) }) - Context("when the certs volume does not exist on the worker", func() { - BeforeEach(func() { - fakeBaggageclaimClient.LookupVolumeReturns(nil, false, nil) - }) - It("creates the container in garden, but does not bind mount any certs", func() { - Expect(fakeGardenClient.CreateCallCount()).To(Equal(1)) - actualSpec := fakeGardenClient.CreateArgsForCall(0) - Expect(actualSpec.BindMounts).ToNot(ContainElement( - garden.BindMount{ - SrcPath: "/the/certs/volume/path", - DstPath: "/etc/ssl/certs", - Mode: garden.BindMountModeRO, - }, - )) - }) - }) - - BeforeEach(func() { - fakeCertsVolume := new(baggageclaimfakes.FakeVolume) - fakeCertsVolume.PathReturns("/the/certs/volume/path") - fakeBaggageclaimClient.LookupVolumeReturns(fakeCertsVolume, true, nil) - }) + // BeforeEach(func() { + // fakeCertsVolume := new(baggageclaimfakes.FakeVolume) + // fakeCertsVolume.PathReturns("/the/certs/volume/path") + // fakeBaggageclaimClient.LookupVolumeReturns(fakeCertsVolume, true, nil) + // }) It("creates container in database", func() { - Expect(dbWorker.CreateContainerCallCount()).To(Equal(1)) + Expect(findOrCreateErr).To(HaveOccurred()) + Expect(findOrCreateErr.Error()).To(Equal("no db container was found for owner")) }) - It("creates the container in garden with the input and output volumes in alphabetical order", func() { - Expect(fakeGardenClient.CreateCallCount()).To(Equal(1)) - - actualSpec := fakeGardenClient.CreateArgsForCall(0) - Expect(actualSpec).To(Equal(garden.ContainerSpec{ - Handle: "some-handle", - RootFSPath: "some-image-url", - Properties: garden.Properties{"user": "some-user"}, - BindMounts: []garden.BindMount{ - { - SrcPath: "some/source", - DstPath: "some/destination", - Mode: garden.BindMountModeRO, - }, - { - SrcPath: "/fake/scratch/volume", 
- DstPath: "/scratch", - Mode: garden.BindMountModeRW, - }, - { - SrcPath: "/fake/work-dir/volume", - DstPath: "/some/work-dir", - Mode: garden.BindMountModeRW, - }, - { - SrcPath: "/fake/local/cow/volume", - DstPath: "/some/work-dir/local-input", - Mode: garden.BindMountModeRW, - }, - { - SrcPath: "/fake/output/volume", - DstPath: "/some/work-dir/output", - Mode: garden.BindMountModeRW, - }, - { - SrcPath: "/fake/remote/input/container/volume", - DstPath: "/some/work-dir/remote-input", - Mode: garden.BindMountModeRW, - }, - }, - Limits: garden.Limits{ - CPU: garden.CPULimits{LimitInShares: 1024}, - Memory: garden.MemoryLimits{LimitInBytes: 1024}, - }, - Env: []string{ - "IMAGE=ENV", - "SOME=ENV", - "http_proxy=http://proxy.com", - "https_proxy=https://proxy.com", - "no_proxy=http://noproxy.com", - }, - })) - }) - - Context("when the input and output destination paths overlap", func() { - var ( - fakeRemoteInputUnderInput *workerfakes.FakeInputSource - fakeRemoteInputUnderInputAS *workerfakes.FakeArtifactSource - fakeRemoteInputUnderOutput *workerfakes.FakeInputSource - fakeRemoteInputUnderOutputAS *workerfakes.FakeArtifactSource - - fakeOutputUnderInputVolume *workerfakes.FakeVolume - fakeOutputUnderOutputVolume *workerfakes.FakeVolume - fakeRemoteInputUnderInputContainerVolume *workerfakes.FakeVolume - fakeRemoteInputUnderOutputContainerVolume *workerfakes.FakeVolume - ) - - BeforeEach(func() { - fakeRemoteInputUnderInput = new(workerfakes.FakeInputSource) - fakeRemoteInputUnderInput.DestinationPathReturns("/some/work-dir/remote-input/other-input") - fakeRemoteInputUnderInputAS = new(workerfakes.FakeArtifactSource) - fakeRemoteInputUnderInputAS.VolumeOnReturns(nil, false, nil) - fakeRemoteInputUnderInput.SourceReturns(fakeRemoteInputUnderInputAS) - - fakeRemoteInputUnderOutput = new(workerfakes.FakeInputSource) - fakeRemoteInputUnderOutput.DestinationPathReturns("/some/work-dir/output/input") - fakeRemoteInputUnderOutputAS = new(workerfakes.FakeArtifactSource) - 
fakeRemoteInputUnderOutputAS.VolumeOnReturns(nil, false, nil) - fakeRemoteInputUnderOutput.SourceReturns(fakeRemoteInputUnderOutputAS) - - fakeOutputUnderInputVolume = new(workerfakes.FakeVolume) - fakeOutputUnderInputVolume.PathReturns("/fake/output/under/input/volume") - fakeOutputUnderOutputVolume = new(workerfakes.FakeVolume) - fakeOutputUnderOutputVolume.PathReturns("/fake/output/other-output/volume") - - fakeRemoteInputUnderInputContainerVolume = new(workerfakes.FakeVolume) - fakeRemoteInputUnderInputContainerVolume.PathReturns("/fake/remote/input/other-input/container/volume") - fakeRemoteInputUnderOutputContainerVolume = new(workerfakes.FakeVolume) - fakeRemoteInputUnderOutputContainerVolume.PathReturns("/fake/output/input/container/volume") - - stubbedVolumes["/some/work-dir/remote-input/other-input"] = fakeRemoteInputUnderInputContainerVolume - stubbedVolumes["/some/work-dir/output/input"] = fakeRemoteInputUnderOutputContainerVolume - stubbedVolumes["/some/work-dir/output/other-output"] = fakeOutputUnderOutputVolume - stubbedVolumes["/some/work-dir/local-input/output"] = fakeOutputUnderInputVolume - }) - - Context("outputs are nested under inputs", func() { - BeforeEach(func() { - containerSpec.Inputs = []InputSource{ - fakeLocalInput, - } - containerSpec.Outputs = OutputPaths{ - "some-output-under-input": "/some/work-dir/local-input/output", - } - }) - - It("creates the container with correct bind mounts", func() { - Expect(fakeGardenClient.CreateCallCount()).To(Equal(1)) - - actualSpec := fakeGardenClient.CreateArgsForCall(0) - Expect(actualSpec).To(Equal(garden.ContainerSpec{ - Handle: "some-handle", - RootFSPath: "some-image-url", - Properties: garden.Properties{"user": "some-user"}, - BindMounts: []garden.BindMount{ - { - SrcPath: "some/source", - DstPath: "some/destination", - Mode: garden.BindMountModeRO, - }, - { - SrcPath: "/fake/scratch/volume", - DstPath: "/scratch", - Mode: garden.BindMountModeRW, - }, - { - SrcPath: "/fake/work-dir/volume", - 
DstPath: "/some/work-dir", - Mode: garden.BindMountModeRW, - }, - { - SrcPath: "/fake/local/cow/volume", - DstPath: "/some/work-dir/local-input", - Mode: garden.BindMountModeRW, - }, - { - SrcPath: "/fake/output/under/input/volume", - DstPath: "/some/work-dir/local-input/output", - Mode: garden.BindMountModeRW, - }, - }, - Limits: garden.Limits{ - CPU: garden.CPULimits{LimitInShares: 1024}, - Memory: garden.MemoryLimits{LimitInBytes: 1024}, - }, - Env: []string{ - "IMAGE=ENV", - "SOME=ENV", - "http_proxy=http://proxy.com", - "https_proxy=https://proxy.com", - "no_proxy=http://noproxy.com", - }, - })) - }) - }) - - Context("inputs are nested under inputs", func() { - BeforeEach(func() { - containerSpec.Inputs = []InputSource{ - fakeRemoteInput, - fakeRemoteInputUnderInput, - } - containerSpec.Outputs = OutputPaths{} - }) - - It("creates the container with correct bind mounts", func() { - Expect(fakeGardenClient.CreateCallCount()).To(Equal(1)) - - actualSpec := fakeGardenClient.CreateArgsForCall(0) - Expect(actualSpec).To(Equal(garden.ContainerSpec{ - Handle: "some-handle", - RootFSPath: "some-image-url", - Properties: garden.Properties{"user": "some-user"}, - BindMounts: []garden.BindMount{ - { - SrcPath: "some/source", - DstPath: "some/destination", - Mode: garden.BindMountModeRO, - }, - { - SrcPath: "/fake/scratch/volume", - DstPath: "/scratch", - Mode: garden.BindMountModeRW, - }, - { - SrcPath: "/fake/work-dir/volume", - DstPath: "/some/work-dir", - Mode: garden.BindMountModeRW, - }, - { - SrcPath: "/fake/remote/input/container/volume", - DstPath: "/some/work-dir/remote-input", - Mode: garden.BindMountModeRW, - }, - { - SrcPath: "/fake/remote/input/other-input/container/volume", - DstPath: "/some/work-dir/remote-input/other-input", - Mode: garden.BindMountModeRW, - }, - }, - Limits: garden.Limits{ - CPU: garden.CPULimits{LimitInShares: 1024}, - Memory: garden.MemoryLimits{LimitInBytes: 1024}, - }, - Env: []string{ - "IMAGE=ENV", - "SOME=ENV", - 
"http_proxy=http://proxy.com", - "https_proxy=https://proxy.com", - "no_proxy=http://noproxy.com", - }, - })) - }) - }) - - Context("outputs are nested under outputs", func() { - BeforeEach(func() { - containerSpec.Inputs = []InputSource{} - containerSpec.Outputs = OutputPaths{ - "some-output": "/some/work-dir/output", - "some-output-under-output": "/some/work-dir/output/other-output", - } - }) - - It("creates the container with correct bind mounts", func() { - Expect(fakeGardenClient.CreateCallCount()).To(Equal(1)) - - actualSpec := fakeGardenClient.CreateArgsForCall(0) - Expect(actualSpec).To(Equal(garden.ContainerSpec{ - Handle: "some-handle", - RootFSPath: "some-image-url", - Properties: garden.Properties{"user": "some-user"}, - BindMounts: []garden.BindMount{ - { - SrcPath: "some/source", - DstPath: "some/destination", - Mode: garden.BindMountModeRO, - }, - { - SrcPath: "/fake/scratch/volume", - DstPath: "/scratch", - Mode: garden.BindMountModeRW, - }, - { - SrcPath: "/fake/work-dir/volume", - DstPath: "/some/work-dir", - Mode: garden.BindMountModeRW, - }, - { - SrcPath: "/fake/output/volume", - DstPath: "/some/work-dir/output", - Mode: garden.BindMountModeRW, - }, - { - SrcPath: "/fake/output/other-output/volume", - DstPath: "/some/work-dir/output/other-output", - Mode: garden.BindMountModeRW, - }, - }, - Limits: garden.Limits{ - CPU: garden.CPULimits{LimitInShares: 1024}, - Memory: garden.MemoryLimits{LimitInBytes: 1024}, - }, - Env: []string{ - "IMAGE=ENV", - "SOME=ENV", - "http_proxy=http://proxy.com", - "https_proxy=https://proxy.com", - "no_proxy=http://noproxy.com", - }, - })) - }) - }) - - Context("inputs are nested under outputs", func() { - BeforeEach(func() { - containerSpec.Inputs = []InputSource{ - fakeRemoteInputUnderOutput, - } - containerSpec.Outputs = OutputPaths{ - "some-output": "/some/work-dir/output", - } - }) - - It("creates the container with correct bind mounts", func() { - Expect(fakeGardenClient.CreateCallCount()).To(Equal(1)) - - 
actualSpec := fakeGardenClient.CreateArgsForCall(0) - Expect(actualSpec).To(Equal(garden.ContainerSpec{ - Handle: "some-handle", - RootFSPath: "some-image-url", - Properties: garden.Properties{"user": "some-user"}, - BindMounts: []garden.BindMount{ - { - SrcPath: "some/source", - DstPath: "some/destination", - Mode: garden.BindMountModeRO, - }, - { - SrcPath: "/fake/scratch/volume", - DstPath: "/scratch", - Mode: garden.BindMountModeRW, - }, - { - SrcPath: "/fake/work-dir/volume", - DstPath: "/some/work-dir", - Mode: garden.BindMountModeRW, - }, - { - SrcPath: "/fake/output/volume", - DstPath: "/some/work-dir/output", - Mode: garden.BindMountModeRW, - }, - { - SrcPath: "/fake/output/input/container/volume", - DstPath: "/some/work-dir/output/input", - Mode: garden.BindMountModeRW, - }, - }, - Limits: garden.Limits{ - CPU: garden.CPULimits{LimitInShares: 1024}, - Memory: garden.MemoryLimits{LimitInBytes: 1024}, - }, - Env: []string{ - "IMAGE=ENV", - "SOME=ENV", - "http_proxy=http://proxy.com", - "https_proxy=https://proxy.com", - "no_proxy=http://noproxy.com", - }, - })) - - }) - }) - - Context("input and output share the same destination path", func() { - BeforeEach(func() { - containerSpec.Inputs = []InputSource{ - fakeRemoteInput, - } - containerSpec.Outputs = OutputPaths{ - "some-output": "/some/work-dir/remote-input", - } - }) - - It("creates the container with correct bind mounts", func() { - Expect(fakeGardenClient.CreateCallCount()).To(Equal(1)) - - actualSpec := fakeGardenClient.CreateArgsForCall(0) - Expect(actualSpec).To(Equal(garden.ContainerSpec{ - Handle: "some-handle", - RootFSPath: "some-image-url", - Properties: garden.Properties{"user": "some-user"}, - BindMounts: []garden.BindMount{ - { - SrcPath: "some/source", - DstPath: "some/destination", - Mode: garden.BindMountModeRO, - }, - { - SrcPath: "/fake/scratch/volume", - DstPath: "/scratch", - Mode: garden.BindMountModeRW, - }, - { - SrcPath: "/fake/work-dir/volume", - DstPath: "/some/work-dir", - 
Mode: garden.BindMountModeRW, - }, - { - SrcPath: "/fake/remote/input/container/volume", - DstPath: "/some/work-dir/remote-input", - Mode: garden.BindMountModeRW, - }, - }, - Limits: garden.Limits{ - CPU: garden.CPULimits{LimitInShares: 1024}, - Memory: garden.MemoryLimits{LimitInBytes: 1024}, - }, - Env: []string{ - "IMAGE=ENV", - "SOME=ENV", - "http_proxy=http://proxy.com", - "https_proxy=https://proxy.com", - "no_proxy=http://noproxy.com", - }, - })) - }) - - }) - }) - - It("creates each volume unprivileged", func() { - Expect(volumeSpecs).To(Equal(map[string]VolumeSpec{ - "/scratch": VolumeSpec{Strategy: baggageclaim.EmptyStrategy{}}, - "/some/work-dir": VolumeSpec{Strategy: baggageclaim.EmptyStrategy{}}, - "/some/work-dir/output": VolumeSpec{Strategy: baggageclaim.EmptyStrategy{}}, - "/some/work-dir/local-input": VolumeSpec{Strategy: fakeLocalVolume.COWStrategy()}, - "/some/work-dir/remote-input": VolumeSpec{Strategy: baggageclaim.EmptyStrategy{}}, - })) - }) - - It("streams remote inputs into newly created container volumes", func() { - Expect(fakeRemoteInputAS.StreamToCallCount()).To(Equal(1)) - _, ad := fakeRemoteInputAS.StreamToArgsForCall(0) - - err := ad.StreamIn(".", bytes.NewBufferString("some-stream")) - Expect(err).ToNot(HaveOccurred()) - - Expect(fakeRemoteInputContainerVolume.StreamInCallCount()).To(Equal(1)) - - dst, from := fakeRemoteInputContainerVolume.StreamInArgsForCall(0) - Expect(dst).To(Equal(".")) - Expect(ioutil.ReadAll(from)).To(Equal([]byte("some-stream"))) - }) - - It("marks container as created", func() { - Expect(fakeCreatingContainer.CreatedCallCount()).To(Equal(1)) - }) - - Context("when the fetched image was privileged", func() { - BeforeEach(func() { - fakeImage.FetchForContainerReturns(FetchedImage{ - Privileged: true, - Metadata: ImageMetadata{ - Env: []string{"IMAGE=ENV"}, - }, - URL: "some-image-url", - }, nil) - }) - - It("creates the container privileged", func() { - Expect(fakeGardenClient.CreateCallCount()).To(Equal(1)) 
- - actualSpec := fakeGardenClient.CreateArgsForCall(0) - Expect(actualSpec.Privileged).To(BeTrue()) - }) - - It("creates each volume privileged", func() { - Expect(volumeSpecs).To(Equal(map[string]VolumeSpec{ - "/scratch": VolumeSpec{Privileged: true, Strategy: baggageclaim.EmptyStrategy{}}, - "/some/work-dir": VolumeSpec{Privileged: true, Strategy: baggageclaim.EmptyStrategy{}}, - "/some/work-dir/output": VolumeSpec{Privileged: true, Strategy: baggageclaim.EmptyStrategy{}}, - "/some/work-dir/local-input": VolumeSpec{Privileged: true, Strategy: fakeLocalVolume.COWStrategy()}, - "/some/work-dir/remote-input": VolumeSpec{Privileged: true, Strategy: baggageclaim.EmptyStrategy{}}, - })) - }) - - }) - - Context("when an input has the path set to the workdir itself", func() { - BeforeEach(func() { - fakeLocalInput.DestinationPathReturns("/some/work-dir") - delete(stubbedVolumes, "/some/work-dir/local-input") - stubbedVolumes["/some/work-dir"] = fakeLocalCOWVolume - }) - - It("does not create or mount a work-dir, as we support this for backwards-compatibility", func() { - Expect(fakeGardenClient.CreateCallCount()).To(Equal(1)) - - actualSpec := fakeGardenClient.CreateArgsForCall(0) - Expect(actualSpec.BindMounts).To(Equal([]garden.BindMount{ - { - SrcPath: "some/source", - DstPath: "some/destination", - Mode: garden.BindMountModeRO, - }, - { - SrcPath: "/fake/scratch/volume", - DstPath: "/scratch", - Mode: garden.BindMountModeRW, - }, - { - SrcPath: "/fake/local/cow/volume", - DstPath: "/some/work-dir", - Mode: garden.BindMountModeRW, - }, - { - SrcPath: "/fake/output/volume", - DstPath: "/some/work-dir/output", - Mode: garden.BindMountModeRW, - }, - { - SrcPath: "/fake/remote/input/container/volume", - DstPath: "/some/work-dir/remote-input", - Mode: garden.BindMountModeRW, - }, - })) - }) - }) - - Context("when failing to create container in garden", func() { - BeforeEach(func() { - fakeGardenClient.CreateReturns(nil, disasterErr) - }) - - It("returns an error", func() 
{ - Expect(findOrCreateErr).To(Equal(disasterErr)) - }) - - It("does not mark container as created", func() { - Expect(fakeCreatingContainer.CreatedCallCount()).To(Equal(0)) - }) - - It("marks the container as failed", func() { - Expect(fakeCreatingContainer.FailedCallCount()).To(Equal(1)) - }) - }) }) }) }) diff --git a/atc/worker/workerfakes/fake_image_factory.go b/atc/worker/workerfakes/fake_image_factory.go index ff127e19f..2fa6ea2df 100644 --- a/atc/worker/workerfakes/fake_image_factory.go +++ b/atc/worker/workerfakes/fake_image_factory.go @@ -5,12 +5,12 @@ import ( "sync" "code.cloudfoundry.org/lager" - "github.com/concourse/concourse/atc/creds" + "github.com/concourse/concourse/atc" "github.com/concourse/concourse/atc/worker" ) type FakeImageFactory struct { - GetImageStub func(lager.Logger, worker.Worker, worker.VolumeClient, worker.ImageSpec, int, worker.ImageFetchingDelegate, creds.VersionedResourceTypes) (worker.Image, error) + GetImageStub func(lager.Logger, worker.Worker, worker.VolumeClient, worker.ImageSpec, int, worker.ImageFetchingDelegate, atc.VersionedResourceTypes) (worker.Image, error) getImageMutex sync.RWMutex getImageArgsForCall []struct { arg1 lager.Logger @@ -19,7 +19,7 @@ type FakeImageFactory struct { arg4 worker.ImageSpec arg5 int arg6 worker.ImageFetchingDelegate - arg7 creds.VersionedResourceTypes + arg7 atc.VersionedResourceTypes } getImageReturns struct { result1 worker.Image @@ -33,7 +33,7 @@ type FakeImageFactory struct { invocationsMutex sync.RWMutex } -func (fake *FakeImageFactory) GetImage(arg1 lager.Logger, arg2 worker.Worker, arg3 worker.VolumeClient, arg4 worker.ImageSpec, arg5 int, arg6 worker.ImageFetchingDelegate, arg7 creds.VersionedResourceTypes) (worker.Image, error) { +func (fake *FakeImageFactory) GetImage(arg1 lager.Logger, arg2 worker.Worker, arg3 worker.VolumeClient, arg4 worker.ImageSpec, arg5 int, arg6 worker.ImageFetchingDelegate, arg7 atc.VersionedResourceTypes) (worker.Image, error) { 
fake.getImageMutex.Lock() ret, specificReturn := fake.getImageReturnsOnCall[len(fake.getImageArgsForCall)] fake.getImageArgsForCall = append(fake.getImageArgsForCall, struct { @@ -43,7 +43,7 @@ func (fake *FakeImageFactory) GetImage(arg1 lager.Logger, arg2 worker.Worker, ar arg4 worker.ImageSpec arg5 int arg6 worker.ImageFetchingDelegate - arg7 creds.VersionedResourceTypes + arg7 atc.VersionedResourceTypes }{arg1, arg2, arg3, arg4, arg5, arg6, arg7}) fake.recordInvocation("GetImage", []interface{}{arg1, arg2, arg3, arg4, arg5, arg6, arg7}) fake.getImageMutex.Unlock() @@ -63,13 +63,13 @@ func (fake *FakeImageFactory) GetImageCallCount() int { return len(fake.getImageArgsForCall) } -func (fake *FakeImageFactory) GetImageCalls(stub func(lager.Logger, worker.Worker, worker.VolumeClient, worker.ImageSpec, int, worker.ImageFetchingDelegate, creds.VersionedResourceTypes) (worker.Image, error)) { +func (fake *FakeImageFactory) GetImageCalls(stub func(lager.Logger, worker.Worker, worker.VolumeClient, worker.ImageSpec, int, worker.ImageFetchingDelegate, atc.VersionedResourceTypes) (worker.Image, error)) { fake.getImageMutex.Lock() defer fake.getImageMutex.Unlock() fake.GetImageStub = stub } -func (fake *FakeImageFactory) GetImageArgsForCall(i int) (lager.Logger, worker.Worker, worker.VolumeClient, worker.ImageSpec, int, worker.ImageFetchingDelegate, creds.VersionedResourceTypes) { +func (fake *FakeImageFactory) GetImageArgsForCall(i int) (lager.Logger, worker.Worker, worker.VolumeClient, worker.ImageSpec, int, worker.ImageFetchingDelegate, atc.VersionedResourceTypes) { fake.getImageMutex.RLock() defer fake.getImageMutex.RUnlock() argsForCall := fake.getImageArgsForCall[i] diff --git a/atc/worker/workerfakes/fake_pool.go b/atc/worker/workerfakes/fake_pool.go index c62908735..04cde297c 100644 --- a/atc/worker/workerfakes/fake_pool.go +++ b/atc/worker/workerfakes/fake_pool.go @@ -2,14 +2,31 @@ package workerfakes import ( + "context" "sync" "code.cloudfoundry.org/lager" 
"github.com/concourse/concourse/atc/db" + "github.com/concourse/concourse/atc/db/lock" "github.com/concourse/concourse/atc/worker" ) type FakePool struct { + AcquireContainerCreatingLockStub func(lager.Logger) (lock.Lock, bool, error) + acquireContainerCreatingLockMutex sync.RWMutex + acquireContainerCreatingLockArgsForCall []struct { + arg1 lager.Logger + } + acquireContainerCreatingLockReturns struct { + result1 lock.Lock + result2 bool + result3 error + } + acquireContainerCreatingLockReturnsOnCall map[int]struct { + result1 lock.Lock + result2 bool + result3 error + } FindOrChooseWorkerStub func(lager.Logger, worker.WorkerSpec) (worker.Worker, error) findOrChooseWorkerMutex sync.RWMutex findOrChooseWorkerArgsForCall []struct { @@ -24,14 +41,16 @@ type FakePool struct { result1 worker.Worker result2 error } - FindOrChooseWorkerForContainerStub func(lager.Logger, db.ContainerOwner, worker.ContainerSpec, worker.WorkerSpec, worker.ContainerPlacementStrategy) (worker.Worker, error) + FindOrChooseWorkerForContainerStub func(context.Context, lager.Logger, db.ContainerOwner, worker.ContainerSpec, db.ContainerMetadata, worker.WorkerSpec, worker.ContainerPlacementStrategy) (worker.Worker, error) findOrChooseWorkerForContainerMutex sync.RWMutex findOrChooseWorkerForContainerArgsForCall []struct { - arg1 lager.Logger - arg2 db.ContainerOwner - arg3 worker.ContainerSpec - arg4 worker.WorkerSpec - arg5 worker.ContainerPlacementStrategy + arg1 context.Context + arg2 lager.Logger + arg3 db.ContainerOwner + arg4 worker.ContainerSpec + arg5 db.ContainerMetadata + arg6 worker.WorkerSpec + arg7 worker.ContainerPlacementStrategy } findOrChooseWorkerForContainerReturns struct { result1 worker.Worker @@ -45,6 +64,72 @@ type FakePool struct { invocationsMutex sync.RWMutex } +func (fake *FakePool) AcquireContainerCreatingLock(arg1 lager.Logger) (lock.Lock, bool, error) { + fake.acquireContainerCreatingLockMutex.Lock() + ret, specificReturn := 
fake.acquireContainerCreatingLockReturnsOnCall[len(fake.acquireContainerCreatingLockArgsForCall)] + fake.acquireContainerCreatingLockArgsForCall = append(fake.acquireContainerCreatingLockArgsForCall, struct { + arg1 lager.Logger + }{arg1}) + fake.recordInvocation("AcquireContainerCreatingLock", []interface{}{arg1}) + fake.acquireContainerCreatingLockMutex.Unlock() + if fake.AcquireContainerCreatingLockStub != nil { + return fake.AcquireContainerCreatingLockStub(arg1) + } + if specificReturn { + return ret.result1, ret.result2, ret.result3 + } + fakeReturns := fake.acquireContainerCreatingLockReturns + return fakeReturns.result1, fakeReturns.result2, fakeReturns.result3 +} + +func (fake *FakePool) AcquireContainerCreatingLockCallCount() int { + fake.acquireContainerCreatingLockMutex.RLock() + defer fake.acquireContainerCreatingLockMutex.RUnlock() + return len(fake.acquireContainerCreatingLockArgsForCall) +} + +func (fake *FakePool) AcquireContainerCreatingLockCalls(stub func(lager.Logger) (lock.Lock, bool, error)) { + fake.acquireContainerCreatingLockMutex.Lock() + defer fake.acquireContainerCreatingLockMutex.Unlock() + fake.AcquireContainerCreatingLockStub = stub +} + +func (fake *FakePool) AcquireContainerCreatingLockArgsForCall(i int) lager.Logger { + fake.acquireContainerCreatingLockMutex.RLock() + defer fake.acquireContainerCreatingLockMutex.RUnlock() + argsForCall := fake.acquireContainerCreatingLockArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakePool) AcquireContainerCreatingLockReturns(result1 lock.Lock, result2 bool, result3 error) { + fake.acquireContainerCreatingLockMutex.Lock() + defer fake.acquireContainerCreatingLockMutex.Unlock() + fake.AcquireContainerCreatingLockStub = nil + fake.acquireContainerCreatingLockReturns = struct { + result1 lock.Lock + result2 bool + result3 error + }{result1, result2, result3} +} + +func (fake *FakePool) AcquireContainerCreatingLockReturnsOnCall(i int, result1 lock.Lock, result2 bool, result3 error) { + 
fake.acquireContainerCreatingLockMutex.Lock() + defer fake.acquireContainerCreatingLockMutex.Unlock() + fake.AcquireContainerCreatingLockStub = nil + if fake.acquireContainerCreatingLockReturnsOnCall == nil { + fake.acquireContainerCreatingLockReturnsOnCall = make(map[int]struct { + result1 lock.Lock + result2 bool + result3 error + }) + } + fake.acquireContainerCreatingLockReturnsOnCall[i] = struct { + result1 lock.Lock + result2 bool + result3 error + }{result1, result2, result3} +} + func (fake *FakePool) FindOrChooseWorker(arg1 lager.Logger, arg2 worker.WorkerSpec) (worker.Worker, error) { fake.findOrChooseWorkerMutex.Lock() ret, specificReturn := fake.findOrChooseWorkerReturnsOnCall[len(fake.findOrChooseWorkerArgsForCall)] @@ -109,20 +194,22 @@ func (fake *FakePool) FindOrChooseWorkerReturnsOnCall(i int, result1 worker.Work }{result1, result2} } -func (fake *FakePool) FindOrChooseWorkerForContainer(arg1 lager.Logger, arg2 db.ContainerOwner, arg3 worker.ContainerSpec, arg4 worker.WorkerSpec, arg5 worker.ContainerPlacementStrategy) (worker.Worker, error) { +func (fake *FakePool) FindOrChooseWorkerForContainer(arg1 context.Context, arg2 lager.Logger, arg3 db.ContainerOwner, arg4 worker.ContainerSpec, arg5 db.ContainerMetadata, arg6 worker.WorkerSpec, arg7 worker.ContainerPlacementStrategy) (worker.Worker, error) { fake.findOrChooseWorkerForContainerMutex.Lock() ret, specificReturn := fake.findOrChooseWorkerForContainerReturnsOnCall[len(fake.findOrChooseWorkerForContainerArgsForCall)] fake.findOrChooseWorkerForContainerArgsForCall = append(fake.findOrChooseWorkerForContainerArgsForCall, struct { - arg1 lager.Logger - arg2 db.ContainerOwner - arg3 worker.ContainerSpec - arg4 worker.WorkerSpec - arg5 worker.ContainerPlacementStrategy - }{arg1, arg2, arg3, arg4, arg5}) - fake.recordInvocation("FindOrChooseWorkerForContainer", []interface{}{arg1, arg2, arg3, arg4, arg5}) + arg1 context.Context + arg2 lager.Logger + arg3 db.ContainerOwner + arg4 worker.ContainerSpec + 
arg5 db.ContainerMetadata + arg6 worker.WorkerSpec + arg7 worker.ContainerPlacementStrategy + }{arg1, arg2, arg3, arg4, arg5, arg6, arg7}) + fake.recordInvocation("FindOrChooseWorkerForContainer", []interface{}{arg1, arg2, arg3, arg4, arg5, arg6, arg7}) fake.findOrChooseWorkerForContainerMutex.Unlock() if fake.FindOrChooseWorkerForContainerStub != nil { - return fake.FindOrChooseWorkerForContainerStub(arg1, arg2, arg3, arg4, arg5) + return fake.FindOrChooseWorkerForContainerStub(arg1, arg2, arg3, arg4, arg5, arg6, arg7) } if specificReturn { return ret.result1, ret.result2 @@ -137,17 +224,17 @@ func (fake *FakePool) FindOrChooseWorkerForContainerCallCount() int { return len(fake.findOrChooseWorkerForContainerArgsForCall) } -func (fake *FakePool) FindOrChooseWorkerForContainerCalls(stub func(lager.Logger, db.ContainerOwner, worker.ContainerSpec, worker.WorkerSpec, worker.ContainerPlacementStrategy) (worker.Worker, error)) { +func (fake *FakePool) FindOrChooseWorkerForContainerCalls(stub func(context.Context, lager.Logger, db.ContainerOwner, worker.ContainerSpec, db.ContainerMetadata, worker.WorkerSpec, worker.ContainerPlacementStrategy) (worker.Worker, error)) { fake.findOrChooseWorkerForContainerMutex.Lock() defer fake.findOrChooseWorkerForContainerMutex.Unlock() fake.FindOrChooseWorkerForContainerStub = stub } -func (fake *FakePool) FindOrChooseWorkerForContainerArgsForCall(i int) (lager.Logger, db.ContainerOwner, worker.ContainerSpec, worker.WorkerSpec, worker.ContainerPlacementStrategy) { +func (fake *FakePool) FindOrChooseWorkerForContainerArgsForCall(i int) (context.Context, lager.Logger, db.ContainerOwner, worker.ContainerSpec, db.ContainerMetadata, worker.WorkerSpec, worker.ContainerPlacementStrategy) { fake.findOrChooseWorkerForContainerMutex.RLock() defer fake.findOrChooseWorkerForContainerMutex.RUnlock() argsForCall := fake.findOrChooseWorkerForContainerArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, 
argsForCall.arg5 + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6, argsForCall.arg7 } func (fake *FakePool) FindOrChooseWorkerForContainerReturns(result1 worker.Worker, result2 error) { @@ -179,6 +266,8 @@ func (fake *FakePool) FindOrChooseWorkerForContainerReturnsOnCall(i int, result1 func (fake *FakePool) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() + fake.acquireContainerCreatingLockMutex.RLock() + defer fake.acquireContainerCreatingLockMutex.RUnlock() fake.findOrChooseWorkerMutex.RLock() defer fake.findOrChooseWorkerMutex.RUnlock() fake.findOrChooseWorkerForContainerMutex.RLock() diff --git a/atc/worker/workerfakes/fake_worker.go b/atc/worker/workerfakes/fake_worker.go index 32d169891..2fea1f27e 100644 --- a/atc/worker/workerfakes/fake_worker.go +++ b/atc/worker/workerfakes/fake_worker.go @@ -9,33 +9,12 @@ import ( "code.cloudfoundry.org/garden" "code.cloudfoundry.org/lager" "github.com/concourse/concourse/atc" - "github.com/concourse/concourse/atc/creds" "github.com/concourse/concourse/atc/db" "github.com/concourse/concourse/atc/worker" "github.com/cppforlife/go-semi-semantic/version" ) type FakeWorker struct { - ActiveContainersStub func() int - activeContainersMutex sync.RWMutex - activeContainersArgsForCall []struct { - } - activeContainersReturns struct { - result1 int - } - activeContainersReturnsOnCall map[int]struct { - result1 int - } - ActiveVolumesStub func() int - activeVolumesMutex sync.RWMutex - activeVolumesArgsForCall []struct { - } - activeVolumesReturns struct { - result1 int - } - activeVolumesReturnsOnCall map[int]struct { - result1 int - } BuildContainersStub func() int buildContainersMutex sync.RWMutex buildContainersArgsForCall []struct { @@ -87,6 +66,20 @@ type FakeWorker struct { descriptionReturnsOnCall map[int]struct { result1 string } + EnsureDBContainerExistsStub func(context.Context, lager.Logger, 
db.ContainerOwner, db.ContainerMetadata) error + ensureDBContainerExistsMutex sync.RWMutex + ensureDBContainerExistsArgsForCall []struct { + arg1 context.Context + arg2 lager.Logger + arg3 db.ContainerOwner + arg4 db.ContainerMetadata + } + ensureDBContainerExistsReturns struct { + result1 error + } + ensureDBContainerExistsReturnsOnCall map[int]struct { + result1 error + } EphemeralStub func() bool ephemeralMutex sync.RWMutex ephemeralArgsForCall []struct { @@ -114,16 +107,15 @@ type FakeWorker struct { result2 bool result3 error } - FindOrCreateContainerStub func(context.Context, lager.Logger, worker.ImageFetchingDelegate, db.ContainerOwner, db.ContainerMetadata, worker.ContainerSpec, creds.VersionedResourceTypes) (worker.Container, error) + FindOrCreateContainerStub func(context.Context, lager.Logger, worker.ImageFetchingDelegate, db.ContainerOwner, worker.ContainerSpec, atc.VersionedResourceTypes) (worker.Container, error) findOrCreateContainerMutex sync.RWMutex findOrCreateContainerArgsForCall []struct { arg1 context.Context arg2 lager.Logger arg3 worker.ImageFetchingDelegate arg4 db.ContainerOwner - arg5 db.ContainerMetadata - arg6 worker.ContainerSpec - arg7 creds.VersionedResourceTypes + arg5 worker.ContainerSpec + arg6 atc.VersionedResourceTypes } findOrCreateContainerReturns struct { result1 worker.Container @@ -272,110 +264,6 @@ type FakeWorker struct { invocationsMutex sync.RWMutex } -func (fake *FakeWorker) ActiveContainers() int { - fake.activeContainersMutex.Lock() - ret, specificReturn := fake.activeContainersReturnsOnCall[len(fake.activeContainersArgsForCall)] - fake.activeContainersArgsForCall = append(fake.activeContainersArgsForCall, struct { - }{}) - fake.recordInvocation("ActiveContainers", []interface{}{}) - fake.activeContainersMutex.Unlock() - if fake.ActiveContainersStub != nil { - return fake.ActiveContainersStub() - } - if specificReturn { - return ret.result1 - } - fakeReturns := fake.activeContainersReturns - return fakeReturns.result1 
-} - -func (fake *FakeWorker) ActiveContainersCallCount() int { - fake.activeContainersMutex.RLock() - defer fake.activeContainersMutex.RUnlock() - return len(fake.activeContainersArgsForCall) -} - -func (fake *FakeWorker) ActiveContainersCalls(stub func() int) { - fake.activeContainersMutex.Lock() - defer fake.activeContainersMutex.Unlock() - fake.ActiveContainersStub = stub -} - -func (fake *FakeWorker) ActiveContainersReturns(result1 int) { - fake.activeContainersMutex.Lock() - defer fake.activeContainersMutex.Unlock() - fake.ActiveContainersStub = nil - fake.activeContainersReturns = struct { - result1 int - }{result1} -} - -func (fake *FakeWorker) ActiveContainersReturnsOnCall(i int, result1 int) { - fake.activeContainersMutex.Lock() - defer fake.activeContainersMutex.Unlock() - fake.ActiveContainersStub = nil - if fake.activeContainersReturnsOnCall == nil { - fake.activeContainersReturnsOnCall = make(map[int]struct { - result1 int - }) - } - fake.activeContainersReturnsOnCall[i] = struct { - result1 int - }{result1} -} - -func (fake *FakeWorker) ActiveVolumes() int { - fake.activeVolumesMutex.Lock() - ret, specificReturn := fake.activeVolumesReturnsOnCall[len(fake.activeVolumesArgsForCall)] - fake.activeVolumesArgsForCall = append(fake.activeVolumesArgsForCall, struct { - }{}) - fake.recordInvocation("ActiveVolumes", []interface{}{}) - fake.activeVolumesMutex.Unlock() - if fake.ActiveVolumesStub != nil { - return fake.ActiveVolumesStub() - } - if specificReturn { - return ret.result1 - } - fakeReturns := fake.activeVolumesReturns - return fakeReturns.result1 -} - -func (fake *FakeWorker) ActiveVolumesCallCount() int { - fake.activeVolumesMutex.RLock() - defer fake.activeVolumesMutex.RUnlock() - return len(fake.activeVolumesArgsForCall) -} - -func (fake *FakeWorker) ActiveVolumesCalls(stub func() int) { - fake.activeVolumesMutex.Lock() - defer fake.activeVolumesMutex.Unlock() - fake.ActiveVolumesStub = stub -} - -func (fake *FakeWorker) 
ActiveVolumesReturns(result1 int) { - fake.activeVolumesMutex.Lock() - defer fake.activeVolumesMutex.Unlock() - fake.ActiveVolumesStub = nil - fake.activeVolumesReturns = struct { - result1 int - }{result1} -} - -func (fake *FakeWorker) ActiveVolumesReturnsOnCall(i int, result1 int) { - fake.activeVolumesMutex.Lock() - defer fake.activeVolumesMutex.Unlock() - fake.ActiveVolumesStub = nil - if fake.activeVolumesReturnsOnCall == nil { - fake.activeVolumesReturnsOnCall = make(map[int]struct { - result1 int - }) - } - fake.activeVolumesReturnsOnCall[i] = struct { - result1 int - }{result1} -} - func (fake *FakeWorker) BuildContainers() int { fake.buildContainersMutex.Lock() ret, specificReturn := fake.buildContainersReturnsOnCall[len(fake.buildContainersArgsForCall)] @@ -612,6 +500,69 @@ func (fake *FakeWorker) DescriptionReturnsOnCall(i int, result1 string) { }{result1} } +func (fake *FakeWorker) EnsureDBContainerExists(arg1 context.Context, arg2 lager.Logger, arg3 db.ContainerOwner, arg4 db.ContainerMetadata) error { + fake.ensureDBContainerExistsMutex.Lock() + ret, specificReturn := fake.ensureDBContainerExistsReturnsOnCall[len(fake.ensureDBContainerExistsArgsForCall)] + fake.ensureDBContainerExistsArgsForCall = append(fake.ensureDBContainerExistsArgsForCall, struct { + arg1 context.Context + arg2 lager.Logger + arg3 db.ContainerOwner + arg4 db.ContainerMetadata + }{arg1, arg2, arg3, arg4}) + fake.recordInvocation("EnsureDBContainerExists", []interface{}{arg1, arg2, arg3, arg4}) + fake.ensureDBContainerExistsMutex.Unlock() + if fake.EnsureDBContainerExistsStub != nil { + return fake.EnsureDBContainerExistsStub(arg1, arg2, arg3, arg4) + } + if specificReturn { + return ret.result1 + } + fakeReturns := fake.ensureDBContainerExistsReturns + return fakeReturns.result1 +} + +func (fake *FakeWorker) EnsureDBContainerExistsCallCount() int { + fake.ensureDBContainerExistsMutex.RLock() + defer fake.ensureDBContainerExistsMutex.RUnlock() + return 
len(fake.ensureDBContainerExistsArgsForCall) +} + +func (fake *FakeWorker) EnsureDBContainerExistsCalls(stub func(context.Context, lager.Logger, db.ContainerOwner, db.ContainerMetadata) error) { + fake.ensureDBContainerExistsMutex.Lock() + defer fake.ensureDBContainerExistsMutex.Unlock() + fake.EnsureDBContainerExistsStub = stub +} + +func (fake *FakeWorker) EnsureDBContainerExistsArgsForCall(i int) (context.Context, lager.Logger, db.ContainerOwner, db.ContainerMetadata) { + fake.ensureDBContainerExistsMutex.RLock() + defer fake.ensureDBContainerExistsMutex.RUnlock() + argsForCall := fake.ensureDBContainerExistsArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 +} + +func (fake *FakeWorker) EnsureDBContainerExistsReturns(result1 error) { + fake.ensureDBContainerExistsMutex.Lock() + defer fake.ensureDBContainerExistsMutex.Unlock() + fake.EnsureDBContainerExistsStub = nil + fake.ensureDBContainerExistsReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeWorker) EnsureDBContainerExistsReturnsOnCall(i int, result1 error) { + fake.ensureDBContainerExistsMutex.Lock() + defer fake.ensureDBContainerExistsMutex.Unlock() + fake.EnsureDBContainerExistsStub = nil + if fake.ensureDBContainerExistsReturnsOnCall == nil { + fake.ensureDBContainerExistsReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.ensureDBContainerExistsReturnsOnCall[i] = struct { + result1 error + }{result1} +} + func (fake *FakeWorker) Ephemeral() bool { fake.ephemeralMutex.Lock() ret, specificReturn := fake.ephemeralReturnsOnCall[len(fake.ephemeralArgsForCall)] @@ -732,7 +683,7 @@ func (fake *FakeWorker) FindContainerByHandleReturnsOnCall(i int, result1 worker }{result1, result2, result3} } -func (fake *FakeWorker) FindOrCreateContainer(arg1 context.Context, arg2 lager.Logger, arg3 worker.ImageFetchingDelegate, arg4 db.ContainerOwner, arg5 db.ContainerMetadata, arg6 worker.ContainerSpec, arg7 creds.VersionedResourceTypes) 
(worker.Container, error) { +func (fake *FakeWorker) FindOrCreateContainer(arg1 context.Context, arg2 lager.Logger, arg3 worker.ImageFetchingDelegate, arg4 db.ContainerOwner, arg5 worker.ContainerSpec, arg6 atc.VersionedResourceTypes) (worker.Container, error) { fake.findOrCreateContainerMutex.Lock() ret, specificReturn := fake.findOrCreateContainerReturnsOnCall[len(fake.findOrCreateContainerArgsForCall)] fake.findOrCreateContainerArgsForCall = append(fake.findOrCreateContainerArgsForCall, struct { @@ -740,14 +691,13 @@ func (fake *FakeWorker) FindOrCreateContainer(arg1 context.Context, arg2 lager.L arg2 lager.Logger arg3 worker.ImageFetchingDelegate arg4 db.ContainerOwner - arg5 db.ContainerMetadata - arg6 worker.ContainerSpec - arg7 creds.VersionedResourceTypes - }{arg1, arg2, arg3, arg4, arg5, arg6, arg7}) - fake.recordInvocation("FindOrCreateContainer", []interface{}{arg1, arg2, arg3, arg4, arg5, arg6, arg7}) + arg5 worker.ContainerSpec + arg6 atc.VersionedResourceTypes + }{arg1, arg2, arg3, arg4, arg5, arg6}) + fake.recordInvocation("FindOrCreateContainer", []interface{}{arg1, arg2, arg3, arg4, arg5, arg6}) fake.findOrCreateContainerMutex.Unlock() if fake.FindOrCreateContainerStub != nil { - return fake.FindOrCreateContainerStub(arg1, arg2, arg3, arg4, arg5, arg6, arg7) + return fake.FindOrCreateContainerStub(arg1, arg2, arg3, arg4, arg5, arg6) } if specificReturn { return ret.result1, ret.result2 @@ -762,17 +712,17 @@ func (fake *FakeWorker) FindOrCreateContainerCallCount() int { return len(fake.findOrCreateContainerArgsForCall) } -func (fake *FakeWorker) FindOrCreateContainerCalls(stub func(context.Context, lager.Logger, worker.ImageFetchingDelegate, db.ContainerOwner, db.ContainerMetadata, worker.ContainerSpec, creds.VersionedResourceTypes) (worker.Container, error)) { +func (fake *FakeWorker) FindOrCreateContainerCalls(stub func(context.Context, lager.Logger, worker.ImageFetchingDelegate, db.ContainerOwner, worker.ContainerSpec, atc.VersionedResourceTypes) 
(worker.Container, error)) { fake.findOrCreateContainerMutex.Lock() defer fake.findOrCreateContainerMutex.Unlock() fake.FindOrCreateContainerStub = stub } -func (fake *FakeWorker) FindOrCreateContainerArgsForCall(i int) (context.Context, lager.Logger, worker.ImageFetchingDelegate, db.ContainerOwner, db.ContainerMetadata, worker.ContainerSpec, creds.VersionedResourceTypes) { +func (fake *FakeWorker) FindOrCreateContainerArgsForCall(i int) (context.Context, lager.Logger, worker.ImageFetchingDelegate, db.ContainerOwner, worker.ContainerSpec, atc.VersionedResourceTypes) { fake.findOrCreateContainerMutex.RLock() defer fake.findOrCreateContainerMutex.RUnlock() argsForCall := fake.findOrCreateContainerArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6, argsForCall.arg7 + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6 } func (fake *FakeWorker) FindOrCreateContainerReturns(result1 worker.Container, result2 error) { @@ -1442,10 +1392,6 @@ func (fake *FakeWorker) UptimeReturnsOnCall(i int, result1 time.Duration) { func (fake *FakeWorker) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() - fake.activeContainersMutex.RLock() - defer fake.activeContainersMutex.RUnlock() - fake.activeVolumesMutex.RLock() - defer fake.activeVolumesMutex.RUnlock() fake.buildContainersMutex.RLock() defer fake.buildContainersMutex.RUnlock() fake.certsVolumeMutex.RLock() @@ -1454,6 +1400,8 @@ func (fake *FakeWorker) Invocations() map[string][][]interface{} { defer fake.createVolumeMutex.RUnlock() fake.descriptionMutex.RLock() defer fake.descriptionMutex.RUnlock() + fake.ensureDBContainerExistsMutex.RLock() + defer fake.ensureDBContainerExistsMutex.RUnlock() fake.ephemeralMutex.RLock() defer fake.ephemeralMutex.RUnlock() fake.findContainerByHandleMutex.RLock() diff --git 
a/fly/commands/completion.go b/fly/commands/completion.go new file mode 100644 index 000000000..12fd3b7a7 --- /dev/null +++ b/fly/commands/completion.go @@ -0,0 +1,38 @@ +package commands + +import "fmt" + +type CompletionCommand struct { + Shell string `long:"shell" required:"true" choice:"bash" choice:"zsh"` // add more choices later +} + +// credits: +// https://godoc.org/github.com/jessevdk/go-flags#hdr-Completion +// https://github.com/concourse/concourse/issues/1309#issuecomment-452893900 +const bashCompletionSnippet = `_fly_compl() { + args=("${COMP_WORDS[@]:1:$COMP_CWORD}") + local IFS=$'\n' + COMPREPLY=($(GO_FLAGS_COMPLETION=1 ${COMP_WORDS[0]} "${args[@]}")) + return 0 +} +complete -F _fly_compl fly +` + +// initial implementation just using bashcompinit +const zshCompletionSnippet = `autoload -Uz compinit && compinit +autoload -Uz bashcompinit && bashcompinit +` + bashCompletionSnippet + +func (command *CompletionCommand) Execute([]string) error { + switch command.Shell { + case "bash": + _, err := fmt.Print(bashCompletionSnippet) + return err + case "zsh": + _, err := fmt.Print(zshCompletionSnippet) + return err + default: + // this should be unreachable + return fmt.Errorf("unknown shell %s", command.Shell) + } +} diff --git a/fly/commands/fly.go b/fly/commands/fly.go index d402fca72..6c3ca833e 100644 --- a/fly/commands/fly.go +++ b/fly/commands/fly.go @@ -74,6 +74,8 @@ type FlyCommand struct { PruneWorker PruneWorkerCommand `command:"prune-worker" alias:"pw" description:"Prune a stalled, landing, landed, or retiring worker"` Curl CurlCommand `command:"curl" alias:"c" description:"curl the api"` + + Completion CompletionCommand `command:"completion" description:"generate shell completion code"` } var Fly FlyCommand diff --git a/fly/commands/get_team.go b/fly/commands/get_team.go index 7ae584031..0a5e80a19 100644 --- a/fly/commands/get_team.go +++ b/fly/commands/get_team.go @@ -15,7 +15,7 @@ import ( type GetTeamCommand struct { Team string `short:"n" 
long:"team" required:"true" description:"Get configuration of this team"` - Json bool `short:"j" long:"json" description:"Print config as json instead of yaml"` + JSON bool `short:"j" long:"json" description:"Print command result as JSON"` } func (command *GetTeamCommand) Execute(args []string) error { @@ -37,7 +37,7 @@ func (command *GetTeamCommand) Execute(args []string) error { return errors.New("team not found") } - if command.Json { + if command.JSON { if err := displayhelpers.JsonPrint(team); err != nil { return err } diff --git a/package.json b/package.json index 71b80e1f7..e6fea5191 100644 --- a/package.json +++ b/package.json @@ -6,26 +6,29 @@ "license": "Apache-2.0", "dependencies": {}, "devDependencies": { + "@mdi/svg": "^3.5.95", "child-process-promise": "^2.2.1", "chokidar-cli": "^1.2.1", + "clean-css-cli": "^4.3.0", "elm": "^0.19.0-bugfix6", "elm-analyse": "^0.16.2", "elm-format": "0.8.1", "elm-test": "^0.19.0-rev6", "less": "^3.0.2", "less-plugin-autoprefix": "^1.5.1", - "less-plugin-clean-css": "^1.5.1", - "@mdi/svg": "^3.5.95", "puppeteer": "^1.12.2", "uglify-js": "^3.3.22" }, + "resolutions": { + "less/request": "^2.86.0" + }, "scripts": { "format": "elm-format --elm-version=0.19 web/elm --yes", "analyse": "cd web/elm && elm-analyse", "build": "yarn run build-less && yarn run build-elm", "build-debug": "yarn run build-less && yarn run build-elm-debug", "test": "cd web/elm && elm-test", - "build-less": "lessc --clean-css=--advanced web/assets/css/main.less web/public/main.css", + "build-less": "lessc web/assets/css/main.less web/public/main.out.css && cleancss -o web/public/main.css web/public/main.out.css && rm web/public/main.out.css", "build-elm": "cd web/elm && elm make --optimize --output ../public/elm.js src/Main.elm && uglifyjs ../public/elm.js --compress 'pure_funcs=[F2,F3,F4,F5,F6,F7,F8,F9,A2,A3,A4,A5,A6,A7,A8,A9],pure_getters,keep_fargs=false,unsafe_comps,unsafe' | uglifyjs --mangle --output=../public/elm.min.js", "build-elm-debug": "cd 
web/elm && elm make --output ../public/elm.js src/Main.elm && uglifyjs < ../public/elm.js > ../public/elm.min.js", "watch": "chokidar -i elm-stuff 'web/elm/src/**/*.elm' 'web/assets/css/*.less' -c 'yarn run build-debug' --initial", diff --git a/topgun/k8s/dns_proxy_test.go b/topgun/k8s/dns_proxy_test.go new file mode 100644 index 000000000..ff1ebba45 --- /dev/null +++ b/topgun/k8s/dns_proxy_test.go @@ -0,0 +1,119 @@ +package k8s_test + +import ( + "github.com/onsi/gomega/gexec" + "time" + + . "github.com/concourse/concourse/topgun" + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/gomega" +) + +var _ = Describe("DNS Resolution", func() { + + var ( + atcEndpoint string + proxySession *gexec.Session + ) + + BeforeEach(func() { + setReleaseNameAndNamespace("dp") + }) + + var setupDeployment = func(dnsProxyEnable, dnsServer string) { + args := []string{ + `--set=worker.replicas=1`, + `--set-string=concourse.worker.garden.dnsProxyEnable=` + dnsProxyEnable, + } + if dnsServer != "" { + args = append(args, + `--set=worker.env[0].name=CONCOURSE_GARDEN_DNS_SERVER`, + `--set=worker.env[0].value=`+dnsServer) + } + deployConcourseChart(releaseName, args...) + waitAllPodsInNamespaceToBeReady(namespace) + + By("Creating the web proxy") + proxySession, atcEndpoint = startPortForwarding(namespace, "service/"+releaseName+"-web", "8080") + + By("Logging in") + fly.Login("test", "test", atcEndpoint) + + By("waiting for a running worker") + Eventually(func() []Worker { + return getRunningWorkers(fly.GetWorkers()) + }, 2*time.Minute, 10*time.Second). + ShouldNot(HaveLen(0)) + } + + var fullAddress = func() string { + return releaseName + "-web." 
+ namespace + ".svc.cluster.local:8080/api/v1/info" + } + + var shortAddress = func() string { + return releaseName + "-web:8080/api/v1/info" + } + + AfterEach(func() { + cleanup(releaseName, namespace, proxySession) + }) + + type Case struct { + // args + enableDnsProxy string + dnsServer string + addressFunction func() string + + // expectations + shouldWork bool + } + + DescribeTable("different proxy settings", + func(c Case) { + setupDeployment(c.enableDnsProxy, c.dnsServer) + + sess := fly.Start("execute", "-c", "../tasks/dns-proxy-task.yml", "-v", "url="+c.addressFunction()) + <-sess.Exited + + if !c.shouldWork { + Expect(sess.ExitCode()).ToNot(BeZero()) + return + } + + Expect(sess.ExitCode()).To(BeZero()) + }, + Entry("Proxy Enabled, with full service name", Case{ + enableDnsProxy: "true", + addressFunction: fullAddress, + shouldWork: true, + }), + Entry("Proxy Enabled, with short service name", Case{ + enableDnsProxy: "true", + addressFunction: shortAddress, + shouldWork: false, + }), + Entry("Proxy Disabled, with full service name", Case{ + enableDnsProxy: "false", + addressFunction: fullAddress, + shouldWork: true, + }), + Entry("Proxy Disabled, with short service name", Case{ + enableDnsProxy: "false", + addressFunction: shortAddress, + shouldWork: true, + }), + Entry("Adding extra dns server, with Proxy Disabled and full address", Case{ + enableDnsProxy: "false", + dnsServer: "8.8.8.8", + addressFunction: fullAddress, + shouldWork: false, + }), + Entry("Adding extra dns server, with Proxy Enabled and full address", Case{ + enableDnsProxy: "true", + dnsServer: "8.8.8.8", + addressFunction: fullAddress, + shouldWork: false, + }), + ) +}) diff --git a/topgun/k8s/kubernetes_creds_mgmt_test.go b/topgun/k8s/kubernetes_creds_mgmt_test.go index 8a4bd2600..f06ae5a10 100644 --- a/topgun/k8s/kubernetes_creds_mgmt_test.go +++ b/topgun/k8s/kubernetes_creds_mgmt_test.go @@ -17,7 +17,6 @@ var _ = Describe("Kubernetes credential management", func() { atcEndpoint 
string username = "test" password = "test" - extraArgs []string ) BeforeEach(func() { @@ -25,11 +24,6 @@ var _ = Describe("Kubernetes credential management", func() { }) JustBeforeEach(func() { - - deployConcourseChart(releaseName, append([]string{ - "--set=worker.replicas=1", - }, extraArgs...)...) - waitAllPodsInNamespaceToBeReady(namespace) By("Creating the web proxy") @@ -54,7 +48,11 @@ var _ = Describe("Kubernetes credential management", func() { } `json:"kubernetes"` } - JustBeforeEach(func() { + BeforeEach(func() { + deployConcourseChart(releaseName, "--set=worker.replicas=1") + }) + + It("Contains kubernetes config", func() { token, err := FetchToken(atcEndpoint, username, password) Expect(err).ToNot(HaveOccurred()) @@ -63,74 +61,124 @@ var _ = Describe("Kubernetes credential management", func() { err = json.Unmarshal(body, &parsedResponse) Expect(err).ToNot(HaveOccurred()) - }) - It("Contains kubernetes config", func() { Expect(parsedResponse.Kubernetes.ConfigPath).To(BeEmpty()) Expect(parsedResponse.Kubernetes.InClusterConfig).To(BeTrue()) Expect(parsedResponse.Kubernetes.NamespaceConfig).To(Equal(releaseName + "-")) }) }) - Context("Consuming per-team k8s secrets", func() { - - JustBeforeEach(func() { - // ((foo)) --> bar - createCredentialSecret(releaseName, "foo", "main", map[string]string{"value": "bar"}) - - // ((caz.baz)) --> zaz - createCredentialSecret(releaseName, "caz", "main", map[string]string{"baz": "zaz"}) - - fly.Run("set-pipeline", "-n", - "-c", "../pipelines/minimal-credential-management.yml", - "-p", "pipeline", + Context("Consuming k8s credentials", func() { + var cachingSetup = func() { + deployConcourseChart(releaseName, "--set=worker.replicas=1", + "--set=concourse.web.secretCacheEnabled=true", + "--set=concourse.web.secretCacheDuration=600", ) - - fly.Run("unpause-pipeline", "-p", "pipeline") - - }) - - var runsBuildWithCredentialsResolved = func() { - session := fly.Start("trigger-job", "-j", "pipeline/unit", "-w") - Wait(session) 
- - Expect(string(session.Out.Contents())).To(ContainSubstring("bar")) - Expect(string(session.Out.Contents())).To(ContainSubstring("zaz")) } - Context("using the default namespace created by the chart", func() { - It("succeeds", runsBuildWithCredentialsResolved) + var disableTeamNamespaces = func() { + By("creating a namespace made by the user instead of the chart") + Run(nil, "kubectl", "create", "namespace", releaseName+"-main") + + deployConcourseChart(releaseName, "--set=worker.replicas=1", + "--set=concourse.web.secretCacheEnabled=true", + "--set=concourse.web.secretCacheDuration=600", + "--set=concourse.web.kubernetes.createTeamNamespaces=false", + ) + } + + Context("using per-team credentials", func() { + secretNameFoo := "foo" + secretNameCaz := "caz" + + Context("using the default namespace created by the chart", func() { + BeforeEach(func() { + deployConcourseChart(releaseName, "--set=worker.replicas=1") + }) + + It("succeeds", func() { + runsBuildWithCredentialsResolved(secretNameFoo, secretNameCaz) + }) + }) + + Context("with caching enabled", func() { + BeforeEach(cachingSetup) + + It("gets cached credentials", func() { + runGetsCachedCredentials(secretNameFoo, secretNameCaz) + }) + }) + + Context("using a user-provided namespace", func() { + BeforeEach(disableTeamNamespaces) + + It("succeeds", func() { + runsBuildWithCredentialsResolved(secretNameFoo, secretNameCaz) + }) + + AfterEach(func() { + Run(nil, "kubectl", "delete", "namespace", releaseName+"-main", "--wait=false") + }) + }) + }) - Context("with caching enabled", func() { - BeforeEach(func() { - extraArgs = []string{ - "--set=concourse.web.secretCacheEnabled=true", - "--set=concourse.web.secretCacheDuration=600", - } + Context("using per-pipeline credentials", func() { + secretNameFoo := "pipeline.foo" + secretNameCaz := "pipeline.caz" + + Context("using the default namespace created by the chart", func() { + BeforeEach(func() { + deployConcourseChart(releaseName, "--set=worker.replicas=1") 
+ }) + + It("succeeds", func() { + runsBuildWithCredentialsResolved(secretNameFoo, secretNameCaz) + }) }) - It("gets cached credentials", func() { - runsBuildWithCredentialsResolved() - deleteSecret(releaseName, "main", "foo") - deleteSecret(releaseName, "main", "caz") - runsBuildWithCredentialsResolved() + Context("with caching enabled", func() { + BeforeEach(cachingSetup) + + It("gets cached credentials", func() { + runGetsCachedCredentials(secretNameFoo, secretNameCaz) + }) + }) + + Context("using a user-provided namespace", func() { + BeforeEach(disableTeamNamespaces) + + It("succeeds", func() { + runsBuildWithCredentialsResolved(secretNameFoo, secretNameCaz) + }) + + AfterEach(func() { + Run(nil, "kubectl", "delete", "namespace", releaseName+"-main", "--wait=false") + }) }) }) + }) - Context("using a user-provided namespace", func() { - BeforeEach(func() { - Run(nil, "kubectl", "create", "namespace", releaseName+"-main") - extraArgs = []string{ - "--set=concourse.web.kubernetes.createTeamNamespaces=false", - } - }) + Context("one-off build", func() { + BeforeEach(func() { + deployConcourseChart(releaseName, "--set=worker.replicas=1") + }) - It("succeeds", runsBuildWithCredentialsResolved) + It("runs the one-off build successfully", func() { + By("creating the secret in the main team") + createCredentialSecret(releaseName, "some-secret", "main", map[string]string{"value": "mysecret"}) - AfterEach(func() { - Run(nil, "kubectl", "delete", "namespace", releaseName+"-main", "--wait=false") - }) + By("successfully running the one-off build") + fly.Run("execute", + "-c", "../tasks/simple-secret.yml") + }) + + It("one-off build fails", func() { + By("not creating the secret") + sess := fly.Start("execute", + "-c", "../tasks/simple-secret.yml") + <-sess.Exited + Expect(sess.ExitCode()).NotTo(Equal(0)) }) }) @@ -159,3 +207,31 @@ func createCredentialSecret(releaseName, secretName, team string, kv map[string] Run(nil, "kubectl", args...) 
} + +func runsBuildWithCredentialsResolved(normalSecret string, specialKeySecret string) { + By("creating credentials in k8s credential manager") + createCredentialSecret(releaseName, normalSecret, "main", map[string]string{"value": "bar"}) + createCredentialSecret(releaseName, specialKeySecret, "main", map[string]string{"baz": "zaz"}) + + fly.Run("set-pipeline", "-n", + "-c", "../pipelines/minimal-credential-management.yml", + "-p", "pipeline", + ) + + fly.Run("unpause-pipeline", "-p", "pipeline") + + session := fly.Start("trigger-job", "-j", "pipeline/unit", "-w") + Wait(session) + + By("seeing the credentials were resolved by concourse") + Expect(string(session.Out.Contents())).To(ContainSubstring("bar")) + Expect(string(session.Out.Contents())).To(ContainSubstring("zaz")) +} + +func runGetsCachedCredentials(secretNameFoo string, secretNameCaz string) { + runsBuildWithCredentialsResolved(secretNameFoo, secretNameCaz) + deleteSecret(releaseName, "main", secretNameFoo) + deleteSecret(releaseName, "main", secretNameCaz) + By("seeing that concourse uses the cached credentials") + runsBuildWithCredentialsResolved(secretNameFoo, secretNameCaz) +} diff --git a/topgun/k8s/web_scaling_test.go b/topgun/k8s/web_scaling_test.go new file mode 100644 index 000000000..ccd96c98a --- /dev/null +++ b/topgun/k8s/web_scaling_test.go @@ -0,0 +1,49 @@ +package k8s_test + +import ( + "strconv" + "time" + + . "github.com/concourse/concourse/topgun" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Scaling web instances", func() { + + BeforeEach(func() { + setReleaseNameAndNamespace("swi") + }) + + AfterEach(func() { + cleanup(releaseName, namespace, nil) + }) + + It("succeeds", func() { + successfullyDeploysConcourse(1, 1) + successfullyDeploysConcourse(0, 1) + successfullyDeploysConcourse(2, 1) + }) +}) + +func successfullyDeploysConcourse(webReplicas, workerReplicas int) { + deployConcourseChart(releaseName, + "--set=web.replicas="+strconv.Itoa(webReplicas), + "--set=worker.replicas="+strconv.Itoa(workerReplicas), + ) + + waitAllPodsInNamespaceToBeReady(namespace) + + By("Creating the web proxy") + proxySession, atcEndpoint := startPortForwarding(namespace, "service/"+releaseName+"-web", "8080") + defer proxySession.Interrupt() + + By("Logging in") + fly.Login("test", "test", atcEndpoint) + + By("waiting for a running worker") + Eventually(func() []Worker { + return getRunningWorkers(fly.GetWorkers()) + }, 2*time.Minute, 10*time.Second). + Should(HaveLen(workerReplicas)) +} diff --git a/topgun/operations/bbr-concourse-link.yml b/topgun/operations/bbr-concourse-link.yml index 36399d39a..f87093f10 100644 --- a/topgun/operations/bbr-concourse-link.yml +++ b/topgun/operations/bbr-concourse-link.yml @@ -1,3 +1,33 @@ +# use non-default values to ensure concourse_db bosh links are used +- type: replace + path: /instance_groups/name=web/jobs/name=web/properties/postgresql/role + value: + name: concourse + password: dummy-password + +- type: replace + path: /instance_groups/name=db/jobs/name=postgres/properties/databases/roles/0 + value: + name: concourse + password: dummy-password + +- type: replace + path: /instance_groups/name=web/jobs/name=web/properties/postgresql/database + value: atcdb + +- type: replace + path: /instance_groups/name=db/jobs/name=postgres/properties/databases/databases/0 + value: + name: atcdb + +- type: replace + path: /instance_groups/name=web/jobs/name=web/properties/postgresql/port? 
+ value: 5433 + +- type: replace + path: /instance_groups/name=db/jobs/name=postgres/properties/databases/port + value: 5433 + - type: replace path: /releases/- value: diff --git a/topgun/operations/bbr-with-properties.yml b/topgun/operations/bbr-with-properties.yml index dbe921d0c..b113e231b 100644 --- a/topgun/operations/bbr-with-properties.yml +++ b/topgun/operations/bbr-with-properties.yml @@ -16,6 +16,35 @@ release: backup-and-restore-sdk properties: {} +- type: replace + path: /instance_groups/name=web/jobs/name=web/properties/postgresql/role + value: + name: concourse + password: dummy-password + +- type: replace + path: /instance_groups/name=db/jobs/name=postgres/properties/databases/roles/0 + value: + name: concourse + password: dummy-password + +- type: replace + path: /instance_groups/name=web/jobs/name=web/properties/postgresql/database + value: atcdb + +- type: replace + path: /instance_groups/name=db/jobs/name=postgres/properties/databases/databases/0 + value: + name: atcdb + +- type: replace + path: /instance_groups/name=web/jobs/name=web/properties/postgresql/port? 
+ value: 5433 + +- type: replace + path: /instance_groups/name=db/jobs/name=postgres/properties/databases/port + value: 5433 + - type: replace path: /instance_groups/name=db/jobs/- value: @@ -23,7 +52,8 @@ name: bbr-atcdb properties: postgresql: - database: atc + database: atcdb + port: 5433 role: - name: atc + name: concourse password: dummy-password diff --git a/topgun/pipelines/lots-ata-time-2.yml b/topgun/pipelines/lots-ata-time-2.yml index e1638f807..f3f3b433c 100644 --- a/topgun/pipelines/lots-ata-time-2.yml +++ b/topgun/pipelines/lots-ata-time-2.yml @@ -9,84 +9,212 @@ jobs: plan: - get: timer trigger: true - - task: exit - config: - platform: linux - image_resource: - type: registry-image - source: {repository: concourse/dev} - run: - path: /bin/sh - args: - - -c - - | - exit 2 + - in_parallel: + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 - name: exiter2 plan: - get: timer trigger: true - - task: exit - config: - platform: linux - image_resource: - type: registry-image - source: {repository: concourse/dev} - run: - path: /bin/sh - args: - - -c - - | - exit 2 + - in_parallel: + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - task: exit + config: + platform: linux + image_resource: + type: 
registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 - name: exiter3 plan: - get: timer trigger: true - - task: exit - config: - platform: linux - image_resource: - type: registry-image - source: {repository: concourse/dev} - run: - path: /bin/sh - args: - - -c - - | - exit 2 + - in_parallel: + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - name: exiter4 plan: - get: timer trigger: true - - task: exit - config: - platform: linux - image_resource: - type: registry-image - source: {repository: concourse/dev} - run: - path: /bin/sh - args: - - -c - - | - exit 2 + - in_parallel: + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - name: exiter5 plan: - get: timer trigger: true - - task: exit - config: - platform: linux - image_resource: - type: registry-image - source: {repository: concourse/dev} - run: - path: /bin/sh - args: - - -c - - | - exit 2 + - in_parallel: + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: 
concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - name: exiter6 plan: - get: timer diff --git a/topgun/pipelines/lots-ata-time.yml b/topgun/pipelines/lots-ata-time.yml index b28e065d8..eb53617b3 100644 --- a/topgun/pipelines/lots-ata-time.yml +++ b/topgun/pipelines/lots-ata-time.yml @@ -9,84 +9,212 @@ jobs: plan: - get: timer trigger: true - - task: exit - config: - platform: linux - image_resource: - type: registry-image - source: {repository: concourse/dev} - run: - path: /bin/sh - args: - - -c - - | - exit 2 + - in_parallel: + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 - name: exiter2 plan: - get: timer trigger: true - - task: exit - config: - platform: linux - image_resource: - type: registry-image - source: {repository: concourse/dev} - run: - path: /bin/sh - args: - - -c - - | - exit 2 + - in_parallel: + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: 
+ - -c + - | + exit 2 + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 - name: exiter3 plan: - get: timer trigger: true - - task: exit - config: - platform: linux - image_resource: - type: registry-image - source: {repository: concourse/dev} - run: - path: /bin/sh - args: - - -c - - | - exit 2 + - in_parallel: + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - name: exiter4 plan: - get: timer trigger: true - - task: exit - config: - platform: linux - image_resource: - type: registry-image - source: {repository: concourse/dev} - run: - path: /bin/sh - args: - - -c - - | - exit 2 + - in_parallel: + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - name: exiter5 plan: - get: timer trigger: true - - task: exit - config: - platform: linux - image_resource: - type: registry-image - source: {repository: concourse/dev} - run: - path: /bin/sh - args: - - -c - - | - exit 2 + - in_parallel: + - task: exit + config: 
+ platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - task: exit + config: + platform: linux + image_resource: + type: registry-image + source: {repository: concourse/dev} + run: + path: /bin/sh + args: + - -c + - | + exit 2 + - name: exiter6 plan: - get: timer diff --git a/topgun/tasks/dns-proxy-task.yml b/topgun/tasks/dns-proxy-task.yml new file mode 100644 index 000000000..ce1218358 --- /dev/null +++ b/topgun/tasks/dns-proxy-task.yml @@ -0,0 +1,12 @@ +--- +platform: linux + +image_resource: + type: registry-image + source: + repository: busybox +run: + path: "wget" + args: + - "-O-" + - "((url))" diff --git a/topgun/tasks/simple-secret.yml b/topgun/tasks/simple-secret.yml new file mode 100644 index 000000000..520c09cb0 --- /dev/null +++ b/topgun/tasks/simple-secret.yml @@ -0,0 +1,12 @@ +--- +platform: linux + +image_resource: + type: docker-image + source: + repository: busybox +run: + path: sh + args: + - -c + - echo "((some-secret))" \ No newline at end of file diff --git a/topgun/worker_landing_test.go b/topgun/worker_landing_test.go index 3c4296180..b36fa0f23 100644 --- a/topgun/worker_landing_test.go +++ b/topgun/worker_landing_test.go @@ -122,20 +122,7 @@ var _ = Describe("Worker landing", func() { preservedContainerID = string(hijackSession.Out.Contents()) }) - //TODO: This test, as written, may not be valuable. - // It is failing because sometimes, by the - // time you hijack, there is already a new - // check container scheduled on the worker. - // In this case fly hijack will show a - // prompt to choose a container. Though - // it is logical to only have one check - // container per session, this behaviour - // is not harmful. 
It's possible this test - // should be revised to test something more - // critical, like task containers not being - // killed or recreated when interrupted by - // a worker restart. - XIt("keeps volumes and containers after restart", func() { + It("keeps volumes and containers after restart", func() { By("starting the worker back up") waitForLandedWorker() startLandedWorker(landingWorkerInstance) diff --git a/web/assets/css/production.less b/web/assets/css/production.less index 250c54077..292ffb140 100644 --- a/web/assets/css/production.less +++ b/web/assets/css/production.less @@ -164,10 +164,6 @@ li.prep-status { } } -.step-collapsed { - display: none; -} - .step-body { padding: 10px; } diff --git a/web/elm/src/Application/Application.elm b/web/elm/src/Application/Application.elm index c326b2377..a665a2ff7 100644 --- a/web/elm/src/Application/Application.elm +++ b/web/elm/src/Application/Application.elm @@ -42,7 +42,6 @@ type alias Flags = , notFoundImgSrc : String , csrfToken : Concourse.CSRFToken , authToken : String - , clusterName : String , pipelineRunningKeyframes : String } @@ -64,7 +63,7 @@ init flags url = session = { userState = UserStateUnknown , hovered = HoverState.NoHover - , clusterName = flags.clusterName + , clusterName = "" , turbulenceImgSrc = flags.turbulenceImgSrc , notFoundImgSrc = flags.notFoundImgSrc , csrfToken = flags.csrfToken @@ -98,7 +97,7 @@ init flags url = ] in ( model - , [ FetchUser, GetScreenSize, LoadSideBarState ] + , [ FetchUser, GetScreenSize, LoadSideBarState, FetchClusterInfo ] ++ handleTokenEffect ++ subEffects ) @@ -199,6 +198,16 @@ handleCallback callback model = in subpageHandleCallback callback ( { model | session = newSession }, [] ) + ClusterInfoFetched (Ok { clusterName }) -> + let + session = + model.session + + newSession = + { session | clusterName = clusterName } + in + subpageHandleCallback callback ( { model | session = newSession }, [] ) + ScreenResized viewport -> let session = diff --git 
a/web/elm/src/Build/Build.elm b/web/elm/src/Build/Build.elm index 5bc6bb806..cfe6e04b5 100644 --- a/web/elm/src/Build/Build.elm +++ b/web/elm/src/Build/Build.elm @@ -14,7 +14,14 @@ module Build.Build exposing ) import Application.Models exposing (Session) -import Build.Models exposing (BuildPageType(..), CurrentBuild, Model) +import Build.Models + exposing + ( BuildPageType(..) + , CurrentBuild + , CurrentOutput(..) + , Model + , toMaybe + ) import Build.Output.Models exposing (OutputModel) import Build.Output.Output import Build.StepTree.Models as STModels @@ -122,7 +129,7 @@ subscriptions model = buildEventsUrl = model.currentBuild |> RemoteData.toMaybe - |> Maybe.andThen .output + |> Maybe.andThen (.output >> toMaybe) |> Maybe.andThen .eventStreamUrlPath in [ OnClockTick OneSecond @@ -152,7 +159,7 @@ changeToBuild { highlight, pageType } ( model, effects ) = newBuild = RemoteData.map - (\cb -> { cb | prep = Nothing, output = Nothing }) + (\cb -> { cb | prep = Nothing, output = Empty }) model.currentBuild in ( { model @@ -273,24 +280,28 @@ handleCallback action ( model, effects ) = ] ) - PlanAndResourcesFetched buildId (Err err) -> + PlanAndResourcesFetched _ (Err err) -> case err of Http.BadStatus { status } -> - if status.code == 404 then - let - url = - "/api/v1/builds/" - ++ String.fromInt buildId - ++ "/events" - in - updateOutput - (\m -> - ( { m | eventStreamUrlPath = Just url } - , [] - , Build.Output.Output.OutNoop - ) - ) - ( model, effects ) + let + isAborted = + model.currentBuild + |> RemoteData.map + (.build + >> .status + >> (==) Concourse.BuildStatusAborted + ) + |> RemoteData.withDefault False + in + if status.code == 404 && isAborted then + ( { model + | currentBuild = + RemoteData.map + (\cb -> { cb | output = Cancelled }) + model.currentBuild + } + , effects + ) else if status.code == 401 then ( { model | authorized = False }, effects ) @@ -357,7 +368,7 @@ handleDelivery session delivery ( model, effects ) = eventSourceClosed = 
model.currentBuild |> RemoteData.toMaybe - |> Maybe.andThen .output + |> Maybe.andThen (.output >> toMaybe) |> Maybe.map (.eventSourceOpened >> not) |> Maybe.withDefault False @@ -580,14 +591,14 @@ updateOutput updater ( model, effects ) = currentBuild = model.currentBuild |> RemoteData.toMaybe in - case ( currentBuild, currentBuild |> Maybe.andThen .output ) of + case ( currentBuild, currentBuild |> Maybe.andThen (.output >> toMaybe) ) of ( Just cb, Just output ) -> let ( newOutput, outputEffects, outMsg ) = updater output in handleOutMsg outMsg - ( { model | currentBuild = RemoteData.Success { cb | output = Just newOutput } } + ( { model | currentBuild = RemoteData.Success { cb | output = Output newOutput } } , effects ++ outputEffects ) @@ -730,7 +741,7 @@ handleBuildFetched browsingIndex build ( model, effects ) = Nothing -> { build = build , prep = Nothing - , output = Nothing + , output = Empty } Just cb -> @@ -806,7 +817,7 @@ initBuildOutput build ( model, effects ) = ( { model | currentBuild = RemoteData.map - (\info -> { info | output = Just output }) + (\info -> { info | output = Output output }) model.currentBuild } , effects ++ outputCmd @@ -1134,13 +1145,18 @@ mmDDYY = ] -viewBuildOutput : Session -> Maybe OutputModel -> Html Message +viewBuildOutput : Session -> CurrentOutput -> Html Message viewBuildOutput session output = case output of - Just o -> + Output o -> Build.Output.Output.view session o - Nothing -> + Cancelled -> + Html.div + Styles.errorLog + [ Html.text "build cancelled" ] + + Empty -> Html.div [] [] diff --git a/web/elm/src/Build/Models.elm b/web/elm/src/Build/Models.elm index b90d293b8..8689731a4 100644 --- a/web/elm/src/Build/Models.elm +++ b/web/elm/src/Build/Models.elm @@ -1,8 +1,10 @@ module Build.Models exposing ( BuildPageType(..) , CurrentBuild + , CurrentOutput(..) , Model , StepHeaderType(..) 
+ , toMaybe ) import Build.Output.Models exposing (OutputModel) @@ -44,10 +46,29 @@ type alias Model = type alias CurrentBuild = { build : Concourse.Build , prep : Maybe Concourse.BuildPrep - , output : Maybe OutputModel + , output : CurrentOutput } +type CurrentOutput + = Empty + | Cancelled + | Output OutputModel + + +toMaybe : CurrentOutput -> Maybe OutputModel +toMaybe currentOutput = + case currentOutput of + Empty -> + Nothing + + Cancelled -> + Nothing + + Output outputModel -> + Just outputModel + + type BuildPageType = OneOffBuildPage Concourse.BuildId | JobBuildPage Concourse.JobBuildIdentifier diff --git a/web/elm/src/Build/StepTree/Models.elm b/web/elm/src/Build/StepTree/Models.elm index 5c9ec05ec..689efbcf7 100644 --- a/web/elm/src/Build/StepTree/Models.elm +++ b/web/elm/src/Build/StepTree/Models.elm @@ -67,7 +67,7 @@ type alias Step = , state : StepState , log : Ansi.Log.Model , error : Maybe String - , expanded : Maybe Bool + , expanded : Bool , version : Maybe Version , metadata : List MetadataField , firstOccurrence : Bool diff --git a/web/elm/src/Build/StepTree/StepTree.elm b/web/elm/src/Build/StepTree/StepTree.elm index 5341c3bb1..54b332606 100644 --- a/web/elm/src/Build/StepTree/StepTree.elm +++ b/web/elm/src/Build/StepTree/StepTree.elm @@ -160,21 +160,21 @@ initBottom hl create id name = , expanded = case hl of HighlightNothing -> - Nothing + False HighlightLine stepID _ -> if id == stepID then - Just True + True else - Nothing + False HighlightRange stepID _ _ -> if id == stepID then - Just True + True else - Nothing + False , version = Nothing , metadata = [] , firstOccurrence = False @@ -319,10 +319,7 @@ finished root = toggleStep : StepID -> StepTreeModel -> ( StepTreeModel, List Effect ) toggleStep id root = - ( updateAt - id - (map (\step -> { step | expanded = toggleExpanded step })) - root + ( updateAt id (map (\step -> { step | expanded = not step.expanded })) root , [] ) @@ -374,11 +371,6 @@ extendHighlight id line root = ( { root | 
highlight = hl }, [ ModifyUrl (showHighlight hl) ] ) -toggleExpanded : Step -> Maybe Bool -toggleExpanded { expanded, state } = - Just <| not <| Maybe.withDefault (autoExpanded state) expanded - - updateTooltip : { a | hovered : HoverState.HoverState } -> { b | hoveredCounter : Int } @@ -550,11 +542,6 @@ isActive state = state /= StepStatePending && state /= StepStateCancelled -autoExpanded : StepState -> Bool -autoExpanded state = - isActive state && state /= StepStateSucceeded - - viewStep : StepTreeModel -> Session -> Step -> StepHeaderType -> Html Message viewStep model session { id, name, log, state, error, expanded, version, metadata, timestamps, initialize, start, finish } headerType = Html.div @@ -581,15 +568,11 @@ viewStep model session { id, name, log, state, error, expanded, version, metadat , viewStepState state id (viewDurationTooltip initialize start finish (model.tooltip == Just (StepState id))) ] ] - , Html.div - [ classList - [ ( "step-body", True ) - , ( "clearfix", True ) - , ( "step-collapsed", not <| Maybe.withDefault (autoExpanded state) expanded ) + , if expanded then + Html.div + [ class "step-body" + , class "clearfix" ] - ] - <| - if Maybe.withDefault (autoExpanded state) (Maybe.map (always True) expanded) then [ viewMetadata metadata , Html.pre [ class "timestamped-logs" ] <| viewLogs log timestamps model.highlight session.timeZone id @@ -601,8 +584,8 @@ viewStep model session { id, name, log, state, error, expanded, version, metadat Html.span [ class "error" ] [ Html.pre [] [ Html.text msg ] ] ] - else - [] + else + Html.text "" ] diff --git a/web/elm/src/Build/Styles.elm b/web/elm/src/Build/Styles.elm index 4b23e577b..e53b4fd81 100644 --- a/web/elm/src/Build/Styles.elm +++ b/web/elm/src/Build/Styles.elm @@ -3,6 +3,7 @@ module Build.Styles exposing , body , durationTooltip , durationTooltipArrow + , errorLog , firstOccurrenceTooltip , firstOccurrenceTooltipArrow , header @@ -54,7 +55,10 @@ header status = body : List (Html.Attribute msg) 
body = - [ style "overflow-y" "auto", style "outline" "none" ] + [ style "overflow-y" "auto" + , style "outline" "none" + , style "-webkit-overflow-scrolling" "touch" + ] historyItem : Concourse.BuildStatus -> List (Html.Attribute msg) @@ -235,6 +239,14 @@ durationTooltipArrow = ] +errorLog : List (Html.Attribute msg) +errorLog = + [ style "color" Colors.errorLog + , style "background-color" Colors.frame + , style "padding" "5px 10px" + ] + + retryTabList : List (Html.Attribute msg) retryTabList = [ style "margin" "0" diff --git a/web/elm/src/Colors.elm b/web/elm/src/Colors.elm index d4c3d728f..91591ec3a 100644 --- a/web/elm/src/Colors.elm +++ b/web/elm/src/Colors.elm @@ -14,6 +14,7 @@ module Colors exposing , dropdownUnselectedText , error , errorFaded + , errorLog , failure , failureFaded , flySuccessButtonHover @@ -278,6 +279,11 @@ sideBar = "#333333" +errorLog : String +errorLog = + "#e74c3c" + + retryTabText : String retryTabText = "#f5f5f5" diff --git a/web/elm/src/Concourse.elm b/web/elm/src/Concourse.elm index 0946aa4e9..08e8b3bea 100644 --- a/web/elm/src/Concourse.elm +++ b/web/elm/src/Concourse.elm @@ -16,7 +16,7 @@ module Concourse exposing , BuildStep(..) 
, CSRFToken , Cause - , ConcourseVersion + , ClusterInfo , HookedPlan , Job , JobBuildIdentifier @@ -523,13 +523,17 @@ decodeBuildStepTimeout = -- Info -type alias ConcourseVersion = - String +type alias ClusterInfo = + { version : String + , clusterName : String + } -decodeInfo : Json.Decode.Decoder ConcourseVersion +decodeInfo : Json.Decode.Decoder ClusterInfo decodeInfo = - Json.Decode.field "version" Json.Decode.string + Json.Decode.succeed ClusterInfo + |> andMap (Json.Decode.field "version" Json.Decode.string) + |> andMap (defaultTo "" <| Json.Decode.field "cluster_name" Json.Decode.string) diff --git a/web/elm/src/Dashboard/Dashboard.elm b/web/elm/src/Dashboard/Dashboard.elm index 296a3c475..b9974147a 100644 --- a/web/elm/src/Dashboard/Dashboard.elm +++ b/web/elm/src/Dashboard/Dashboard.elm @@ -78,7 +78,6 @@ type alias Flags = { turbulencePath : String , searchType : Routes.SearchType , pipelineRunningKeyframes : String - , clusterName : String } @@ -102,7 +101,6 @@ init flags = , query = Routes.extractQuery flags.searchType , isUserMenuExpanded = False , dropdown = Hidden - , clusterName = flags.clusterName } , [ FetchData , PinTeamNames Message.Effects.stickyHeaderConfig @@ -492,7 +490,7 @@ topBar session model = [ Html.div [ style "display" "flex", style "align-items" "center" ] [ SideBar.hamburgerMenu session , Html.a (href "/" :: Views.Styles.concourseLogo) [] - , clusterName model + , clusterNameView session ] ] ++ (let @@ -519,11 +517,11 @@ topBar session model = ) -clusterName : Model -> Html Message -clusterName model = +clusterNameView : Session -> Html Message +clusterNameView session = Html.div Styles.clusterName - [ Html.text model.clusterName ] + [ Html.text session.clusterName ] dashboardView : diff --git a/web/elm/src/Dashboard/Models.elm b/web/elm/src/Dashboard/Models.elm index 7d77d820c..6075ae108 100644 --- a/web/elm/src/Dashboard/Models.elm +++ b/web/elm/src/Dashboard/Models.elm @@ -30,7 +30,6 @@ type alias Model = , userState : 
UserState.UserState , highDensity : Bool , query : String - , clusterName : String } ) diff --git a/web/elm/src/Dashboard/Styles.elm b/web/elm/src/Dashboard/Styles.elm index e35306bc1..f71724686 100644 --- a/web/elm/src/Dashboard/Styles.elm +++ b/web/elm/src/Dashboard/Styles.elm @@ -79,6 +79,7 @@ content highDensity = , style "height" "100%" , style "width" "100%" , style "box-sizing" "border-box" + , style "-webkit-overflow-scrolling" "touch" , style "flex-direction" <| if highDensity then "column" diff --git a/web/elm/src/Message/Callback.elm b/web/elm/src/Message/Callback.elm index 1d86031c1..77890426c 100644 --- a/web/elm/src/Message/Callback.elm +++ b/web/elm/src/Message/Callback.elm @@ -33,7 +33,7 @@ type Callback | BuildResourcesFetched (Fetched ( Int, Concourse.BuildResources )) | ResourceFetched (Fetched Concourse.Resource) | VersionedResourcesFetched (Fetched ( Maybe Page, Paginated Concourse.VersionedResource )) - | VersionFetched (Fetched String) + | ClusterInfoFetched (Fetched Concourse.ClusterInfo) | PausedToggled (Fetched ()) | InputToFetched (Fetched ( VersionId, List Concourse.Build )) | OutputOfFetched (Fetched ( VersionId, List Concourse.Build )) diff --git a/web/elm/src/Message/Effects.elm b/web/elm/src/Message/Effects.elm index bc16fa5a9..0d5956266 100644 --- a/web/elm/src/Message/Effects.elm +++ b/web/elm/src/Message/Effects.elm @@ -113,7 +113,7 @@ type Effect | FetchResources Concourse.PipelineIdentifier | FetchBuildResources Concourse.BuildId | FetchPipeline Concourse.PipelineIdentifier - | FetchVersion + | FetchClusterInfo | FetchInputTo Concourse.VersionedResourceIdentifier | FetchOutputOf Concourse.VersionedResourceIdentifier | FetchData @@ -218,9 +218,9 @@ runEffect effect key csrfToken = Network.Pipeline.fetchPipeline id |> Task.attempt PipelineFetched - FetchVersion -> + FetchClusterInfo -> Network.Info.fetch - |> Task.attempt VersionFetched + |> Task.attempt ClusterInfoFetched FetchInputTo id -> Network.Resource.fetchInputTo id diff 
--git a/web/elm/src/Network/DashboardAPIData.elm b/web/elm/src/Network/DashboardAPIData.elm index bd0ec8d2d..daa219f88 100644 --- a/web/elm/src/Network/DashboardAPIData.elm +++ b/web/elm/src/Network/DashboardAPIData.elm @@ -29,7 +29,7 @@ remoteData = (\resources -> Network.Info.fetch |> Task.andThen - (\version -> + (\clusterInfo -> Network.User.fetchUser |> Task.map Just |> Task.onError @@ -42,7 +42,7 @@ remoteData = , jobs = jobs , resources = resources , user = user - , version = version + , version = clusterInfo.version } ) ) diff --git a/web/elm/src/Network/Info.elm b/web/elm/src/Network/Info.elm index bd8fb1704..2fdec211a 100644 --- a/web/elm/src/Network/Info.elm +++ b/web/elm/src/Network/Info.elm @@ -5,6 +5,6 @@ import Http import Task exposing (Task) -fetch : Task Http.Error Concourse.ConcourseVersion +fetch : Task Http.Error Concourse.ClusterInfo fetch = Http.toTask <| Http.get "/api/v1/info" Concourse.decodeInfo diff --git a/web/elm/src/Pipeline/Pipeline.elm b/web/elm/src/Pipeline/Pipeline.elm index 1f26dc88e..ba3bff123 100644 --- a/web/elm/src/Pipeline/Pipeline.elm +++ b/web/elm/src/Pipeline/Pipeline.elm @@ -106,7 +106,7 @@ init flags = in ( model , [ FetchPipeline flags.pipelineLocator - , FetchVersion + , FetchClusterInfo , ResetPipelineFocus , FetchPipelines ] @@ -256,7 +256,7 @@ handleCallback callback ( model, effects ) = , effects ) - VersionFetched (Ok version) -> + ClusterInfoFetched (Ok { version }) -> ( { model | concourseVersion = version , experiencingTurbulence = False @@ -264,7 +264,7 @@ handleCallback callback ( model, effects ) = , effects ) - VersionFetched (Err _) -> + ClusterInfoFetched (Err _) -> ( { model | experiencingTurbulence = True }, effects ) PipelinesFetched (Err _) -> @@ -307,7 +307,7 @@ handleDelivery delivery ( model, effects ) = ) ClockTicked OneMinute _ -> - ( model, effects ++ [ FetchVersion ] ) + ( model, effects ++ [ FetchClusterInfo ] ) _ -> ( model, effects ) diff --git a/web/elm/src/SideBar/Styles.elm 
b/web/elm/src/SideBar/Styles.elm index ab64cf9c8..881135743 100644 --- a/web/elm/src/SideBar/Styles.elm +++ b/web/elm/src/SideBar/Styles.elm @@ -39,6 +39,7 @@ sideBar = , style "padding-right" "10px" , style "box-sizing" "border-box" , style "padding-bottom" "10px" + , style "-webkit-overflow-scrolling" "touch" ] @@ -246,16 +247,7 @@ pipelineIcon opacity = , style "background-size" "contain" , style "margin-left" "28px" , style "flex-shrink" "0" - , style "opacity" <| - case opacity of - Bright -> - "1" - - GreyedOut -> - "0.5" - - Dim -> - "0.2" + , opacityAttr opacity ] diff --git a/web/elm/src/SubPage/SubPage.elm b/web/elm/src/SubPage/SubPage.elm index 3b2c38634..ce5b218a7 100644 --- a/web/elm/src/SubPage/SubPage.elm +++ b/web/elm/src/SubPage/SubPage.elm @@ -91,7 +91,6 @@ init session route = { turbulencePath = session.turbulenceImgSrc , searchType = searchType , pipelineRunningKeyframes = session.pipelineRunningKeyframes - , clusterName = session.clusterName } |> Tuple.mapFirst DashboardModel diff --git a/web/elm/tests/BuildTests.elm b/web/elm/tests/BuildTests.elm index 78d81c44e..343fe54d8 100644 --- a/web/elm/tests/BuildTests.elm +++ b/web/elm/tests/BuildTests.elm @@ -6,6 +6,7 @@ import Build.Build as Build import Build.Models as Models import Build.StepTree.Models as STModels import Char +import Colors import Common exposing (defineHoverBehaviour, isColorWithStripes) import Concourse exposing (BuildPrepStatus(..)) import Concourse.Pagination exposing (Direction(..)) @@ -56,7 +57,6 @@ all = , notFoundImgSrc = "" , csrfToken = csrfToken , authToken = "" - , clusterName = "" , pipelineRunningKeyframes = "" } @@ -658,6 +658,52 @@ all = |> Tuple.first |> Common.queryView |> Query.has [ class "not-authorized" ] + , test "shows 'build cancelled' in red when aborted build's plan request gives 404" <| + \_ -> + initFromApplication + |> Application.handleCallback + (Callback.BuildFetched <| + Ok + ( 1 + , { id = 1 + , name = "1" + , job = + Just + { teamName = "team" 
+ , pipelineName = "pipeline" + , jobName = "job" + } + , status = Concourse.BuildStatusAborted + , duration = + { startedAt = Nothing + , finishedAt = Just <| Time.millisToPosix 0 + } + , reapTime = Nothing + } + ) + ) + |> Tuple.first + |> Application.handleCallback + (Callback.PlanAndResourcesFetched 1 <| + Err <| + Http.BadStatus + { url = "http://example.com" + , status = + { code = 404 + , message = "not found" + } + , headers = Dict.empty + , body = "" + } + ) + |> Tuple.first + |> Common.queryView + |> Query.has + [ style "background-color" Colors.frame + , style "padding" "5px 10px" + , style "color" Colors.errorLog + , containing [ text "build cancelled" ] + ] , test "shows passport officer when build prep request gives 401" <| \_ -> initFromApplication @@ -837,6 +883,12 @@ all = Time.customZone (5 * 60) [] ) |> Tuple.first + |> Application.update + (Msgs.Update <| + Message.Message.Click <| + Message.Message.StepHeader "stepid" + ) + |> Tuple.first |> Common.queryView |> Query.findAll [ class "timestamped-line" ] |> Query.first @@ -1101,7 +1153,6 @@ all = , notFoundImgSrc = "notfound.svg" , csrfToken = "csrf_token" , authToken = "" - , clusterName = "" , pipelineRunningKeyframes = "pipeline-running" } { protocol = Url.Http @@ -1128,7 +1179,6 @@ all = , notFoundImgSrc = "notfound.svg" , csrfToken = "csrf_token" , authToken = "" - , clusterName = "" , pipelineRunningKeyframes = "pipeline-running" } { protocol = Url.Http @@ -1218,6 +1268,12 @@ all = >> Common.queryView >> Query.find [ id "build-body" ] >> Query.has [ style "overflow-y" "auto" ] + , test "build body has momentum based scroll enabled" <| + givenBuildFetched + >> Tuple.first + >> Common.queryView + >> Query.find [ id "build-body" ] + >> Query.has [ style "-webkit-overflow-scrolling" "touch" ] , test "fetches build history and job details after build is fetched" <| givenBuildFetched >> Tuple.second @@ -2426,7 +2482,82 @@ all = ) >> Tuple.first in - [ test "build step header lays out 
horizontally" <| + [ test "step is collapsed by default" <| + fetchPlanWithGetStep + >> Application.handleDelivery + (EventsReceived <| + Ok <| + [ { url = + eventsUrl + , data = + STModels.InitializeGet + { source = "" + , id = "plan" + } + (Time.millisToPosix 0) + } + ] + ) + >> Tuple.first + >> Common.queryView + >> Query.hasNot [ class "step-body" ] + , test "step expands on click" <| + fetchPlanWithGetStep + >> Application.handleDelivery + (EventsReceived <| + Ok <| + [ { url = + eventsUrl + , data = + STModels.InitializeGet + { source = "" + , id = "plan" + } + (Time.millisToPosix 0) + } + ] + ) + >> Tuple.first + >> Application.update + (Msgs.Update <| + Message.Message.Click <| + Message.Message.StepHeader "plan" + ) + >> Tuple.first + >> Common.queryView + >> Query.has [ class "step-body" ] + , test "expanded step collapses on click" <| + fetchPlanWithGetStep + >> Application.handleDelivery + (EventsReceived <| + Ok <| + [ { url = + eventsUrl + , data = + STModels.InitializeGet + { source = "" + , id = "plan" + } + (Time.millisToPosix 0) + } + ] + ) + >> Tuple.first + >> Application.update + (Msgs.Update <| + Message.Message.Click <| + Message.Message.StepHeader "plan" + ) + >> Tuple.first + >> Application.update + (Msgs.Update <| + Message.Message.Click <| + Message.Message.StepHeader "plan" + ) + >> Tuple.first + >> Common.queryView + >> Query.hasNot [ class "step-body" ] + , test "build step header lays out horizontally" <| fetchPlanWithGetStep >> Common.queryView >> Query.find [ class "header" ] diff --git a/web/elm/tests/Common.elm b/web/elm/tests/Common.elm index f3412c521..9375a27d2 100644 --- a/web/elm/tests/Common.elm +++ b/web/elm/tests/Common.elm @@ -70,7 +70,6 @@ init path = , notFoundImgSrc = "notfound.svg" , csrfToken = "csrf_token" , authToken = "" - , clusterName = "" , pipelineRunningKeyframes = "pipeline-running" } { protocol = Url.Http @@ -102,7 +101,6 @@ iOpenTheBuildPage _ = , csrfToken = "" , authToken = "" , pipelineRunningKeyframes 
= "" - , clusterName = "" } { protocol = Url.Http , host = "" diff --git a/web/elm/tests/DashboardTests.elm b/web/elm/tests/DashboardTests.elm index 88ccbbcd3..58922987c 100644 --- a/web/elm/tests/DashboardTests.elm +++ b/web/elm/tests/DashboardTests.elm @@ -127,7 +127,6 @@ flags = , notFoundImgSrc = "" , csrfToken = csrfToken , authToken = "" - , clusterName = "" , pipelineRunningKeyframes = pipelineRunningKeyframes } @@ -142,7 +141,6 @@ all = , notFoundImgSrc = "notfound.svg" , csrfToken = "csrf_token" , authToken = "" - , clusterName = "" , pipelineRunningKeyframes = "pipeline-running" } { protocol = Url.Http @@ -155,6 +153,25 @@ all = |> Tuple.second |> List.member Effects.GetScreenSize |> Expect.true "should request screen size" + , test "requests cluster info on page load" <| + \_ -> + Application.init + { turbulenceImgSrc = "" + , notFoundImgSrc = "notfound.svg" + , csrfToken = "csrf_token" + , authToken = "" + , pipelineRunningKeyframes = "pipeline-running" + } + { protocol = Url.Http + , host = "" + , port_ = Nothing + , path = "/" + , query = Nothing + , fragment = Nothing + } + |> Tuple.second + |> List.member Effects.FetchClusterInfo + |> Expect.true "should request cluster info" , test "redirects to login if any data call gives a 401" <| \_ -> Common.init "/" @@ -181,21 +198,8 @@ all = |> Expect.equal "Dashboard - Concourse" , test "renders cluster name at top left" <| \_ -> - Application.init - { turbulenceImgSrc = "" - , notFoundImgSrc = "" - , csrfToken = csrfToken - , authToken = "" - , clusterName = "foobar" - , pipelineRunningKeyframes = "" - } - { protocol = Url.Http - , host = "" - , port_ = Nothing - , path = "/" - , query = Nothing - , fragment = Nothing - } + Common.init "/" + |> givenClusterInfo "0.0.0-dev" "foobar" |> Tuple.first |> Common.queryView |> Query.find [ id "top-bar-app" ] @@ -992,6 +996,17 @@ all = |> Query.find teamHeaderSelector |> Query.find [ containing [ text "OWNER" ] ] |> Query.has [ style "margin-bottom" "" ] + , test 
"has momentum based scrolling" <| + \_ -> + whenOnDashboard { highDensity = True } + |> givenDataAndUser + (apiData [ ( "team", [ "pipeline" ] ) ]) + (userWithRoles []) + |> Tuple.first + |> Common.queryView + |> Query.find [ id "page-below-top-bar" ] + |> Query.find [ class "dashboard" ] + |> Query.has [ style "-webkit-overflow-scrolling" "touch" ] ] , describe "pipeline cards" <| let @@ -3567,6 +3582,18 @@ givenDataUnauthenticated data = ) +givenClusterInfo : + String + -> String + -> Application.Model + -> ( Application.Model, List Effects.Effect ) +givenClusterInfo version clusterName = + Application.handleCallback + (Callback.ClusterInfoFetched <| + Ok { version = version, clusterName = clusterName } + ) + + givenPipelineWithJob : Maybe Concourse.User -> Concourse.APIData givenPipelineWithJob user = { teams = [] diff --git a/web/elm/tests/FlySuccessTests.elm b/web/elm/tests/FlySuccessTests.elm index 2e2ceb89a..d80ebe79c 100644 --- a/web/elm/tests/FlySuccessTests.elm +++ b/web/elm/tests/FlySuccessTests.elm @@ -60,7 +60,6 @@ flags = , notFoundImgSrc = "" , csrfToken = "" , authToken = authToken - , clusterName = "" , pipelineRunningKeyframes = "" } diff --git a/web/elm/tests/JobTests.elm b/web/elm/tests/JobTests.elm index 42f50e064..d8a3878cd 100644 --- a/web/elm/tests/JobTests.elm +++ b/web/elm/tests/JobTests.elm @@ -102,7 +102,6 @@ all = , notFoundImgSrc = "" , csrfToken = csrfToken , authToken = "" - , clusterName = "" , pipelineRunningKeyframes = "" } @@ -157,7 +156,6 @@ all = , notFoundImgSrc = "notfound.svg" , csrfToken = "csrf_token" , authToken = "" - , clusterName = "" , pipelineRunningKeyframes = "pipeline-running" } { protocol = Url.Http @@ -178,7 +176,6 @@ all = , csrfToken = "" , authToken = "" , pipelineRunningKeyframes = "" - , clusterName = "" } { protocol = Url.Http , host = "" diff --git a/web/elm/tests/PipelineTests.elm b/web/elm/tests/PipelineTests.elm index ab9aa2b18..c427b0d0b 100644 --- a/web/elm/tests/PipelineTests.elm +++ 
b/web/elm/tests/PipelineTests.elm @@ -53,7 +53,6 @@ flags = , notFoundImgSrc = "" , csrfToken = csrfToken , authToken = "" - , clusterName = "" , pipelineRunningKeyframes = "" } @@ -279,7 +278,6 @@ all = , csrfToken = csrfToken , authToken = "" , pipelineRunningKeyframes = "" - , clusterName = "" } { protocol = Url.Http , host = "" @@ -391,7 +389,7 @@ all = ) ) |> Tuple.second - |> Expect.equal [ Effects.FetchVersion ] + |> Expect.equal [ Effects.FetchClusterInfo ] , describe "Legend" <| let clockTick = diff --git a/web/elm/tests/ResourceTests.elm b/web/elm/tests/ResourceTests.elm index 7bd077354..e4525cb56 100644 --- a/web/elm/tests/ResourceTests.elm +++ b/web/elm/tests/ResourceTests.elm @@ -213,7 +213,6 @@ all = , notFoundImgSrc = "notfound.svg" , csrfToken = "csrf_token" , authToken = "" - , clusterName = "" , pipelineRunningKeyframes = "pipeline-running" } { protocol = Url.Http @@ -3137,7 +3136,6 @@ flags = , notFoundImgSrc = "" , csrfToken = csrfToken , authToken = "" - , clusterName = "" , pipelineRunningKeyframes = "" } diff --git a/web/elm/tests/SideBarFeature.elm b/web/elm/tests/SideBarFeature.elm index ccdbf9c5c..37a3275b8 100644 --- a/web/elm/tests/SideBarFeature.elm +++ b/web/elm/tests/SideBarFeature.elm @@ -382,6 +382,11 @@ hasSideBar iAmLookingAtThePage = >> given iClickedThePipelineGroup >> when iAmLookingAtTheFirstPipelineIcon >> then_ iSeeItDoesNotShrink + , test "pipeline icon is dim" <| + given iHaveAnOpenSideBar_ + >> given iClickedThePipelineGroup + >> when iAmLookingAtTheFirstPipelineIcon + >> then_ iSeeItIsDim , test "pipeline link has 2.5px padding" <| given iHaveAnOpenSideBar_ >> given iClickedThePipelineGroup @@ -417,6 +422,12 @@ hasSideBar iAmLookingAtThePage = >> given iClickedThePipelineGroup >> when iAmLookingAtTheFirstPipelineLink >> then_ iSeeItHasAValidPipelineId + , test "pipeline icon is bright when pipeline link is hovered" <| + given iHaveAnOpenSideBar_ + >> given iClickedThePipelineGroup + >> given iHoveredThePipelineLink + >> 
when iAmLookingAtTheFirstPipelineIcon + >> then_ iSeeItIsBright , test "hovering the pipelink link checks its viewport" <| given iHaveAnOpenSideBar_ >> given iClickedThePipelineGroup @@ -502,6 +513,21 @@ hasCurrentPipelineInSideBar iAmLookingAtThePage = >> given myBrowserFetchedPipelinesFromMultipleTeams >> when iAmLookingAtTheOtherPipelineList >> then_ iSeeNoPipelineNames + , test "current team has bright team icon" <| + given iAmLookingAtThePage + >> given iAmOnANonPhoneScreen + >> given myBrowserFetchedPipelinesFromMultipleTeams + >> given iClickedTheHamburgerIcon + >> when iAmLookingAtTheOtherTeamIcon + >> then_ iSeeItIsBright + , test "current team name is bright" <| + given iAmLookingAtThePage + >> given iAmOnANonPhoneScreen + >> given myBrowserFetchedPipelinesFromMultipleTeams + >> given iClickedTheHamburgerIcon + >> given iClickedTheOtherPipelineGroup + >> when iAmLookingAtTheOtherTeamName + >> then_ iSeeItIsBright , test "current pipeline name has a grey border" <| given iAmLookingAtThePage >> given iAmOnANonPhoneScreen @@ -516,6 +542,20 @@ hasCurrentPipelineInSideBar iAmLookingAtThePage = >> given iClickedTheHamburgerIcon >> when iAmLookingAtTheOtherPipelineName >> then_ iSeeADarkGreyBackground + , test "current pipeline has bright pipeline icon" <| + given iAmLookingAtThePage + >> given iAmOnANonPhoneScreen + >> given myBrowserFetchedPipelinesFromMultipleTeams + >> given iClickedTheHamburgerIcon + >> when iAmLookingAtTheOtherPipelineIcon + >> then_ iSeeItIsBright + , test "current pipeline name is bright" <| + given iAmLookingAtThePage + >> given iAmOnANonPhoneScreen + >> given myBrowserFetchedPipelinesFromMultipleTeams + >> given iClickedTheHamburgerIcon + >> when iAmLookingAtTheOtherPipelineName + >> then_ iSeeItIsBright , test "pipeline with same name on other team has invisible border" <| given iAmLookingAtThePage >> given iAmOnANonPhoneScreen @@ -695,7 +735,6 @@ iVisitTheDashboard _ = , csrfToken = "" , authToken = "" , pipelineRunningKeyframes = "" 
- , clusterName = "" } { protocol = Url.Http , host = "" @@ -993,6 +1032,14 @@ iSeeItIsBright = Query.has [ style "opacity" "1" ] +iSeeItIsGreyedOut = + Query.has [ style "opacity" "0.7" ] + + +iSeeItIsDim = + Query.has [ style "opacity" "0.3" ] + + iAmLookingAtThePipelineList = iAmLookingAtTheTeam >> Query.children [] >> Query.index 1 @@ -1175,7 +1222,6 @@ iOpenedThePipelinePage _ = , csrfToken = "" , authToken = "" , pipelineRunningKeyframes = "" - , clusterName = "" } { protocol = Url.Http , host = "" @@ -1651,7 +1697,6 @@ iOpenTheJobPage _ = , csrfToken = "" , authToken = "" , pipelineRunningKeyframes = "" - , clusterName = "" } { protocol = Url.Http , host = "" @@ -1669,7 +1714,6 @@ iOpenTheResourcePage _ = , csrfToken = "" , authToken = "" , pipelineRunningKeyframes = "" - , clusterName = "" } { protocol = Url.Http , host = "" diff --git a/web/elm/tests/StepTreeTests.elm b/web/elm/tests/StepTreeTests.elm index 1f821e4a2..339475277 100644 --- a/web/elm/tests/StepTreeTests.elm +++ b/web/elm/tests/StepTreeTests.elm @@ -55,7 +55,7 @@ someVersionedStep version id name state = , state = state , log = cookedLog , error = Nothing - , expanded = Nothing + , expanded = False , version = version , metadata = [] , firstOccurrence = False diff --git a/web/elm/tests/TopBarTests.elm b/web/elm/tests/TopBarTests.elm index a65c9bcba..abffaed9f 100644 --- a/web/elm/tests/TopBarTests.elm +++ b/web/elm/tests/TopBarTests.elm @@ -124,7 +124,6 @@ flags = , notFoundImgSrc = "" , csrfToken = "" , authToken = "" - , clusterName = "" , pipelineRunningKeyframes = "" } diff --git a/web/indexhandler/handler.go b/web/indexhandler/handler.go index a7bc0e382..f38226c3f 100644 --- a/web/indexhandler/handler.go +++ b/web/indexhandler/handler.go @@ -9,12 +9,9 @@ import ( "github.com/gobuffalo/packr" ) -var ClusterName = "" - type templateData struct { CSRFToken string AuthToken string - ClusterName string } type handler struct { @@ -65,7 +62,6 @@ func (h *handler) ServeHTTP(w 
http.ResponseWriter, r *http.Request) { err := h.template.Execute(w, templateData{ CSRFToken: csrfToken, AuthToken: authToken, - ClusterName: ClusterName, }) if err != nil { diff --git a/web/public/index.html b/web/public/index.html index 80cea20d9..7d3c92653 100644 --- a/web/public/index.html +++ b/web/public/index.html @@ -84,7 +84,6 @@ notFoundImgSrc: {{asset "images/parachute-error-404.svg"}}, csrfToken: {{ .CSRFToken }}, authToken: {{ .AuthToken }}, - clusterName: {{ .ClusterName }}, pipelineRunningKeyframes: "pipeline-running" } }); diff --git a/web/wats/test/build.js b/web/wats/test/build.js index d04ade22d..06c9936a5 100644 --- a/web/wats/test/build.js +++ b/web/wats/test/build.js @@ -29,9 +29,11 @@ test('shows abort hooks', async t => { await t.context.web.waitForText("say-bye-from-step"); await t.context.web.waitForText("say-bye-from-job"); - await t.context.web.waitForText("looping"); + // await t.context.web.waitForText("looping"); - await t.context.web.clickAndWait('button[title="Abort Build"]', '.build-header[style*="rgb(139, 87, 42)"]'); // brown + await t.context.web.page.waitFor('button[title="Abort Build"]'); + await t.context.web.page.click('button[title="Abort Build"]'); + await t.context.web.page.waitForSelector( '.build-header[style*="rgb(139, 87, 42)"]', {timeout: 360000}); // brown await t.context.web.page.waitFor('[data-step-name="say-bye-from-step"] [data-step-state="succeeded"]'); await t.context.web.page.waitFor('[data-step-name="say-bye-from-job"] [data-step-state="succeeded"]'); diff --git a/web/wats/test/dashboard.js b/web/wats/test/dashboard.js index 865c42ad5..48bd6b3ec 100644 --- a/web/wats/test/dashboard.js +++ b/web/wats/test/dashboard.js @@ -100,3 +100,10 @@ test('auto-refreshes to reflect state changes', showsPipelineState, async t => { let newBackground = await t.context.web.computedStyle(newBanner, 'backgroundColor'); t.deepEqual(color(newBackground), palette.red); }); + +test('picks up cluster name from configuration', async 
t => { + await t.context.web.page.goto(t.context.web.route('/')); + const clusterName = await t.context.web.page.$eval(`#top-bar-app > div:nth-child(1)`, el => el.innerText); + + t.is(clusterName, 'dev'); +}); diff --git a/yarn.lock b/yarn.lock index 3128ff3ec..2ac25ff9d 100644 --- a/yarn.lock +++ b/yarn.lock @@ -27,7 +27,7 @@ agent-base@^4.1.0: dependencies: es6-promisify "^5.0.0" -ajv@^5.1.0, ajv@^5.3.0: +ajv@^5.3.0: version "5.5.2" resolved "https://registry.yarnpkg.com/ajv/-/ajv-5.5.2.tgz#73b5eeca3fab653e3d3f9422b341ad42205dc965" integrity sha1-c7Xuyj+rZT49P5Qis0GtQiBdyWU= @@ -37,11 +37,6 @@ ajv@^5.1.0, ajv@^5.3.0: fast-json-stable-stringify "^2.0.0" json-schema-traverse "^0.3.0" -amdefine@>=0.0.4: - version "1.0.1" - resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5" - integrity sha1-SlKCrBZHKek2Gbz9OtFR+BfOkfU= - ansi-regex@^2.0.0: version "2.1.1" resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df" @@ -172,11 +167,6 @@ aws-sign2@~0.7.0: resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8" integrity sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg= -aws4@^1.6.0: - version "1.7.0" - resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.7.0.tgz#d4d0e9b9dbfca77bf08eeb0a8a471550fe39e289" - integrity sha512-32NDda82rhwD9/JBCCkB+MRYDp0oSvlo2IL6rQWA10PQi7tDUM3eqMSltXmY+Oyl/7N3P3qNtAlv7X0d9bI28w== - aws4@^1.8.0: version "1.8.0" resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.8.0.tgz#f0e003d9ca9e7f59c7a508945d7b2ef9a04a542f" @@ -267,20 +257,6 @@ body-parser@1.18.2: raw-body "2.3.2" type-is "~1.6.15" -boom@4.x.x: - version "4.3.1" - resolved "https://registry.yarnpkg.com/boom/-/boom-4.3.1.tgz#4f8a3005cb4a7e3889f749030fd25b96e01d2e31" - integrity sha1-T4owBctKfjiJ90kDD9JbluAdLjE= - dependencies: - hoek "4.x.x" - -boom@5.x.x: - version "5.2.0" - resolved 
"https://registry.yarnpkg.com/boom/-/boom-5.2.0.tgz#5dd9da6ee3a5f302077436290cb717d3f4a54e02" - integrity sha512-Z5BTk6ZRe4tXXQlkqftmsAUANpXmuwlsF5Oov8ThoMbQRzdGTA1ngYRW160GexgOgjsFOKJz0LYhoNi+2AMBUw== - dependencies: - hoek "4.x.x" - brace-expansion@^1.1.7: version "1.1.11" resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" @@ -458,13 +434,21 @@ class-utils@^0.3.5: isobject "^3.0.0" static-extend "^0.1.1" -clean-css@^3.0.1: - version "3.4.28" - resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-3.4.28.tgz#bf1945e82fc808f55695e6ddeaec01400efd03ff" - integrity sha1-vxlF6C/ICPVWlebd6uwBQA79A/8= +clean-css-cli@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/clean-css-cli/-/clean-css-cli-4.3.0.tgz#8502aa86d1879e5b111af51b3c2abb799e0684ce" + integrity sha512-8GHZfr+mG3zB/Lgqrr27qHBFsPSn0fyEI3f2rIZpxPxUbn2J6A8xyyeBRVTW8duDuXigN0s80vsXiXJOEFIO5Q== dependencies: - commander "2.8.x" - source-map "0.4.x" + clean-css "^4.2.1" + commander "2.x" + glob "7.x" + +clean-css@^4.2.1: + version "4.2.1" + resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-4.2.1.tgz#2d411ef76b8569b6d0c84068dabe85b0aa5e5c17" + integrity sha512-4ZxI6dy4lrY6FHzfiy1aEOXgu4LIsW2MhwG0VBKdcoGoH/XLFgaHSdLTGr4O8Be6A8r3MOphEiI8Gc1n0ecf3g== + dependencies: + source-map "~0.6.0" cliui@^4.0.0: version "4.1.0" @@ -505,7 +489,7 @@ color-name@1.1.3: resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25" integrity sha1-p9BVi9icQveV3UIyj3QIMcpTvCU= -combined-stream@1.0.6, combined-stream@~1.0.5: +combined-stream@1.0.6: version "1.0.6" resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.6.tgz#723e7df6e801ac5613113a7e445a9b69cb632818" integrity sha1-cj599ugBrFYTETp+RFqbactjKBg= @@ -519,12 +503,10 @@ combined-stream@~1.0.6: dependencies: delayed-stream "~1.0.0" -commander@2.8.x: - version "2.8.1" - resolved 
"https://registry.yarnpkg.com/commander/-/commander-2.8.1.tgz#06be367febfda0c330aa1e2a072d3dc9762425d4" - integrity sha1-Br42f+v9oMMwqh4qBy09yXYkJdQ= - dependencies: - graceful-readlink ">= 1.0.0" +commander@2.x: + version "2.20.0" + resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.0.tgz#d58bb2b5c1ee8f87b0d340027e9e94e222c5a422" + integrity sha512-7j2y+40w61zy6YC2iRNpUe/NwhNyoXrYpHMrSunaMG64nRnaf96zO/KMQR4OyN/UnE5KLyEBnKHd4aG3rskjpQ== commander@~2.15.0: version "2.15.1" @@ -625,13 +607,6 @@ cross-spawn@^5.0.1: shebang-command "^1.2.0" which "^1.2.9" -cryptiles@3.x.x: - version "3.1.2" - resolved "https://registry.yarnpkg.com/cryptiles/-/cryptiles-3.1.2.tgz#a89fbb220f5ce25ec56e8c4aa8a4fd7b5b0d29fe" - integrity sha1-qJ+7Ig9c4l7FboxKqKT9e1sNKf4= - dependencies: - boom "5.x.x" - dashdash@^1.12.0: version "1.14.1" resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0" @@ -933,11 +908,6 @@ extend-shallow@^3.0.0, extend-shallow@^3.0.2: assign-symbols "^1.0.0" is-extendable "^1.0.1" -extend@~3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.1.tgz#a755ea7bc1adfcc5a31ce7e762dbaadc5e636444" - integrity sha1-p1Xqe8Gt/MWjHOfnYtuq3F5jZEQ= - extend@~3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa" @@ -1064,7 +1034,7 @@ forever-agent@~0.6.1: resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" integrity sha1-+8cfDEGt6zf5bFd60e1C2P2sypE= -form-data@~2.3.1, form-data@~2.3.2: +form-data@~2.3.2: version "2.3.2" resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.3.2.tgz#4970498be604c20c005d4f5c23aecd21d6b49099" integrity sha1-SXBJi+YEwgwAXU9cI67NIda0kJk= @@ -1203,7 +1173,7 @@ glob@7.1.1: once "^1.3.0" path-is-absolute "^1.0.0" -glob@^7.1.3: +glob@7.x, glob@^7.1.3: version "7.1.4" resolved 
"https://registry.yarnpkg.com/glob/-/glob-7.1.4.tgz#aa608a2f6c577ad357e1ae5a5c26d9a8d1969255" integrity sha512-hkLPepehmnKk41pUGm3sYxoFs/umurYfYJCerbXEyFIWcAzvpipAgVkBqqT9RBKMGjnq6kMuyYwha6csxbiM1A== @@ -1220,24 +1190,11 @@ graceful-fs@^4.1.11, graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.1.9: resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.15.tgz#ffb703e1066e8a0eeaa4c8b80ba9253eeefbfb00" integrity sha512-6uHUhOPEBgQ24HM+r6b/QwWfZq+yiFcipKFrOFiBEnWdy5sdzYoi+pJeQaPI5qOLRFqWmAXUPQNsielzdLoecA== -"graceful-readlink@>= 1.0.0": - version "1.0.1" - resolved "https://registry.yarnpkg.com/graceful-readlink/-/graceful-readlink-1.0.1.tgz#4cafad76bc62f02fa039b2f94e9a3dd3a391a725" - integrity sha1-TK+tdrxi8C+gObL5Tpo906ORpyU= - har-schema@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-2.0.0.tgz#a94c2224ebcac04782a0d9035521f24735b7ec92" integrity sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI= -har-validator@~5.0.3: - version "5.0.3" - resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-5.0.3.tgz#ba402c266194f15956ef15e0fcf242993f6a7dfd" - integrity sha1-ukAsJmGU8VlW7xXg/PJCmT9qff0= - dependencies: - ajv "^5.1.0" - har-schema "^2.0.0" - har-validator@~5.1.0: version "5.1.0" resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-5.1.0.tgz#44657f5688a22cfd4b72486e81b3a3fb11742c29" @@ -1299,21 +1256,6 @@ has-values@^1.0.0: is-number "^3.0.0" kind-of "^4.0.0" -hawk@~6.0.2: - version "6.0.2" - resolved "https://registry.yarnpkg.com/hawk/-/hawk-6.0.2.tgz#af4d914eb065f9b5ce4d9d11c1cb2126eecc3038" - integrity sha512-miowhl2+U7Qle4vdLqDdPt9m09K6yZhkLDTWGoUiUzrQCn+mHHSmfJgAyGaLRZbPmTqfFFjRV1QWCW0VWUJBbQ== - dependencies: - boom "4.x.x" - cryptiles "3.x.x" - hoek "4.x.x" - sntp "2.x.x" - -hoek@4.x.x: - version "4.2.1" - resolved "https://registry.yarnpkg.com/hoek/-/hoek-4.2.1.tgz#9634502aa12c445dd5a7c5734b572bb8738aacbb" - integrity 
sha512-QLg82fGkfnJ/4iy1xZ81/9SIJiq1NGFUMGs6ParyjBZr6jW2Ufj/snDqTHixNlHdPNwN2RLVD0Pi3igeK9+JfA== - http-errors@1.6.2: version "1.6.2" resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.6.2.tgz#0a002cc85707192a7e7946ceedc11155f60ec736" @@ -1653,13 +1595,6 @@ less-plugin-autoprefix@^1.5.1: autoprefixer "^6.0.0" postcss "^5.0.0" -less-plugin-clean-css@^1.5.1: - version "1.5.1" - resolved "https://registry.yarnpkg.com/less-plugin-clean-css/-/less-plugin-clean-css-1.5.1.tgz#cc57af7aa3398957e56decebe63cb60c23429703" - integrity sha1-zFeveqM5iVflbezr5jy2DCNClwM= - dependencies: - clean-css "^3.0.1" - less@^3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/less/-/less-3.0.2.tgz#1bcb9813bb6090c884ac142f02c633bd42931844" @@ -1773,7 +1708,7 @@ mime-db@~1.37.0: resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.37.0.tgz#0b6a0ce6fdbe9576e25f1f2d2fde8830dc0ad0d8" integrity sha512-R3C4db6bgQhlIhPU48fUtdVmKnflq+hRdad7IyKhtFj06VPNVdk2RhiYL3UjQIlso8L+YxAtFkobT0VK+S/ybg== -mime-types@^2.1.12, mime-types@~2.1.17: +mime-types@^2.1.12: version "2.1.18" resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.18.tgz#6f323f60a83d11146f831ff11fd66e2fe5503bb8" integrity sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ== @@ -2018,11 +1953,6 @@ number-is-nan@^1.0.0: resolved "https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d" integrity sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0= -oauth-sign@~0.8.2: - version "0.8.2" - resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.8.2.tgz#46a6ab7f0aead8deae9ec0565780b7d4efeb9d43" - integrity sha1-Rqarfwrq2N6unsBWV4C31O/rnUM= - oauth-sign@~0.9.0: version "0.9.0" resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.9.0.tgz#47a7b016baa68b5fa0ecf3dee08a85c679ac6455" @@ -2272,7 +2202,7 @@ puppeteer@^1.12.2: rimraf "^2.6.1" ws "^6.1.0" -qs@6.5.1, qs@~6.5.1: +qs@6.5.1: version "6.5.1" resolved 
"https://registry.yarnpkg.com/qs/-/qs-6.5.1.tgz#349cdf6eef89ec45c12d7d5eb3fc0c870343a6d8" integrity sha512-eRzhrN1WSINYCDCbrz796z37LOe3m5tmW7RQf6oBntukAG1nmovJvhnwHHRMAfeoItc1m2Hk02WER2aQ/iqs+A== @@ -2386,7 +2316,7 @@ request-promise@^4.2.0: stealthy-require "^1.1.0" tough-cookie ">=2.3.3" -request@2.88.0, request@^2.87.0: +request@2.88.0, request@^2.83.0, request@^2.86.0, request@^2.87.0: version "2.88.0" resolved "https://registry.yarnpkg.com/request/-/request-2.88.0.tgz#9c2fca4f7d35b592efe57c7f0a55e81052124fef" integrity sha512-NAqBSrijGLZdM0WZNsInLJpkJokL72XYjUpnB0iwsRgxh7dB6COrHnTBNwN0E+lHDAJzu7kLAkDeY08z2/A0hg== @@ -2412,34 +2342,6 @@ request@2.88.0, request@^2.87.0: tunnel-agent "^0.6.0" uuid "^3.3.2" -request@^2.83.0: - version "2.85.0" - resolved "https://registry.yarnpkg.com/request/-/request-2.85.0.tgz#5a03615a47c61420b3eb99b7dba204f83603e1fa" - integrity sha512-8H7Ehijd4js+s6wuVPLjwORxD4zeuyjYugprdOXlPSqaApmL/QOy+EB/beICHVCHkGMKNh5rvihb5ov+IDw4mg== - dependencies: - aws-sign2 "~0.7.0" - aws4 "^1.6.0" - caseless "~0.12.0" - combined-stream "~1.0.5" - extend "~3.0.1" - forever-agent "~0.6.1" - form-data "~2.3.1" - har-validator "~5.0.3" - hawk "~6.0.2" - http-signature "~1.2.0" - is-typedarray "~1.0.0" - isstream "~0.1.2" - json-stringify-safe "~5.0.1" - mime-types "~2.1.17" - oauth-sign "~0.8.2" - performance-now "^2.1.0" - qs "~6.5.1" - safe-buffer "^5.1.1" - stringstream "~0.0.5" - tough-cookie "~2.3.3" - tunnel-agent "^0.6.0" - uuid "^3.1.0" - require-directory@^2.1.1: version "2.1.1" resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42" @@ -2477,7 +2379,7 @@ safe-buffer@5.1.1: resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.1.tgz#893312af69b2123def71f57889001671eeb2c853" integrity sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg== -safe-buffer@^5.0.1, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@~5.1.0, 
safe-buffer@~5.1.1: +safe-buffer@^5.0.1, safe-buffer@^5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1: version "5.1.2" resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== @@ -2615,13 +2517,6 @@ snapdragon@^0.8.1: source-map-resolve "^0.5.0" use "^3.1.0" -sntp@2.x.x: - version "2.1.0" - resolved "https://registry.yarnpkg.com/sntp/-/sntp-2.1.0.tgz#2c6cec14fedc2222739caf9b5c3d85d1cc5a2cc8" - integrity sha512-FL1b58BDrqS3A11lJ0zEdnJ3UOKqVxawAkF3k7F0CVN7VQ34aZrV+G8BZ1WC9ZL7NyrwsW0oviwsWDgRuVYtJg== - dependencies: - hoek "4.x.x" - source-map-resolve@^0.5.0: version "0.5.2" resolved "https://registry.yarnpkg.com/source-map-resolve/-/source-map-resolve-0.5.2.tgz#72e2cc34095543e43b2c62b2c4c10d4a9054f259" @@ -2638,19 +2533,12 @@ source-map-url@^0.4.0: resolved "https://registry.yarnpkg.com/source-map-url/-/source-map-url-0.4.0.tgz#3e935d7ddd73631b97659956d55128e87b5084a3" integrity sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM= -source-map@0.4.x: - version "0.4.4" - resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.4.4.tgz#eba4f5da9c0dc999de68032d8b4f76173652036b" - integrity sha1-66T12pwNyZneaAMti092FzZSA2s= - dependencies: - amdefine ">=0.0.4" - source-map@^0.5.3, source-map@^0.5.6: version "0.5.7" resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc" integrity sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w= -source-map@~0.6.1: +source-map@~0.6.0, source-map@~0.6.1: version "0.6.1" resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== @@ -2736,11 +2624,6 @@ string_decoder@~1.1.1: dependencies: safe-buffer "~5.1.0" -stringstream@~0.0.5: - version "0.0.5" - resolved 
"https://registry.yarnpkg.com/stringstream/-/stringstream-0.0.5.tgz#4e484cd4de5a0bbbee18e46307710a8a81621878" - integrity sha1-TkhM1N5aC7vuGORjB3EKioFiGHg= - strip-ansi@^3.0.0, strip-ansi@^3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf" @@ -2877,7 +2760,7 @@ to-regex@^3.0.1, to-regex@^3.0.2: regex-not "^1.0.2" safe-regex "^1.1.0" -tough-cookie@>=2.3.3, tough-cookie@~2.3.3: +tough-cookie@>=2.3.3: version "2.3.4" resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.3.4.tgz#ec60cee38ac675063ffc97a5c18970578ee83655" integrity sha512-TZ6TTfI5NtZnuyy/Kecv+CnoROnyXn2DN97LontgQpCwsX2XyLYCC0ENhYkehSOwAp8rTQKc/NUIF7BkQ5rKLA== @@ -3006,11 +2889,6 @@ utils-merge@1.0.1: resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.1.tgz#9f95710f50a267947b2ccc124741c1028427e713" integrity sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM= -uuid@^3.1.0: - version "3.2.1" - resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.2.1.tgz#12c528bb9d58d0b9265d9a2f6f0fe8be17ff1f14" - integrity sha512-jZnMwlb9Iku/O3smGWvZhauCf6cvvpKi4BKRiliS3cxnI+Gz9j5MEpTz2UFuXiKPJocb7gnsLHwiS05ige5BEA== - uuid@^3.3.2: version "3.3.2" resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.3.2.tgz#1b4af4955eb3077c501c23872fc6513811587131"