Start refactor of get_step

- moved fetcher logic into the worker package, but the import cycle problem still exists
- split FindOn into a standalone method

Signed-off-by: Divya Dadlani <ddadlani@pivotal.io>
Co-authored-by: Sameer Vohra <svohra@pivotal.io>
Co-authored-by: Denise Yu <dyu@pivotal.io>
Signed-off-by: Krishna Mannem <kmannem@pivotal.io>
Signed-off-by: Sameer Vohra <vohra.sam@gmail.com>
Divya Dadlani 2019-07-24 11:31:29 -04:00 committed by Sameer Vohra
parent 95cf99207e
commit e576322d10
76 changed files with 2610 additions and 1117 deletions

View File

@ -20,7 +20,7 @@ import (
. "github.com/onsi/gomega"
)
var _ = Describe("Artifacts API", func() {
var _ = Describe("ArtifactRepository API", func() {
var fakeaccess *accessorfakes.FakeAccess
BeforeEach(func() {

View File

@ -16,7 +16,6 @@ import (
"code.cloudfoundry.org/clock"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/api"
"github.com/concourse/concourse/atc/api/accessor"
@ -33,7 +32,6 @@ import (
"github.com/concourse/concourse/atc/db/migration"
"github.com/concourse/concourse/atc/engine"
"github.com/concourse/concourse/atc/engine/builder"
"github.com/concourse/concourse/atc/fetcher"
"github.com/concourse/concourse/atc/gc"
"github.com/concourse/concourse/atc/lidar"
"github.com/concourse/concourse/atc/lockrunner"
@ -567,8 +565,8 @@ func (cmd *RunCommand) constructAPIMembers(
resourceFactory := resource.NewResourceFactory()
dbResourceCacheFactory := db.NewResourceCacheFactory(dbConn, lockFactory)
fetchSourceFactory := fetcher.NewFetchSourceFactory(dbResourceCacheFactory, resourceFactory)
resourceFetcher := fetcher.NewFetcher(clock.NewClock(), lockFactory, fetchSourceFactory)
fetchSourceFactory := worker.NewFetchSourceFactory(dbResourceCacheFactory, resourceFactory)
resourceFetcher := worker.NewFetcher(clock.NewClock(), lockFactory, fetchSourceFactory)
dbResourceConfigFactory := db.NewResourceConfigFactory(dbConn, lockFactory)
imageResourceFetcherFactory := image.NewImageResourceFetcherFactory(
dbResourceCacheFactory,
@ -745,8 +743,8 @@ func (cmd *RunCommand) constructBackendMembers(
resourceFactory := resource.NewResourceFactory()
dbResourceCacheFactory := db.NewResourceCacheFactory(dbConn, lockFactory)
fetchSourceFactory := fetcher.NewFetchSourceFactory(dbResourceCacheFactory, resourceFactory)
resourceFetcher := fetcher.NewFetcher(clock.NewClock(), lockFactory, fetchSourceFactory)
fetchSourceFactory := worker.NewFetchSourceFactory(dbResourceCacheFactory, resourceFactory)
resourceFetcher := worker.NewFetcher(clock.NewClock(), lockFactory, fetchSourceFactory)
dbResourceConfigFactory := db.NewResourceConfigFactory(dbConn, lockFactory)
imageResourceFetcherFactory := image.NewImageResourceFetcherFactory(
dbResourceCacheFactory,
@ -1453,7 +1451,7 @@ func (cmd *RunCommand) configureComponentIntervals(componentFactory db.Component
func (cmd *RunCommand) constructEngine(
workerPool worker.Pool,
workerClient worker.Client,
resourceFetcher fetcher.Fetcher,
resourceFetcher worker.Fetcher,
teamFactory db.TeamFactory,
resourceCacheFactory db.ResourceCacheFactory,
resourceConfigFactory db.ResourceConfigFactory,
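As noted in the commit message, the fetcher constructors now come from the worker package so that atccmd no longer imports atc/fetcher. Below is a minimal sketch of the resulting wiring, assuming the constructor signatures shown in the hunks above; the helper name constructFetcher and its parameter list are illustrative, not part of the commit.

package atccmd

import (
	"code.cloudfoundry.org/clock"

	"github.com/concourse/concourse/atc/db"
	"github.com/concourse/concourse/atc/db/lock"
	"github.com/concourse/concourse/atc/resource"
	"github.com/concourse/concourse/atc/worker"
)

// constructFetcher wires up the resource fetcher entirely from the worker
// package, mirroring the two call sites changed above.
func constructFetcher(
	dbResourceCacheFactory db.ResourceCacheFactory,
	resourceFactory resource.ResourceFactory,
	lockFactory lock.LockFactory,
) worker.Fetcher {
	fetchSourceFactory := worker.NewFetchSourceFactory(dbResourceCacheFactory, resourceFactory)
	return worker.NewFetcher(clock.NewClock(), lockFactory, fetchSourceFactory)
}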

View File

@ -700,7 +700,7 @@ func (fake *FakeBuild) Artifacts() ([]db.WorkerArtifact, error) {
ret, specificReturn := fake.artifactsReturnsOnCall[len(fake.artifactsArgsForCall)]
fake.artifactsArgsForCall = append(fake.artifactsArgsForCall, struct {
}{})
fake.recordInvocation("Artifacts", []interface{}{})
fake.recordInvocation("ArtifactRepository", []interface{}{})
fake.artifactsMutex.Unlock()
if fake.ArtifactsStub != nil {
return fake.ArtifactsStub()

View File

@ -12,6 +12,7 @@ import (
//go:generate counterfeiter . WorkerArtifact
// TODO This should be deprecated
type WorkerArtifact interface {
ID() int
Name() string

View File

@ -13,6 +13,7 @@ import (
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/event"
"github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/runtime"
"github.com/concourse/concourse/vars"
)
@ -90,7 +91,7 @@ func (d *getDelegate) Starting(logger lager.Logger) {
logger.Info("starting")
}
func (d *getDelegate) Finished(logger lager.Logger, exitStatus exec.ExitStatus, info exec.VersionInfo) {
func (d *getDelegate) Finished(logger lager.Logger, exitStatus exec.ExitStatus, info runtime.VersionResult) {
// PR#4398: close to flush stdout and stderr
d.Stdout().(io.Closer).Close()
d.Stderr().(io.Closer).Close()
@ -110,7 +111,7 @@ func (d *getDelegate) Finished(logger lager.Logger, exitStatus exec.ExitStatus,
logger.Info("finished", lager.Data{"exit-status": exitStatus})
}
func (d *getDelegate) UpdateVersion(log lager.Logger, plan atc.GetPlan, info exec.VersionInfo) {
func (d *getDelegate) UpdateVersion(log lager.Logger, plan atc.GetPlan, info runtime.VersionResult) {
logger := log.WithData(lager.Data{
"pipeline-name": d.build.PipelineName(),
"pipeline-id": d.build.PipelineID()},
@ -192,7 +193,7 @@ func (d *putDelegate) Starting(logger lager.Logger) {
logger.Info("starting")
}
func (d *putDelegate) Finished(logger lager.Logger, exitStatus exec.ExitStatus, info exec.VersionInfo) {
func (d *putDelegate) Finished(logger lager.Logger, exitStatus exec.ExitStatus, info runtime.VersionResult) {
// PR#4398: close to flush stdout and stderr
d.Stdout().(io.Closer).Close()
d.Stderr().(io.Closer).Close()
@ -212,7 +213,7 @@ func (d *putDelegate) Finished(logger lager.Logger, exitStatus exec.ExitStatus,
logger.Info("finished", lager.Data{"exit-status": exitStatus, "version-info": info})
}
func (d *putDelegate) SaveOutput(log lager.Logger, plan atc.PutPlan, source atc.Source, resourceTypes atc.VersionedResourceTypes, info exec.VersionInfo) {
func (d *putDelegate) SaveOutput(log lager.Logger, plan atc.PutPlan, source atc.Source, resourceTypes atc.VersionedResourceTypes, info runtime.VersionResult) {
logger := log.WithData(lager.Data{
"step": plan.Name,
"resource": plan.Resource,

View File

@ -2,6 +2,7 @@ package builder_test
import (
"errors"
"github.com/concourse/concourse/atc/runtime"
"io"
"time"
@ -47,12 +48,12 @@ var _ = Describe("DelegateFactory", func() {
Describe("GetDelegate", func() {
var (
delegate exec.GetDelegate
info exec.VersionInfo
info runtime.VersionResult
exitStatus exec.ExitStatus
)
BeforeEach(func() {
info = exec.VersionInfo{
info = runtime.VersionResult{
Version: atc.Version{"foo": "bar"},
Metadata: []atc.MetadataField{{Name: "baz", Value: "shmaz"}},
}
@ -157,12 +158,12 @@ var _ = Describe("DelegateFactory", func() {
Describe("PutDelegate", func() {
var (
delegate exec.PutDelegate
info exec.VersionInfo
info runtime.VersionResult
exitStatus exec.ExitStatus
)
BeforeEach(func() {
info = exec.VersionInfo{
info = runtime.VersionResult{
Version: atc.Version{"foo": "bar"},
Metadata: []atc.MetadataField{{Name: "baz", Value: "shmaz"}},
}
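The delegates now take runtime.VersionResult where they previously took exec.VersionInfo. Judging by how the tests above construct it, the type is assumed to carry the same two fields as the old struct; this sketch is an assumption, not the actual runtime package source.

package runtime

import "github.com/concourse/concourse/atc"

// VersionResult is assumed to replace exec.VersionInfo one-for-one: the
// version a step produced plus its metadata fields.
type VersionResult struct {
	Version  atc.Version
	Metadata []atc.MetadataField
}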

View File

@ -9,7 +9,6 @@ import (
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/db/lock"
"github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/fetcher"
"github.com/concourse/concourse/atc/resource"
"github.com/concourse/concourse/atc/worker"
)
@ -17,7 +16,7 @@ import (
type stepFactory struct {
pool worker.Pool
client worker.Client
resourceFetcher fetcher.Fetcher
resourceFetcher worker.Fetcher
teamFactory db.TeamFactory
resourceCacheFactory db.ResourceCacheFactory
resourceConfigFactory db.ResourceConfigFactory
@ -30,7 +29,7 @@ type stepFactory struct {
func NewStepFactory(
pool worker.Pool,
client worker.Client,
resourceFetcher fetcher.Fetcher,
resourceFetcher worker.Fetcher,
teamFactory db.TeamFactory,
resourceCacheFactory db.ResourceCacheFactory,
resourceConfigFactory db.ResourceConfigFactory,
@ -92,7 +91,7 @@ func (factory *stepFactory) PutStep(
factory.resourceFactory,
factory.resourceConfigFactory,
factory.strategy,
factory.pool,
factory.client,
delegate,
)

View File

@ -6,7 +6,7 @@ import (
"sync"
. "github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/exec/execfakes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -20,7 +20,7 @@ var _ = Describe("Aggregate", func() {
fakeStepA *execfakes.FakeStep
fakeStepB *execfakes.FakeStep
repo *artifact.Repository
repo *build.Repository
state *execfakes.FakeRunState
step Step
@ -38,7 +38,7 @@ var _ = Describe("Aggregate", func() {
fakeStepB,
}
repo = artifact.NewRepository()
repo = build.NewRepository()
state = new(execfakes.FakeRunState)
state.ArtifactsReturns(repo)
})

View File

@ -4,11 +4,13 @@ import (
"context"
"fmt"
"github.com/concourse/concourse/atc/runtime"
"code.cloudfoundry.org/lager"
"code.cloudfoundry.org/lager/lagerctx"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/worker"
)
@ -47,7 +49,7 @@ func (step *ArtifactInputStep) Run(ctx context.Context, state RunState) error {
return err
}
volume, found, err := buildArtifact.Volume(step.build.TeamID())
createdVolume, found, err := buildArtifact.Volume(step.build.TeamID())
if err != nil {
return err
}
@ -56,22 +58,34 @@ func (step *ArtifactInputStep) Run(ctx context.Context, state RunState) error {
return ArtifactVolumeNotFoundError{buildArtifact.Name()}
}
workerVolume, found, err := step.workerClient.FindVolume(logger, volume.TeamID(), volume.Handle())
if err != nil {
return err
art := runtime.TaskArtifact{
VolumeHandle: createdVolume.Handle(),
}
if !found {
return ArtifactVolumeNotFoundError{buildArtifact.Name()}
}
//volume, found, err := buildArtifact.Volume(step.build.TeamID())
//if err != nil {
// return err
//}
//
//if !found {
// return ArtifactVolumeNotFoundError{buildArtifact.ArtifactName()}
//}
//
//workerVolume, found, err := step.workerClient.FindVolume(logger, volume.TeamID(), volume.Handle())
//if err != nil {
// return err
//}
//
//if !found {
// return ArtifactVolumeNotFoundError{buildArtifact.ArtifactName()}
//}
logger.Info("register-artifact-source", lager.Data{
"artifact_id": buildArtifact.ID(),
"handle": workerVolume.Handle(),
"handle": art.ID(),
})
source := NewTaskArtifactSource(workerVolume)
state.Artifacts().RegisterSource(artifact.Name(step.plan.ArtifactInput.Name), source)
state.ArtifactRepository().RegisterArtifact(build.ArtifactName(step.plan.ArtifactInput.Name), &art)
step.succeeded = true

View File

@ -124,7 +124,7 @@ var _ = Describe("ArtifactInputStep", func() {
})
It("registers the worker volume as an artifact source", func() {
source, found := state.Artifacts().SourceFor("some-name")
source, found := state.ArtifactRepository().SourceFor("some-name")
Expect(stepErr).NotTo(HaveOccurred())
Expect(found).To(BeTrue())

View File

@ -8,7 +8,7 @@ import (
"code.cloudfoundry.org/lager/lagerctx"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/worker"
)
@ -44,24 +44,28 @@ func (step *ArtifactOutputStep) Run(ctx context.Context, state RunState) error {
outputName := step.plan.ArtifactOutput.Name
source, found := state.Artifacts().SourceFor(artifact.Name(outputName))
buildArtifact, found := state.ArtifactRepository().ArtifactFor(build.ArtifactName(outputName))
if !found {
return ArtifactNotFoundError{outputName}
}
volume, ok := source.(worker.Volume)
if !ok {
volume, found, err := step.workerClient.FindVolume(logger, step.build.TeamID(), buildArtifact.ID())
if err != nil {
return err
}
if !found {
return ArtifactNotFoundError{outputName}
}
artifact, err := volume.InitializeArtifact(outputName, step.build.ID())
dbWorkerArtifact, err := volume.InitializeArtifact(outputName, step.build.ID())
if err != nil {
return err
}
logger.Info("initialize-artifact-from-source", lager.Data{
"handle": volume.Handle(),
"artifact_id": artifact.ID(),
"artifact_id": dbWorkerArtifact.ID(),
})
step.succeeded = true

View File

@ -8,7 +8,7 @@ import (
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db/dbfakes"
"github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/exec/execfakes"
"github.com/concourse/concourse/atc/worker/workerfakes"
. "github.com/onsi/ginkgo"
@ -63,7 +63,7 @@ var _ = Describe("ArtifactOutputStep", func() {
Context("when the source is not a worker.Volume", func() {
BeforeEach(func() {
fakeSource := new(workerfakes.FakeArtifactSource)
state.Artifacts().RegisterSource(artifact.Name("some-name"), fakeSource)
state.ArtifactRepository().RegisterSource(build.ArtifactName("some-name"), fakeSource)
})
It("returns the error", func() {
Expect(stepErr).To(HaveOccurred())
@ -78,7 +78,7 @@ var _ = Describe("ArtifactOutputStep", func() {
fakeWorkerVolume.HandleReturns("handle")
source := exec.NewTaskArtifactSource(fakeWorkerVolume)
state.Artifacts().RegisterSource(artifact.Name("some-name"), source)
state.ArtifactRepository().RegisterSource(build.ArtifactName("some-name"), source)
})
Context("when initializing the artifact fails", func() {

View File

@ -1,4 +1,4 @@
package artifact_test
package build_test
import (
"testing"

View File

@ -7,7 +7,7 @@ import (
"sync"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/worker"
)
@ -282,4 +282,4 @@ func (fake *FakeRegisterableSource) recordInvocation(key string, args []interfac
fake.invocations[key] = append(fake.invocations[key], args)
}
var _ artifact.RegisterableSource = new(FakeRegisterableSource)
var _ build.RegisterableSource = new(FakeRegisterableSource)

View File

@ -0,0 +1,101 @@
// Code generated by counterfeiter. DO NOT EDIT.
package buildfakes
import (
"sync"
"github.com/concourse/concourse/atc/exec/build"
)
type FakeRegisterableArtifact struct {
IDStub func() string
iDMutex sync.RWMutex
iDArgsForCall []struct {
}
iDReturns struct {
result1 string
}
iDReturnsOnCall map[int]struct {
result1 string
}
invocations map[string][][]interface{}
invocationsMutex sync.RWMutex
}
func (fake *FakeRegisterableArtifact) ID() string {
fake.iDMutex.Lock()
ret, specificReturn := fake.iDReturnsOnCall[len(fake.iDArgsForCall)]
fake.iDArgsForCall = append(fake.iDArgsForCall, struct {
}{})
fake.recordInvocation("ID", []interface{}{})
fake.iDMutex.Unlock()
if fake.IDStub != nil {
return fake.IDStub()
}
if specificReturn {
return ret.result1
}
fakeReturns := fake.iDReturns
return fakeReturns.result1
}
func (fake *FakeRegisterableArtifact) IDCallCount() int {
fake.iDMutex.RLock()
defer fake.iDMutex.RUnlock()
return len(fake.iDArgsForCall)
}
func (fake *FakeRegisterableArtifact) IDCalls(stub func() string) {
fake.iDMutex.Lock()
defer fake.iDMutex.Unlock()
fake.IDStub = stub
}
func (fake *FakeRegisterableArtifact) IDReturns(result1 string) {
fake.iDMutex.Lock()
defer fake.iDMutex.Unlock()
fake.IDStub = nil
fake.iDReturns = struct {
result1 string
}{result1}
}
func (fake *FakeRegisterableArtifact) IDReturnsOnCall(i int, result1 string) {
fake.iDMutex.Lock()
defer fake.iDMutex.Unlock()
fake.IDStub = nil
if fake.iDReturnsOnCall == nil {
fake.iDReturnsOnCall = make(map[int]struct {
result1 string
})
}
fake.iDReturnsOnCall[i] = struct {
result1 string
}{result1}
}
func (fake *FakeRegisterableArtifact) Invocations() map[string][][]interface{} {
fake.invocationsMutex.RLock()
defer fake.invocationsMutex.RUnlock()
fake.iDMutex.RLock()
defer fake.iDMutex.RUnlock()
copiedInvocations := map[string][][]interface{}{}
for key, value := range fake.invocations {
copiedInvocations[key] = value
}
return copiedInvocations
}
func (fake *FakeRegisterableArtifact) recordInvocation(key string, args []interface{}) {
fake.invocationsMutex.Lock()
defer fake.invocationsMutex.Unlock()
if fake.invocations == nil {
fake.invocations = map[string][][]interface{}{}
}
if fake.invocations[key] == nil {
fake.invocations[key] = [][]interface{}{}
}
fake.invocations[key] = append(fake.invocations[key], args)
}
var _ build.RegisterableArtifact = new(FakeRegisterableArtifact)

View File

@ -1,4 +1,4 @@
package artifact
package build
import (
"code.cloudfoundry.org/lager"
@ -8,14 +8,14 @@ import (
"sync"
"io"
"github.com/concourse/concourse/atc/worker"
"github.com/concourse/concourse/atc/runtime"
)
// Name is just a string, with its own type to make interfaces using it
// ArtifactName is just a string, with its own type to make interfaces using it
// more self-documenting.
type Name string
type ArtifactName string
// Repository is the mapping from a Name to an ArtifactSource.
// Repository is the mapping from an ArtifactName to an Artifact.
// Steps will both populate this map with new artifacts (e.g. the resource
// fetched by a Get step), and look up required artifacts (e.g. the inputs
// configured for a Task step).
@ -23,54 +23,51 @@ type Name string
// There is only one ArtifactRepository for the duration of a build plan's
// execution.
//
// ArtifactRepository is, itself, an ArtifactSource. As an ArtifactSource it acts
// as the set of all ArtifactSources it contains, as if they were each in
// subdirectories corresponding to their ArtifactName.
type Repository struct {
repo map[Name]worker.ArtifactSource
repo map[ArtifactName]runtime.Artifact
repoL sync.RWMutex
}
// NewRepository constructs a new repository.
func NewRepository() *Repository {
return &Repository{
repo: make(map[Name]worker.ArtifactSource),
repo: make(map[ArtifactName]runtime.Artifact),
}
}
//go:generate counterfeiter . RegisterableSource
// A RegisterableSource artifact is an ArtifactSource which can be added to the registry
type RegisterableSource interface {
worker.ArtifactSource
//go:generate counterfeiter . RegisterableArtifact
// A RegisterableArtifact is an Artifact which can be added to the registry
type RegisterableArtifact interface {
runtime.Artifact
}
// RegisterArtifact inserts an Artifact into the map under the given
// ArtifactName. Producers of artifacts, e.g. the Get step and the Task step,
// will call this after they've successfully produced their artifact(s).
func (repo *Repository) RegisterSource(name Name, source RegisterableSource) {
func (repo *Repository) RegisterArtifact(name ArtifactName, artifact RegisterableArtifact) {
repo.repoL.Lock()
repo.repo[name] = source
repo.repo[name] = artifact
repo.repoL.Unlock()
}
// ArtifactFor looks up an Artifact for the given ArtifactName. Consumers of
// artifacts, e.g. the Task step, will call this to locate their dependencies.
func (repo *Repository) SourceFor(name Name) (worker.ArtifactSource, bool) {
func (repo *Repository) ArtifactFor(name ArtifactName) (runtime.Artifact, bool) {
repo.repoL.RLock()
source, found := repo.repo[name]
artifact, found := repo.repo[name]
repo.repoL.RUnlock()
return source, found
return artifact, found
}
// AsMap extracts the current contents of the ArtifactRepository into a new map
// and returns it. Changes to the returned map or the ArtifactRepository will not
// affect each other.
func (repo *Repository) AsMap() map[Name]worker.ArtifactSource {
result := make(map[Name]worker.ArtifactSource)
func (repo *Repository) AsMap() map[ArtifactName]runtime.Artifact {
result := make(map[ArtifactName]runtime.Artifact)
repo.repoL.RLock()
for name, source := range repo.repo {
result[name] = source
for name, artifact := range repo.repo {
result[name] = artifact
}
repo.repoL.RUnlock()
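A usage sketch (not part of the commit) of the renamed repository API: a producing step registers an artifact under its plan name and a consuming step looks it up. It assumes the runtime.Artifact/TaskArtifact sketch above.

package build_test

import (
	"github.com/concourse/concourse/atc/exec/build"
	"github.com/concourse/concourse/atc/runtime"
)

func exampleRepositoryUsage() (runtime.Artifact, bool) {
	repo := build.NewRepository()

	// A producer (e.g. the get step) registers its artifact under the plan name.
	repo.RegisterArtifact(build.ArtifactName("some-input"), &runtime.TaskArtifact{VolumeHandle: "abc-123"})

	// A consumer (e.g. the task step) looks it up by that name.
	return repo.ArtifactFor(build.ArtifactName("some-input"))
}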

View File

@ -1,8 +1,8 @@
package artifact_test
package build_test
import (
. "github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/artifact/artifactfakes"
. "github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/exec/build/artifactfakes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"io"

View File

@ -5,7 +5,7 @@ import (
"errors"
"github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/exec/execfakes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -19,7 +19,7 @@ var _ = Describe("Ensure Step", func() {
step *execfakes.FakeStep
hook *execfakes.FakeStep
repo *artifact.Repository
repo *build.Repository
state *execfakes.FakeRunState
ensure exec.Step
@ -44,7 +44,7 @@ var _ = Describe("Ensure Step", func() {
return ctx.Err()
}
repo = artifact.NewRepository()
repo = build.NewRepository()
state = new(execfakes.FakeRunState)
state.ArtifactsReturns(repo)

View File

@ -9,6 +9,7 @@ import (
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/runtime"
"github.com/concourse/concourse/vars"
)
@ -19,12 +20,12 @@ type FakeGetDelegate struct {
arg1 lager.Logger
arg2 string
}
FinishedStub func(lager.Logger, exec.ExitStatus, exec.VersionInfo)
FinishedStub func(lager.Logger, exec.ExitStatus, runtime.VersionResult)
finishedMutex sync.RWMutex
finishedArgsForCall []struct {
arg1 lager.Logger
arg2 exec.ExitStatus
arg3 exec.VersionInfo
arg3 runtime.VersionResult
}
ImageVersionDeterminedStub func(db.UsedResourceCache) error
imageVersionDeterminedMutex sync.RWMutex
@ -67,12 +68,12 @@ type FakeGetDelegate struct {
stdoutReturnsOnCall map[int]struct {
result1 io.Writer
}
UpdateVersionStub func(lager.Logger, atc.GetPlan, exec.VersionInfo)
UpdateVersionStub func(lager.Logger, atc.GetPlan, runtime.VersionResult)
updateVersionMutex sync.RWMutex
updateVersionArgsForCall []struct {
arg1 lager.Logger
arg2 atc.GetPlan
arg3 exec.VersionInfo
arg3 runtime.VersionResult
}
VariablesStub func() vars.CredVarsTracker
variablesMutex sync.RWMutex
@ -120,12 +121,12 @@ func (fake *FakeGetDelegate) ErroredArgsForCall(i int) (lager.Logger, string) {
return argsForCall.arg1, argsForCall.arg2
}
func (fake *FakeGetDelegate) Finished(arg1 lager.Logger, arg2 exec.ExitStatus, arg3 exec.VersionInfo) {
func (fake *FakeGetDelegate) Finished(arg1 lager.Logger, arg2 exec.ExitStatus, arg3 runtime.VersionResult) {
fake.finishedMutex.Lock()
fake.finishedArgsForCall = append(fake.finishedArgsForCall, struct {
arg1 lager.Logger
arg2 exec.ExitStatus
arg3 exec.VersionInfo
arg3 runtime.VersionResult
}{arg1, arg2, arg3})
fake.recordInvocation("Finished", []interface{}{arg1, arg2, arg3})
fake.finishedMutex.Unlock()
@ -140,13 +141,13 @@ func (fake *FakeGetDelegate) FinishedCallCount() int {
return len(fake.finishedArgsForCall)
}
func (fake *FakeGetDelegate) FinishedCalls(stub func(lager.Logger, exec.ExitStatus, exec.VersionInfo)) {
func (fake *FakeGetDelegate) FinishedCalls(stub func(lager.Logger, exec.ExitStatus, runtime.VersionResult)) {
fake.finishedMutex.Lock()
defer fake.finishedMutex.Unlock()
fake.FinishedStub = stub
}
func (fake *FakeGetDelegate) FinishedArgsForCall(i int) (lager.Logger, exec.ExitStatus, exec.VersionInfo) {
func (fake *FakeGetDelegate) FinishedArgsForCall(i int) (lager.Logger, exec.ExitStatus, runtime.VersionResult) {
fake.finishedMutex.RLock()
defer fake.finishedMutex.RUnlock()
argsForCall := fake.finishedArgsForCall[i]
@ -379,12 +380,12 @@ func (fake *FakeGetDelegate) StdoutReturnsOnCall(i int, result1 io.Writer) {
}{result1}
}
func (fake *FakeGetDelegate) UpdateVersion(arg1 lager.Logger, arg2 atc.GetPlan, arg3 exec.VersionInfo) {
func (fake *FakeGetDelegate) UpdateVersion(arg1 lager.Logger, arg2 atc.GetPlan, arg3 runtime.VersionResult) {
fake.updateVersionMutex.Lock()
fake.updateVersionArgsForCall = append(fake.updateVersionArgsForCall, struct {
arg1 lager.Logger
arg2 atc.GetPlan
arg3 exec.VersionInfo
arg3 runtime.VersionResult
}{arg1, arg2, arg3})
fake.recordInvocation("UpdateVersion", []interface{}{arg1, arg2, arg3})
fake.updateVersionMutex.Unlock()
@ -399,13 +400,13 @@ func (fake *FakeGetDelegate) UpdateVersionCallCount() int {
return len(fake.updateVersionArgsForCall)
}
func (fake *FakeGetDelegate) UpdateVersionCalls(stub func(lager.Logger, atc.GetPlan, exec.VersionInfo)) {
func (fake *FakeGetDelegate) UpdateVersionCalls(stub func(lager.Logger, atc.GetPlan, runtime.VersionResult)) {
fake.updateVersionMutex.Lock()
defer fake.updateVersionMutex.Unlock()
fake.UpdateVersionStub = stub
}
func (fake *FakeGetDelegate) UpdateVersionArgsForCall(i int) (lager.Logger, atc.GetPlan, exec.VersionInfo) {
func (fake *FakeGetDelegate) UpdateVersionArgsForCall(i int) (lager.Logger, atc.GetPlan, runtime.VersionResult) {
fake.updateVersionMutex.RLock()
defer fake.updateVersionMutex.RUnlock()
argsForCall := fake.updateVersionArgsForCall[i]

View File

@ -9,6 +9,7 @@ import (
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/runtime"
"github.com/concourse/concourse/vars"
)
@ -19,12 +20,12 @@ type FakePutDelegate struct {
arg1 lager.Logger
arg2 string
}
FinishedStub func(lager.Logger, exec.ExitStatus, exec.VersionInfo)
FinishedStub func(lager.Logger, exec.ExitStatus, runtime.VersionResult)
finishedMutex sync.RWMutex
finishedArgsForCall []struct {
arg1 lager.Logger
arg2 exec.ExitStatus
arg3 exec.VersionInfo
arg3 runtime.VersionResult
}
ImageVersionDeterminedStub func(db.UsedResourceCache) error
imageVersionDeterminedMutex sync.RWMutex
@ -42,14 +43,14 @@ type FakePutDelegate struct {
initializingArgsForCall []struct {
arg1 lager.Logger
}
SaveOutputStub func(lager.Logger, atc.PutPlan, atc.Source, atc.VersionedResourceTypes, exec.VersionInfo)
SaveOutputStub func(lager.Logger, atc.PutPlan, atc.Source, atc.VersionedResourceTypes, runtime.VersionResult)
saveOutputMutex sync.RWMutex
saveOutputArgsForCall []struct {
arg1 lager.Logger
arg2 atc.PutPlan
arg3 atc.Source
arg4 atc.VersionedResourceTypes
arg5 exec.VersionInfo
arg5 runtime.VersionResult
}
StartingStub func(lager.Logger)
startingMutex sync.RWMutex
@ -122,12 +123,12 @@ func (fake *FakePutDelegate) ErroredArgsForCall(i int) (lager.Logger, string) {
return argsForCall.arg1, argsForCall.arg2
}
func (fake *FakePutDelegate) Finished(arg1 lager.Logger, arg2 exec.ExitStatus, arg3 exec.VersionInfo) {
func (fake *FakePutDelegate) Finished(arg1 lager.Logger, arg2 exec.ExitStatus, arg3 runtime.VersionResult) {
fake.finishedMutex.Lock()
fake.finishedArgsForCall = append(fake.finishedArgsForCall, struct {
arg1 lager.Logger
arg2 exec.ExitStatus
arg3 exec.VersionInfo
arg3 runtime.VersionResult
}{arg1, arg2, arg3})
fake.recordInvocation("Finished", []interface{}{arg1, arg2, arg3})
fake.finishedMutex.Unlock()
@ -142,13 +143,13 @@ func (fake *FakePutDelegate) FinishedCallCount() int {
return len(fake.finishedArgsForCall)
}
func (fake *FakePutDelegate) FinishedCalls(stub func(lager.Logger, exec.ExitStatus, exec.VersionInfo)) {
func (fake *FakePutDelegate) FinishedCalls(stub func(lager.Logger, exec.ExitStatus, runtime.VersionResult)) {
fake.finishedMutex.Lock()
defer fake.finishedMutex.Unlock()
fake.FinishedStub = stub
}
func (fake *FakePutDelegate) FinishedArgsForCall(i int) (lager.Logger, exec.ExitStatus, exec.VersionInfo) {
func (fake *FakePutDelegate) FinishedArgsForCall(i int) (lager.Logger, exec.ExitStatus, runtime.VersionResult) {
fake.finishedMutex.RLock()
defer fake.finishedMutex.RUnlock()
argsForCall := fake.finishedArgsForCall[i]
@ -246,14 +247,14 @@ func (fake *FakePutDelegate) InitializingArgsForCall(i int) lager.Logger {
return argsForCall.arg1
}
func (fake *FakePutDelegate) SaveOutput(arg1 lager.Logger, arg2 atc.PutPlan, arg3 atc.Source, arg4 atc.VersionedResourceTypes, arg5 exec.VersionInfo) {
func (fake *FakePutDelegate) SaveOutput(arg1 lager.Logger, arg2 atc.PutPlan, arg3 atc.Source, arg4 atc.VersionedResourceTypes, arg5 runtime.VersionResult) {
fake.saveOutputMutex.Lock()
fake.saveOutputArgsForCall = append(fake.saveOutputArgsForCall, struct {
arg1 lager.Logger
arg2 atc.PutPlan
arg3 atc.Source
arg4 atc.VersionedResourceTypes
arg5 exec.VersionInfo
arg5 runtime.VersionResult
}{arg1, arg2, arg3, arg4, arg5})
fake.recordInvocation("SaveOutput", []interface{}{arg1, arg2, arg3, arg4, arg5})
fake.saveOutputMutex.Unlock()
@ -268,13 +269,13 @@ func (fake *FakePutDelegate) SaveOutputCallCount() int {
return len(fake.saveOutputArgsForCall)
}
func (fake *FakePutDelegate) SaveOutputCalls(stub func(lager.Logger, atc.PutPlan, atc.Source, atc.VersionedResourceTypes, exec.VersionInfo)) {
func (fake *FakePutDelegate) SaveOutputCalls(stub func(lager.Logger, atc.PutPlan, atc.Source, atc.VersionedResourceTypes, runtime.VersionResult)) {
fake.saveOutputMutex.Lock()
defer fake.saveOutputMutex.Unlock()
fake.SaveOutputStub = stub
}
func (fake *FakePutDelegate) SaveOutputArgsForCall(i int) (lager.Logger, atc.PutPlan, atc.Source, atc.VersionedResourceTypes, exec.VersionInfo) {
func (fake *FakePutDelegate) SaveOutputArgsForCall(i int) (lager.Logger, atc.PutPlan, atc.Source, atc.VersionedResourceTypes, runtime.VersionResult) {
fake.saveOutputMutex.RLock()
defer fake.saveOutputMutex.RUnlock()
argsForCall := fake.saveOutputArgsForCall[i]

View File

@ -6,19 +6,19 @@ import (
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
)
type FakeRunState struct {
ArtifactsStub func() *artifact.Repository
ArtifactsStub func() *build.Repository
artifactsMutex sync.RWMutex
artifactsArgsForCall []struct {
}
artifactsReturns struct {
result1 *artifact.Repository
result1 *build.Repository
}
artifactsReturnsOnCall map[int]struct {
result1 *artifact.Repository
result1 *build.Repository
}
ResultStub func(atc.PlanID, interface{}) bool
resultMutex sync.RWMutex
@ -42,12 +42,12 @@ type FakeRunState struct {
invocationsMutex sync.RWMutex
}
func (fake *FakeRunState) Artifacts() *artifact.Repository {
func (fake *FakeRunState) ArtifactRepository() *build.Repository {
fake.artifactsMutex.Lock()
ret, specificReturn := fake.artifactsReturnsOnCall[len(fake.artifactsArgsForCall)]
fake.artifactsArgsForCall = append(fake.artifactsArgsForCall, struct {
}{})
fake.recordInvocation("Artifacts", []interface{}{})
fake.recordInvocation("ArtifactRepository", []interface{}{})
fake.artifactsMutex.Unlock()
if fake.ArtifactsStub != nil {
return fake.ArtifactsStub()
@ -65,32 +65,32 @@ func (fake *FakeRunState) ArtifactsCallCount() int {
return len(fake.artifactsArgsForCall)
}
func (fake *FakeRunState) ArtifactsCalls(stub func() *artifact.Repository) {
func (fake *FakeRunState) ArtifactsCalls(stub func() *build.Repository) {
fake.artifactsMutex.Lock()
defer fake.artifactsMutex.Unlock()
fake.ArtifactsStub = stub
}
func (fake *FakeRunState) ArtifactsReturns(result1 *artifact.Repository) {
func (fake *FakeRunState) ArtifactsReturns(result1 *build.Repository) {
fake.artifactsMutex.Lock()
defer fake.artifactsMutex.Unlock()
fake.ArtifactsStub = nil
fake.artifactsReturns = struct {
result1 *artifact.Repository
result1 *build.Repository
}{result1}
}
func (fake *FakeRunState) ArtifactsReturnsOnCall(i int, result1 *artifact.Repository) {
func (fake *FakeRunState) ArtifactsReturnsOnCall(i int, result1 *build.Repository) {
fake.artifactsMutex.Lock()
defer fake.artifactsMutex.Unlock()
fake.ArtifactsStub = nil
if fake.artifactsReturnsOnCall == nil {
fake.artifactsReturnsOnCall = make(map[int]struct {
result1 *artifact.Repository
result1 *build.Repository
})
}
fake.artifactsReturnsOnCall[i] = struct {
result1 *artifact.Repository
result1 *build.Repository
}{result1}
}

View File

@ -8,16 +8,16 @@ import (
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
)
type FakeTaskConfigSource struct {
FetchConfigStub func(context.Context, lager.Logger, *artifact.Repository) (atc.TaskConfig, error)
FetchConfigStub func(context.Context, lager.Logger, *build.Repository) (atc.TaskConfig, error)
fetchConfigMutex sync.RWMutex
fetchConfigArgsForCall []struct {
arg1 context.Context
arg2 lager.Logger
arg3 *artifact.Repository
arg3 *build.Repository
}
fetchConfigReturns struct {
result1 atc.TaskConfig
@ -41,13 +41,13 @@ type FakeTaskConfigSource struct {
invocationsMutex sync.RWMutex
}
func (fake *FakeTaskConfigSource) FetchConfig(arg1 context.Context, arg2 lager.Logger, arg3 *artifact.Repository) (atc.TaskConfig, error) {
func (fake *FakeTaskConfigSource) FetchConfig(arg1 context.Context, arg2 lager.Logger, arg3 *build.Repository) (atc.TaskConfig, error) {
fake.fetchConfigMutex.Lock()
ret, specificReturn := fake.fetchConfigReturnsOnCall[len(fake.fetchConfigArgsForCall)]
fake.fetchConfigArgsForCall = append(fake.fetchConfigArgsForCall, struct {
arg1 context.Context
arg2 lager.Logger
arg3 *artifact.Repository
arg3 *build.Repository
}{arg1, arg2, arg3})
fake.recordInvocation("FetchConfig", []interface{}{arg1, arg2, arg3})
fake.fetchConfigMutex.Unlock()
@ -67,13 +67,13 @@ func (fake *FakeTaskConfigSource) FetchConfigCallCount() int {
return len(fake.fetchConfigArgsForCall)
}
func (fake *FakeTaskConfigSource) FetchConfigCalls(stub func(context.Context, lager.Logger, *artifact.Repository) (atc.TaskConfig, error)) {
func (fake *FakeTaskConfigSource) FetchConfigCalls(stub func(context.Context, lager.Logger, *build.Repository) (atc.TaskConfig, error)) {
fake.fetchConfigMutex.Lock()
defer fake.fetchConfigMutex.Unlock()
fake.FetchConfigStub = stub
}
func (fake *FakeTaskConfigSource) FetchConfigArgsForCall(i int) (context.Context, lager.Logger, *artifact.Repository) {
func (fake *FakeTaskConfigSource) FetchConfigArgsForCall(i int) (context.Context, lager.Logger, *build.Repository) {
fake.fetchConfigMutex.RLock()
defer fake.fetchConfigMutex.RUnlock()
argsForCall := fake.fetchConfigArgsForCall[i]

View File

@ -1,24 +1,20 @@
package exec
import (
"archive/tar"
"context"
"fmt"
"github.com/concourse/concourse/vars"
"io"
"github.com/hashicorp/go-multierror"
"code.cloudfoundry.org/lager"
"code.cloudfoundry.org/lager/lagerctx"
"github.com/DataDog/zstd"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/creds"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/fetcher"
"github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/resource"
"github.com/concourse/concourse/atc/runtime"
"github.com/concourse/concourse/atc/worker"
"github.com/concourse/concourse/vars"
)
type ErrPipelineNotFound struct {
@ -49,10 +45,10 @@ type GetDelegate interface {
Initializing(lager.Logger)
Starting(lager.Logger)
Finished(lager.Logger, ExitStatus, VersionInfo)
Finished(lager.Logger, ExitStatus, runtime.VersionResult)
Errored(lager.Logger, string)
UpdateVersion(lager.Logger, atc.GetPlan, VersionInfo)
UpdateVersion(lager.Logger, atc.GetPlan, runtime.VersionResult)
}
// GetStep will fetch a version of a resource on a worker that supports the
@ -62,9 +58,10 @@ type GetStep struct {
plan atc.GetPlan
metadata StepMetadata
containerMetadata db.ContainerMetadata
resourceFetcher fetcher.Fetcher
resourceFetcher worker.Fetcher
resourceCacheFactory db.ResourceCacheFactory
strategy worker.ContainerPlacementStrategy
workerClient worker.Client
workerPool worker.Pool
delegate GetDelegate
succeeded bool
@ -75,7 +72,7 @@ func NewGetStep(
plan atc.GetPlan,
metadata StepMetadata,
containerMetadata db.ContainerMetadata,
resourceFetcher fetcher.Fetcher,
resourceFetcher worker.Fetcher,
resourceCacheFactory db.ResourceCacheFactory,
strategy worker.ContainerPlacementStrategy,
workerPool worker.Pool,
@ -176,6 +173,7 @@ func (step *GetStep) Run(ctx context.Context, state RunState) error {
return err
}
// TODO containerOwner accepts workerName and this should be extracted out
resourceInstance := resource.NewResourceInstance(
resource.ResourceType(step.plan.Type),
version,
@ -185,60 +183,82 @@ func (step *GetStep) Run(ctx context.Context, state RunState) error {
resourceCache,
db.NewBuildStepContainerOwner(step.metadata.BuildID, step.planID, step.metadata.TeamID),
)
// Stuff above is all part of Concourse CORE
chosenWorker, err := step.workerPool.FindOrChooseWorkerForContainer(
events := make(chan runtime.Event, 1)
go func(logger lager.Logger, events chan runtime.Event, delegate GetDelegate) {
for {
ev := <-events
switch {
case ev.EventType == runtime.InitializingEvent:
step.delegate.Initializing(logger)
case ev.EventType == runtime.StartingEvent:
step.delegate.Starting(logger)
case ev.EventType == runtime.FinishedEvent:
step.delegate.Finished(logger, ExitStatus(ev.ExitStatus), ev.VersionResult)
default:
return
}
}
}(logger, events, step.delegate)
resourceDir := resource.ResourcesDir("get")
// start of workerClient.RunGetStep?
getResult, err := step.workerClient.RunGetStep(
ctx,
logger,
resourceInstance.ContainerOwner(),
containerSpec,
workerSpec,
step.strategy,
)
if err != nil {
return err
}
step.delegate.Starting(logger)
versionedSource, err := step.resourceFetcher.Fetch(
ctx,
logger,
step.containerMetadata,
chosenWorker,
containerSpec,
resourceTypes,
resourceInstance,
step.resourceFetcher,
step.delegate,
resourceCache,
worker.ProcessSpec{
Path: "/opt/resource/out",
Args: []string{resourceDir},
StdoutWriter: step.delegate.Stdout(),
StderrWriter: step.delegate.Stderr(),
},
events,
)
if err != nil {
logger.Error("failed-to-fetch-resource", err)
if err, ok := err.(resource.ErrResourceScriptFailed); ok {
step.delegate.Finished(logger, ExitStatus(err.ExitStatus), VersionInfo{})
return nil
}
return err
}
state.Artifacts().RegisterSource(artifact.Name(step.plan.Name), &getArtifactSource{
resourceInstance: resourceInstance,
versionedSource: versionedSource,
})
if getResult.Status == 0 {
// TODO move all the state changing logic from resourceInstanceFetchSource.Create to here
//err = volume.InitializeResourceCache(s.resourceInstance.ResourceCache())
//if err != nil {
// sLog.Error("failed-to-initialize-cache", err)
// return nil, err
//}
//
//err = s.dbResourceCacheFactory.UpdateResourceCacheMetadata(s.resourceInstance.ResourceCache(), versionedSource.Metadata())
//if err != nil {
// s.logger.Error("failed-to-update-resource-cache-metadata", err, lager.Data{"resource-cache": s.resourceInstance.ResourceCache()})
// return nil, err
//}
versionInfo := VersionInfo{
Version: versionedSource.Version(),
Metadata: versionedSource.Metadata(),
state.ArtifactRepository().RegisterArtifact(build.ArtifactName(step.plan.Name), &getResult.GetArtifact)
if step.plan.Resource != "" {
step.delegate.UpdateVersion(logger, step.plan, getResult.VersionResult)
}
step.succeeded = true
} else {
// TODO have a way of bubbling up the error message from getResult.Err
}
if step.plan.Resource != "" {
step.delegate.UpdateVersion(logger, step.plan, versionInfo)
}
step.succeeded = true
step.delegate.Finished(logger, 0, versionInfo)
return nil
}
@ -247,103 +267,10 @@ func (step *GetStep) Succeeded() bool {
return step.succeeded
}
type getArtifactSource struct {
resourceInstance resource.ResourceInstance
versionedSource resource.VersionedSource
}
// VolumeOn locates the cache for the GetStep's resource and version on the
// given worker.
func (s *getArtifactSource) VolumeOn(logger lager.Logger, worker worker.Worker) (worker.Volume, bool, error) {
return s.resourceInstance.FindOn(logger.Session("volume-on"), worker)
}
// StreamTo streams the resource's data to the destination.
func (s *getArtifactSource) StreamTo(ctx context.Context, logger lager.Logger, destination worker.ArtifactDestination) error {
return streamToHelper(ctx, s.versionedSource, logger, destination)
}
// StreamFile streams a single file out of the resource.
func (s *getArtifactSource) StreamFile(ctx context.Context, logger lager.Logger, path string) (io.ReadCloser, error) {
return streamFileHelper(ctx, s.versionedSource, logger, path)
}
func streamToHelper(
ctx context.Context,
s interface {
StreamOut(context.Context, string) (io.ReadCloser, error)
},
logger lager.Logger,
destination worker.ArtifactDestination,
) error {
logger.Debug("start")
defer logger.Debug("end")
out, err := s.StreamOut(ctx, ".")
if err != nil {
logger.Error("failed", err)
return err
}
defer out.Close()
err = destination.StreamIn(ctx, ".", out)
if err != nil {
logger.Error("failed", err)
return err
}
return nil
}
func streamFileHelper(
ctx context.Context,
s interface {
StreamOut(context.Context, string) (io.ReadCloser, error)
},
logger lager.Logger,
path string,
) (io.ReadCloser, error) {
out, err := s.StreamOut(ctx, path)
if err != nil {
return nil, err
}
zstdReader := zstd.NewReader(out)
tarReader := tar.NewReader(zstdReader)
_, err = tarReader.Next()
if err != nil {
return nil, FileNotFoundError{Path: path}
}
return fileReadMultiCloser{
reader: tarReader,
closers: []io.Closer{
out,
zstdReader,
},
}, nil
}
type fileReadMultiCloser struct {
reader io.Reader
closers []io.Closer
}
func (frc fileReadMultiCloser) Read(p []byte) (n int, err error) {
return frc.reader.Read(p)
}
func (frc fileReadMultiCloser) Close() error {
var closeErrors error
for _, closer := range frc.closers {
err := closer.Close()
if err != nil {
closeErrors = multierror.Append(closeErrors, err)
}
}
return closeErrors
}
//type GetArtifact struct {
// volumeHandle string
//}
//
//func (art *GetArtifact) ID() string {
// return art.volumeHandle
//}
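The get step now forwards worker events to its delegate through a channel instead of calling Initializing/Starting/Finished inline. The switch above reads ev.EventType, ev.ExitStatus, and ev.VersionResult; below is a minimal sketch of event types that would satisfy it, with the concrete constant values and field types being assumptions (VersionResult as sketched after the builder diff above).

package runtime

// EventType distinguishes the lifecycle notifications a step's goroutine
// translates into delegate calls.
type EventType string

const (
	InitializingEvent EventType = "Initializing"
	StartingEvent     EventType = "Starting"
	FinishedEvent     EventType = "Finished"
)

// Event carries whichever fields the delegate callbacks need; only
// FinishedEvent uses the exit status and version result.
type Event struct {
	EventType     EventType
	ExitStatus    int
	VersionResult VersionResult
}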

View File

@ -19,11 +19,12 @@ import (
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/db/dbfakes"
"github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/exec/execfakes"
"github.com/concourse/concourse/atc/fetcher/fetcherfakes"
"github.com/concourse/concourse/atc/resource"
"github.com/concourse/concourse/atc/resource/resourcefakes"
"github.com/concourse/concourse/atc/runtime"
"github.com/concourse/concourse/atc/worker"
"github.com/concourse/concourse/atc/worker/workerfakes"
"github.com/concourse/concourse/vars"
@ -46,7 +47,7 @@ var _ = Describe("GetStep", func() {
fakeVersionedSource *resourcefakes.FakeVersionedSource
interpolatedResourceTypes atc.VersionedResourceTypes
artifactRepository *artifact.Repository
artifactRepository *build.Repository
state *execfakes.FakeRunState
getStep exec.Step
@ -86,7 +87,7 @@ var _ = Describe("GetStep", func() {
credVars := vars.StaticVariables{"source-param": "super-secret-source"}
credVarsTracker = vars.NewCredVarsTracker(credVars, true)
artifactRepository = artifact.NewRepository()
artifactRepository = build.NewRepository()
state = new(execfakes.FakeRunState)
state.ArtifactsReturns(artifactRepository)
@ -410,7 +411,7 @@ var _ = Describe("GetStep", func() {
Context("but the stream is empty", func() {
It("returns ErrFileNotFound", func() {
_, err := artifactSource.StreamFile(context.TODO(), testLogger, "some-path")
Expect(err).To(MatchError(exec.FileNotFoundError{Path: "some-path"}))
Expect(err).To(MatchError(runtime.FileNotFoundError{Path: "some-path"}))
})
})
})

View File

@ -8,7 +8,7 @@ import (
"time"
. "github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/exec/execfakes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -23,7 +23,7 @@ var _ = Describe("Parallel", func() {
fakeStepB *execfakes.FakeStep
fakeSteps []Step
repo *artifact.Repository
repo *build.Repository
state *execfakes.FakeRunState
step Step
@ -39,7 +39,7 @@ var _ = Describe("Parallel", func() {
step = InParallel(fakeSteps, len(fakeSteps), false)
repo = artifact.NewRepository()
repo = build.NewRepository()
state = new(execfakes.FakeRunState)
state.ArtifactsReturns(repo)
})

View File

@ -5,7 +5,7 @@ import (
"errors"
. "github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/exec/execfakes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -19,7 +19,7 @@ var _ = Describe("LogErrorStep", func() {
fakeStep *execfakes.FakeStep
fakeDelegate *execfakes.FakeBuildStepDelegate
repo *artifact.Repository
repo *build.Repository
state *execfakes.FakeRunState
step Step
@ -31,7 +31,7 @@ var _ = Describe("LogErrorStep", func() {
fakeStep = new(execfakes.FakeStep)
fakeDelegate = new(execfakes.FakeBuildStepDelegate)
repo = artifact.NewRepository()
repo = build.NewRepository()
state = new(execfakes.FakeRunState)
state.ArtifactsReturns(repo)

View File

@ -5,7 +5,7 @@ import (
"errors"
"github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/exec/execfakes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -19,7 +19,7 @@ var _ = Describe("On Abort Step", func() {
step *execfakes.FakeStep
hook *execfakes.FakeStep
repo *artifact.Repository
repo *build.Repository
state *execfakes.FakeRunState
onAbortStep exec.Step
@ -33,7 +33,7 @@ var _ = Describe("On Abort Step", func() {
step = &execfakes.FakeStep{}
hook = &execfakes.FakeStep{}
repo = artifact.NewRepository()
repo = build.NewRepository()
state = new(execfakes.FakeRunState)
state.ArtifactsReturns(repo)

View File

@ -5,7 +5,7 @@ import (
"errors"
"github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/exec/execfakes"
"github.com/hashicorp/go-multierror"
. "github.com/onsi/ginkgo"
@ -20,7 +20,7 @@ var _ = Describe("On Error Step", func() {
step *execfakes.FakeStep
hook *execfakes.FakeStep
repo *artifact.Repository
repo *build.Repository
state *execfakes.FakeRunState
onErrorStep exec.Step
@ -36,7 +36,7 @@ var _ = Describe("On Error Step", func() {
step = &execfakes.FakeStep{}
hook = &execfakes.FakeStep{}
repo = artifact.NewRepository()
repo = build.NewRepository()
state = new(execfakes.FakeRunState)
state.ArtifactsReturns(repo)

View File

@ -5,7 +5,7 @@ import (
"errors"
"github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/exec/execfakes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -19,7 +19,7 @@ var _ = Describe("On Failure Step", func() {
step *execfakes.FakeStep
hook *execfakes.FakeStep
repo *artifact.Repository
repo *build.Repository
state *execfakes.FakeRunState
onFailureStep exec.Step
@ -33,7 +33,7 @@ var _ = Describe("On Failure Step", func() {
step = &execfakes.FakeStep{}
hook = &execfakes.FakeStep{}
repo = artifact.NewRepository()
repo = build.NewRepository()
state = new(execfakes.FakeRunState)
state.ArtifactsReturns(repo)

View File

@ -5,7 +5,7 @@ import (
"errors"
"github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/exec/execfakes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -19,7 +19,7 @@ var _ = Describe("On Success Step", func() {
step *execfakes.FakeStep
hook *execfakes.FakeStep
repo *artifact.Repository
repo *build.Repository
state *execfakes.FakeRunState
onSuccessStep exec.Step
@ -33,7 +33,7 @@ var _ = Describe("On Success Step", func() {
step = &execfakes.FakeStep{}
hook = &execfakes.FakeStep{}
repo = artifact.NewRepository()
repo = build.NewRepository()
state = new(execfakes.FakeRunState)
state.ArtifactsReturns(repo)

View File

@ -1,11 +1,11 @@
package exec
import (
"context"
"fmt"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/runtime"
"github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/resource"
"github.com/concourse/concourse/atc/worker"
)
@ -19,7 +19,7 @@ func (e PutInputNotFoundError) Error() string {
}
type PutInputs interface {
FindAll(*artifact.Repository) ([]worker.InputSource, error)
FindAll(*build.Repository) ([]worker.FooBarInput, error)
}
type allInputs struct{}
@ -28,13 +28,13 @@ func NewAllInputs() PutInputs {
return &allInputs{}
}
func (i allInputs) FindAll(artifacts *artifact.Repository) ([]worker.InputSource, error) {
inputs := []worker.InputSource{}
func (i allInputs) FindAll(artifacts *build.Repository) ([]worker.FooBarInput, error) {
inputs := []worker.FooBarInput{}
for name, source := range artifacts.AsMap() {
inputs = append(inputs, &putInputSource{
name: name,
source: PutResourceSource{source},
for name, artifact := range artifacts.AsMap() {
inputs = append(inputs, &putFooBarInput{
name: name,
artifact: artifact,
})
}
@ -51,40 +51,36 @@ func NewSpecificInputs(inputs []string) PutInputs {
}
}
func (i specificInputs) FindAll(artifacts *artifact.Repository) ([]worker.InputSource, error) {
func (i specificInputs) FindAll(artifacts *build.Repository) ([]worker.FooBarInput, error) {
artifactsMap := artifacts.AsMap()
inputs := []worker.InputSource{}
inputs := []worker.FooBarInput{}
for _, i := range i.inputs {
artifactSource, found := artifactsMap[artifact.Name(i)]
artifact, found := artifactsMap[build.ArtifactName(i)]
if !found {
return nil, PutInputNotFoundError{Input: i}
}
inputs = append(inputs, &putInputSource{
name: artifact.Name(i),
source: PutResourceSource{artifactSource},
inputs = append(inputs, &putFooBarInput{
name: build.ArtifactName(i),
artifact: artifact,
})
}
return inputs, nil
}
type putInputSource struct {
name artifact.Name
source worker.ArtifactSource
type putFooBarInput struct {
name build.ArtifactName
artifact runtime.Artifact
}
func (s *putInputSource) Source() worker.ArtifactSource { return s.source }
func (s *putFooBarInput) Artifact() runtime.Artifact { return s.artifact }
func (s *putInputSource) DestinationPath() string {
func (s *putFooBarInput) DestinationPath() string {
return resource.ResourcesDir("put/" + string(s.name))
}
type PutResourceSource struct {
worker.ArtifactSource
}
func (source PutResourceSource) StreamTo(ctx context.Context, logger lager.Logger, dest worker.ArtifactDestination) error {
return source.ArtifactSource.StreamTo(ctx, logger, dest)
}
//func (source PutResourceSource) StreamTo(ctx context.Context, logger lager.Logger, dest worker.ArtifactDestination) error {
// return source.ArtifactSource.StreamTo(ctx, logger, dest)
//}
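The PutInputs refactor swaps worker.InputSource (an artifact source plus destination path) for worker.FooBarInput, a placeholder name from this work-in-progress commit. Its method set is inferred from putFooBarInput above; the definition below is an assumption.

package worker

import "github.com/concourse/concourse/atc/runtime"

// FooBarInput is assumed to expose exactly what putFooBarInput implements:
// the artifact to mount and the path to mount it at inside the container.
type FooBarInput interface {
	Artifact() runtime.Artifact
	DestinationPath() string
}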

View File

@ -2,15 +2,17 @@ package exec
import (
"context"
"github.com/concourse/concourse/vars"
"io"
"github.com/concourse/concourse/vars"
"code.cloudfoundry.org/lager"
"code.cloudfoundry.org/lager/lagerctx"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/creds"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/resource"
"github.com/concourse/concourse/atc/runtime"
"github.com/concourse/concourse/atc/worker"
)
@ -26,7 +28,7 @@ type PutDelegate interface {
Initializing(lager.Logger)
Starting(lager.Logger)
Finished(lager.Logger, ExitStatus, VersionInfo)
Finished(lager.Logger, ExitStatus, runtime.VersionResult)
Errored(lager.Logger, string)
SaveOutput(lager.Logger, atc.PutPlan, atc.Source, atc.VersionedResourceTypes, VersionInfo)
@ -42,7 +44,7 @@ type PutStep struct {
resourceFactory resource.ResourceFactory
resourceConfigFactory db.ResourceConfigFactory
strategy worker.ContainerPlacementStrategy
pool worker.Pool
workerClient worker.Client
delegate PutDelegate
succeeded bool
}
@ -55,7 +57,7 @@ func NewPutStep(
resourceFactory resource.ResourceFactory,
resourceConfigFactory db.ResourceConfigFactory,
strategy worker.ContainerPlacementStrategy,
pool worker.Pool,
workerClient worker.Client,
delegate PutDelegate,
) *PutStep {
return &PutStep{
@ -65,7 +67,7 @@ func NewPutStep(
containerMetadata: containerMetadata,
resourceFactory: resourceFactory,
resourceConfigFactory: resourceConfigFactory,
pool: pool,
workerClient: workerClient,
strategy: strategy,
delegate: delegate,
}
@ -86,7 +88,7 @@ func (step *PutStep) Run(ctx context.Context, state RunState) error {
"job-id": step.metadata.JobID,
})
step.delegate.Initializing(logger)
//step.delegate.Initializing(logger)
variables := step.delegate.Variables()
@ -118,7 +120,7 @@ func (step *PutStep) Run(ctx context.Context, state RunState) error {
putInputs = NewSpecificInputs(step.plan.Inputs.Specified)
}
containerInputs, err := putInputs.FindAll(state.Artifacts())
containerInputs, err := putInputs.FindAll(state.ArtifactRepository())
if err != nil {
return err
}
@ -134,7 +136,7 @@ func (step *PutStep) Run(ctx context.Context, state RunState) error {
Env: step.metadata.Env(),
Inputs: containerInputs,
InputFooBars: containerInputs,
}
workerSpec := worker.WorkerSpec{
@ -146,75 +148,97 @@ func (step *PutStep) Run(ctx context.Context, state RunState) error {
owner := db.NewBuildStepContainerOwner(step.metadata.BuildID, step.planID, step.metadata.TeamID)
chosenWorker, err := step.pool.FindOrChooseWorkerForContainer(
containerSpec.BindMounts = []worker.BindMountSource{
&worker.CertsVolumeMount{Logger: logger},
}
imageSpec := worker.ImageFetcherSpec{
ResourceTypes: resourceTypes,
Delegate: step.delegate,
}
events := make(chan runtime.Event, 1)
go func(logger lager.Logger, events chan runtime.Event, delegate PutDelegate) {
for {
ev := <-events
switch {
case ev.EventType == runtime.InitializingEvent:
step.delegate.Initializing(logger)
case ev.EventType == runtime.StartingEvent:
step.delegate.Starting(logger)
default:
return
}
}
}(logger, events, step.delegate)
// TODO: this might be a duplicate; check whether the client ever calls Initializing
step.delegate.Initializing(logger)
resourceDir := resource.ResourcesDir("put")
result := step.workerClient.RunPutStep(
ctx,
logger,
owner,
containerSpec,
workerSpec,
step.strategy,
)
if err != nil {
return err
}
containerSpec.BindMounts = []worker.BindMountSource{
&worker.CertsVolumeMount{Logger: logger},
}
container, err := chosenWorker.FindOrCreateContainer(
ctx,
logger,
step.delegate,
owner,
step.containerMetadata,
containerSpec,
resourceTypes,
)
if err != nil {
return err
}
step.delegate.Starting(logger)
putResource := step.resourceFactory.NewResourceForContainer(container)
versionResult, err := putResource.Put(
ctx,
resource.IOConfig{
Stdout: step.delegate.Stdout(),
Stderr: step.delegate.Stderr(),
},
source,
params,
step.strategy,
step.containerMetadata,
imageSpec,
resourceDir,
worker.ProcessSpec{
Path: "/opt/resource/out",
Args: []string{resourceDir},
StdoutWriter: step.delegate.Stdout(),
StderrWriter: step.delegate.Stderr(),
},
events,
)
versionResult := result.VersionResult
err = result.Err
// TODO: Add in code to actually use the resource interface. Example here:
//putResource := step.resourceFactory.NewResourceForContainer(container)
//versionResult, err := putResource.Put(
// ctx,
// resource.IOConfig{
// Stdout: step.delegate.Stdout(),
// Stderr: step.delegate.Stderr(),
// },
// source,
// params,
//)
//
if err != nil {
logger.Error("failed-to-put-resource", err)
if err, ok := err.(resource.ErrResourceScriptFailed); ok {
step.delegate.Finished(logger, ExitStatus(err.ExitStatus), VersionInfo{})
step.delegate.Finished(logger, ExitStatus(err.ExitStatus), runtime.VersionResult{})
return nil
}
return err
}
versionInfo := VersionInfo{
Version: versionResult.Version,
Metadata: versionResult.Metadata,
}
if step.plan.Resource != "" {
step.delegate.SaveOutput(logger, step.plan, source, resourceTypes, versionInfo)
step.delegate.SaveOutput(logger, step.plan, source, resourceTypes, versionResult)
}
state.StoreResult(step.planID, versionInfo)
state.StoreResult(step.planID, versionResult)
step.succeeded = true
step.delegate.Finished(logger, 0, versionInfo)
// TODO This should happen in client.RunPutStep itself, similar to TaskStep
step.delegate.Finished(logger, 0, versionResult)
return nil
}
// Succeeded returns true if the resource script exited successfully.
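RunPutStep now returns a result struct that the step unpacks (result.VersionResult, result.Err) and that the test below stubs with worker.PutResult{Status: 0, VersionResult: ..., Err: ...}. Here is a sketch of the assumed shape; field types are inferred from those call sites, not taken from the worker package source.

package worker

import "github.com/concourse/concourse/atc/runtime"

// PutResult is assumed to carry the fields PutStep.Run reads and the put step
// test stubs: the put script's exit status, the produced version, and any error.
type PutResult struct {
	Status        int
	VersionResult runtime.VersionResult
	Err           error
}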

View File

@ -12,10 +12,11 @@ import (
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/db/dbfakes"
"github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/exec/execfakes"
"github.com/concourse/concourse/atc/resource"
"github.com/concourse/concourse/atc/resource/resourcefakes"
"github.com/concourse/concourse/atc/runtime"
"github.com/concourse/concourse/atc/worker"
"github.com/concourse/concourse/atc/worker/workerfakes"
"github.com/concourse/concourse/vars"
@ -27,7 +28,7 @@ var _ = Describe("PutStep", func() {
cancel func()
fakeWorker *workerfakes.FakeWorker
fakePool *workerfakes.FakePool
fakeClient *workerfakes.FakeClient
fakeStrategy *workerfakes.FakeContainerPlacementStrategy
fakeResourceFactory *resourcefakes.FakeResourceFactory
fakeResourceConfigFactory *dbfakes.FakeResourceConfigFactory
@ -51,7 +52,7 @@ var _ = Describe("PutStep", func() {
PipelineName: "some-pipeline",
}
repo *artifact.Repository
repo *build.Repository
state *execfakes.FakeRunState
putStep *exec.PutStep
@ -63,6 +64,9 @@ var _ = Describe("PutStep", func() {
stderrBuf *gbytes.Buffer
planID atc.PlanID
versionResult runtime.VersionResult
clientErr error
)
BeforeEach(func() {
@ -71,7 +75,7 @@ var _ = Describe("PutStep", func() {
planID = atc.PlanID("some-plan-id")
fakeStrategy = new(workerfakes.FakeContainerPlacementStrategy)
fakePool = new(workerfakes.FakePool)
fakeClient = new(workerfakes.FakeClient)
fakeWorker = new(workerfakes.FakeWorker)
fakeResourceFactory = new(resourcefakes.FakeResourceFactory)
fakeResourceConfigFactory = new(dbfakes.FakeResourceConfigFactory)
@ -86,7 +90,7 @@ var _ = Describe("PutStep", func() {
fakeDelegate.StderrReturns(stderrBuf)
fakeDelegate.VariablesReturns(vars.NewCredVarsTracker(credVarsTracker, false))
repo = artifact.NewRepository()
repo = build.NewRepository()
state = new(execfakes.FakeRunState)
state.ArtifactsReturns(repo)
@ -121,6 +125,11 @@ var _ = Describe("PutStep", func() {
Tags: []string{"some", "tags"},
VersionedResourceTypes: uninterpolatedResourceTypes,
}
versionResult = runtime.VersionResult{
Version: atc.Version{"some": "version"},
Metadata: []atc.MetadataField{{Name: "some", Value: "metadata"}},
}
})
AfterEach(func() {
@ -133,6 +142,8 @@ var _ = Describe("PutStep", func() {
Put: putPlan,
}
fakeClient.RunPutStepReturns(worker.PutResult{Status: 0, VersionResult: versionResult, Err: clientErr})
putStep = exec.NewPutStep(
plan.ID,
*plan.Put,
@ -141,7 +152,7 @@ var _ = Describe("PutStep", func() {
fakeResourceFactory,
fakeResourceConfigFactory,
fakeStrategy,
fakePool,
fakeClient,
fakeDelegate,
)
@ -163,13 +174,48 @@ var _ = Describe("PutStep", func() {
repo.RegisterSource("some-source", fakeSource)
repo.RegisterSource("some-other-source", fakeOtherSource)
repo.RegisterSource("some-mounted-source", fakeMountedSource)
})
It("finds/chooses a worker and creates a container with the correct type, session, and sources with no inputs specified (meaning it takes all artifacts)", func() {
Expect(fakeClient.RunPutStepCallCount()).To(Equal(1))
_, _, actualOwner, actualContainerSpec, actualWorkerSpec, _, _, strategy, _, actualImageFetcherSpec, _, _, _ := fakeClient.RunPutStepArgsForCall(0)
Expect(actualOwner).To(Equal(db.NewBuildStepContainerOwner(42, atc.PlanID(planID), 123)))
Expect(actualContainerSpec.ImageSpec).To(Equal(worker.ImageSpec{
ResourceType: "some-resource-type",
}))
Expect(actualContainerSpec.Tags).To(Equal([]string{"some", "tags"}))
Expect(actualContainerSpec.TeamID).To(Equal(123))
Expect(actualContainerSpec.Env).To(Equal(stepMetadata.Env()))
Expect(actualContainerSpec.Dir).To(Equal("/tmp/build/put"))
Expect(actualContainerSpec.Inputs).To(HaveLen(3))
Expect(actualWorkerSpec).To(Equal(worker.WorkerSpec{
TeamID: 123,
Tags: []string{"some", "tags"},
ResourceType: "some-resource-type",
ResourceTypes: interpolatedResourceTypes,
}))
Expect(strategy).To(Equal(fakeStrategy))
Expect([]worker.ArtifactSource{
actualContainerSpec.Inputs[0].Source(),
actualContainerSpec.Inputs[1].Source(),
actualContainerSpec.Inputs[2].Source(),
}).To(ConsistOf(
exec.PutResourceSource{fakeSource},
exec.PutResourceSource{fakeOtherSource},
exec.PutResourceSource{fakeMountedSource},
))
Expect(actualImageFetcherSpec.ResourceTypes).To(Equal(interpolatedResourceTypes))
Expect(actualImageFetcherSpec.Delegate).To(Equal(fakeDelegate))
})
Context("when the tracker can initialize the resource", func() {
var (
fakeResource *resourcefakes.FakeResource
fakeResourceConfig *dbfakes.FakeResourceConfig
fakeVersionResult resource.VersionResult
fakeVersionResult runtime.VersionResult
)
BeforeEach(func() {
@ -178,64 +224,18 @@ var _ = Describe("PutStep", func() {
fakeResourceConfigFactory.FindOrCreateResourceConfigReturns(fakeResourceConfig, nil)
fakeVersionResult = resource.VersionResult{
fakeVersionResult = runtime.VersionResult{
Version: atc.Version{"some": "version"},
Metadata: []atc.MetadataField{{Name: "some", Value: "metadata"}},
}
fakeWorker.NameReturns("some-worker")
fakePool.FindOrChooseWorkerForContainerReturns(fakeWorker, nil)
fakeResource = new(resourcefakes.FakeResource)
fakeResource.PutReturns(fakeVersionResult, nil)
fakeResourceFactory.NewResourceForContainerReturns(fakeResource)
})
It("finds/chooses a worker and creates a container with the correct type, session, and sources with no inputs specified (meaning it takes all artifacts)", func() {
Expect(fakePool.FindOrChooseWorkerForContainerCallCount()).To(Equal(1))
_, _, actualOwner, actualContainerSpec, actualWorkerSpec, strategy := fakePool.FindOrChooseWorkerForContainerArgsForCall(0)
Expect(actualOwner).To(Equal(db.NewBuildStepContainerOwner(42, atc.PlanID(planID), 123)))
Expect(actualContainerSpec.ImageSpec).To(Equal(worker.ImageSpec{
ResourceType: "some-resource-type",
}))
Expect(actualContainerSpec.Tags).To(Equal([]string{"some", "tags"}))
Expect(actualContainerSpec.TeamID).To(Equal(123))
Expect(actualContainerSpec.Env).To(Equal(stepMetadata.Env()))
Expect(actualContainerSpec.Dir).To(Equal("/tmp/build/put"))
Expect(actualContainerSpec.Inputs).To(HaveLen(3))
Expect(actualWorkerSpec).To(Equal(worker.WorkerSpec{
TeamID: 123,
Tags: []string{"some", "tags"},
ResourceType: "some-resource-type",
ResourceTypes: interpolatedResourceTypes,
}))
Expect(strategy).To(Equal(fakeStrategy))
_, _, delegate, owner, actualContainerMetadata, containerSpec, actualResourceTypes := fakeWorker.FindOrCreateContainerArgsForCall(0)
Expect(owner).To(Equal(db.NewBuildStepContainerOwner(42, atc.PlanID(planID), 123)))
Expect(actualContainerMetadata).To(Equal(containerMetadata))
Expect(containerSpec.ImageSpec).To(Equal(worker.ImageSpec{
ResourceType: "some-resource-type",
}))
Expect(containerSpec.Tags).To(Equal([]string{"some", "tags"}))
Expect(containerSpec.TeamID).To(Equal(123))
Expect(containerSpec.Env).To(Equal(stepMetadata.Env()))
Expect(containerSpec.Dir).To(Equal("/tmp/build/put"))
Expect(containerSpec.Inputs).To(HaveLen(3))
Expect([]worker.ArtifactSource{
containerSpec.Inputs[0].Source(),
containerSpec.Inputs[1].Source(),
containerSpec.Inputs[2].Source(),
}).To(ConsistOf(
exec.PutResourceSource{fakeSource},
exec.PutResourceSource{fakeOtherSource},
exec.PutResourceSource{fakeMountedSource},
))
Expect(actualResourceTypes).To(Equal(interpolatedResourceTypes))
Expect(delegate).To(Equal(fakeDelegate))
})
It("secrets are tracked", func() {
mapit := vars.NewMapCredVarsTrackerIterator()
credVarsTracker.IterateInterpolatedCreds(mapit)
@ -250,8 +250,8 @@ var _ = Describe("PutStep", func() {
}
})
It("initializes the container with specified inputs", func() {
_, _, _, _, _, containerSpec, _ := fakeWorker.FindOrCreateContainerArgsForCall(0)
It("calls RunPutStep with specified inputs", func() {
_, _, _, containerSpec, _, _, _, _, _, _, _, _, _ := fakeClient.RunPutStepArgsForCall(0)
Expect(containerSpec.Inputs).To(HaveLen(2))
Expect([]worker.ArtifactSource{
containerSpec.Inputs[0].Source(),
@ -263,30 +263,24 @@ var _ = Describe("PutStep", func() {
})
})
It("puts the resource with the given context", func() {
Expect(fakeResource.PutCallCount()).To(Equal(1))
putCtx, _, _, _ := fakeResource.PutArgsForCall(0)
It("calls RunPutStep with the given context", func() {
Expect(fakeClient.RunPutStepCallCount()).To(Equal(1))
putCtx, _, _, _, _, _, _, _, _, _, _, _, _ := fakeClient.RunPutStepArgsForCall(0)
Expect(putCtx).To(Equal(ctx))
})
It("puts the resource with the correct source and params", func() {
Expect(fakeResource.PutCallCount()).To(Equal(1))
_, _, putSource, putParams := fakeResource.PutArgsForCall(0)
Expect(fakeClient.RunPutStepCallCount()).To(Equal(1))
_, _, _, _, _, putSource, putParams, _, _, _, _, _, _ := fakeClient.RunPutStepArgsForCall(0)
Expect(putSource).To(Equal(atc.Source{"some": "super-secret-source"}))
Expect(putParams).To(Equal(atc.Params{"some-param": "some-value"}))
})
It("puts the resource with the io config forwarded", func() {
Expect(fakeResource.PutCallCount()).To(Equal(1))
_, ioConfig, _, _ := fakeResource.PutArgsForCall(0)
Expect(ioConfig.Stdout).To(Equal(stdoutBuf))
Expect(ioConfig.Stderr).To(Equal(stderrBuf))
})
It("runs the get resource action", func() {
Expect(fakeResource.PutCallCount()).To(Equal(1))
Expect(fakeClient.RunPutStepCallCount()).To(Equal(1))
_, _, _, _, _, _, _, _, _, _, _, processSpec, _ := fakeClient.RunPutStepArgsForCall(0)
Expect(processSpec.StdoutWriter).To(Equal(stdoutBuf))
Expect(processSpec.StderrWriter).To(Equal(stderrBuf))
})
It("is successful", func() {
@ -328,21 +322,19 @@ var _ = Describe("PutStep", func() {
Expect(info.Metadata).To(Equal([]atc.MetadataField{{Name: "some", Value: "metadata"}}))
})
It("stores the version info as the step result", func() {
It("stores the version result as the step result", func() {
Expect(state.StoreResultCallCount()).To(Equal(1))
sID, sVal := state.StoreResultArgsForCall(0)
Expect(sID).To(Equal(planID))
Expect(sVal).To(Equal(exec.VersionInfo{
Version: atc.Version{"some": "version"},
Metadata: []atc.MetadataField{{Name: "some", Value: "metadata"}},
}))
Expect(sVal).To(Equal(versionResult))
})
Context("when performing the put exits unsuccessfully", func() {
Context("when RunPutStep exits unsuccessfully", func() {
BeforeEach(func() {
fakeResource.PutReturns(resource.VersionResult{}, resource.ErrResourceScriptFailed{
versionResult = runtime.VersionResult{}
clientErr = resource.ErrResourceScriptFailed{
ExitStatus: 42,
})
}
})
It("finishes the step via the delegate", func() {
@ -361,11 +353,12 @@ var _ = Describe("PutStep", func() {
})
})
Context("when performing the put errors", func() {
Context("when RunPutStep exits with an error", func() {
disaster := errors.New("oh no")
BeforeEach(func() {
fakeResource.PutReturns(resource.VersionResult{}, disaster)
versionResult = runtime.VersionResult{}
clientErr = disaster
})
It("does not finish the step via the delegate", func() {
@ -381,30 +374,5 @@ var _ = Describe("PutStep", func() {
})
})
})
Context("when find or choosing a worker fails", func() {
disaster := errors.New("nope")
BeforeEach(func() {
fakePool.FindOrChooseWorkerForContainerReturns(nil, disaster)
})
It("returns the failure", func() {
Expect(stepErr).To(Equal(disaster))
})
})
Context("when find or creating a container fails", func() {
disaster := errors.New("nope")
BeforeEach(func() {
fakePool.FindOrChooseWorkerForContainerReturns(fakeWorker, nil)
fakeWorker.FindOrCreateContainerReturns(nil, disaster)
})
It("returns the failure", func() {
Expect(stepErr).To(Equal(disaster))
})
})
})
})

View File

@ -5,7 +5,7 @@ import (
"errors"
. "github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/exec/execfakes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -20,7 +20,7 @@ var _ = Describe("Retry Step", func() {
attempt2 *execfakes.FakeStep
attempt3 *execfakes.FakeStep
repo *artifact.Repository
repo *build.Repository
state *execfakes.FakeRunState
step Step
@ -33,7 +33,7 @@ var _ = Describe("Retry Step", func() {
attempt2 = new(execfakes.FakeStep)
attempt3 = new(execfakes.FakeStep)
repo = artifact.NewRepository()
repo = build.NewRepository()
state = new(execfakes.FakeRunState)
state.ArtifactsReturns(repo)

View File

@ -5,22 +5,22 @@ import (
"sync"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
)
type runState struct {
artifacts *artifact.Repository
artifacts *build.Repository
results *sync.Map
}
func NewRunState() RunState {
return &runState{
artifacts: artifact.NewRepository(),
artifacts: build.NewRepository(),
results: &sync.Map{},
}
}
func (state *runState) Artifacts() *artifact.Repository {
func (state *runState) ArtifactRepository() *build.Repository {
return state.artifacts
}

View File

@ -5,7 +5,7 @@ import (
"io"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
)
//go:generate counterfeiter . Step
@ -36,19 +36,12 @@ type BuildOutputFilter func(text string) string
//go:generate counterfeiter . RunState
type RunState interface {
Artifacts() *artifact.Repository
ArtifactRepository() *build.Repository
Result(atc.PlanID, interface{}) bool
StoreResult(atc.PlanID, interface{})
}
// VersionInfo is the version and metadata of a resource that was fetched or
// produced. It is used by Put and Get.
type VersionInfo struct {
Version atc.Version
Metadata []atc.MetadataField
}
// ExitStatus is the resulting exit code from the process that the step ran.
// Typically if the ExitStatus result is 0, the Success result is true.
type ExitStatus int
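A short usage sketch of the renamed RunState surface, assuming only the names visible in this diff (NewRunState, ArtifactRepository, RegisterArtifact, StoreResult, Result): steps register runtime artifacts in the build.Repository and store their runtime.VersionResult keyed by plan ID, replacing the removed exec.VersionInfo. Exact constructors and literal values are illustrative.

package example

import (
    "fmt"

    "github.com/concourse/concourse/atc"
    "github.com/concourse/concourse/atc/exec"
    "github.com/concourse/concourse/atc/exec/build"
    "github.com/concourse/concourse/atc/runtime"
)

func Example() {
    state := exec.NewRunState()

    // Register an output artifact under its build.ArtifactName, as the task
    // step's registerOutputs now does.
    state.ArtifactRepository().RegisterArtifact(
        build.ArtifactName("some-output"),
        &runtime.TaskArtifact{VolumeHandle: "some-volume-handle"},
    )

    // Store the step's version result and read it back by plan ID, as the
    // put step and PutStepVersionSource do.
    planID := atc.PlanID("some-plan-id")
    state.StoreResult(planID, runtime.VersionResult{
        Version: atc.Version{"ref": "abc123"},
    })

    var result runtime.VersionResult
    if state.Result(planID, &result) {
        fmt.Println(result.Version)
    }
}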

View File

@ -12,7 +12,8 @@ import (
"code.cloudfoundry.org/lager"
"github.com/concourse/baggageclaim"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/worker"
"github.com/concourse/concourse/vars"
"sigs.k8s.io/yaml"
)
@ -23,7 +24,7 @@ import (
type TaskConfigSource interface {
// FetchConfig returns the TaskConfig, and may have to read a task config file out
// of the artifact.Repository.
FetchConfig(context.Context, lager.Logger, *artifact.Repository) (atc.TaskConfig, error)
FetchConfig(context.Context, lager.Logger, *build.Repository) (atc.TaskConfig, error)
Warnings() []string
}
@ -33,7 +34,7 @@ type StaticConfigSource struct {
}
// FetchConfig returns the configuration.
func (configSource StaticConfigSource) FetchConfig(context.Context, lager.Logger, *artifact.Repository) (atc.TaskConfig, error) {
func (configSource StaticConfigSource) FetchConfig(context.Context, lager.Logger, *build.Repository) (atc.TaskConfig, error) {
taskConfig := atc.TaskConfig{}
if configSource.Config != nil {
taskConfig = *configSource.Config
@ -49,6 +50,7 @@ func (configSource StaticConfigSource) Warnings() []string {
// be fetched from a specified file in the artifact.Repository.
type FileConfigSource struct {
ConfigPath string
Client worker.Client
}
// FetchConfig reads the specified file from the artifact.Repository and loads the
@ -66,23 +68,20 @@ type FileConfigSource struct {
//
// If the task config file is not found, or is invalid YAML, or is an invalid
// task configuration, the respective errors will be bubbled up.
func (configSource FileConfigSource) FetchConfig(ctx context.Context, logger lager.Logger, repo *artifact.Repository) (atc.TaskConfig, error) {
func (configSource FileConfigSource) FetchConfig(ctx context.Context, logger lager.Logger, repo *build.Repository) (atc.TaskConfig, error) {
segs := strings.SplitN(configSource.ConfigPath, "/", 2)
if len(segs) != 2 {
return atc.TaskConfig{}, UnspecifiedArtifactSourceError{configSource.ConfigPath}
}
sourceName := artifact.Name(segs[0])
sourceName := build.ArtifactName(segs[0])
filePath := segs[1]
source, found := repo.SourceFor(sourceName)
artifact, found := repo.ArtifactFor(sourceName)
if !found {
return atc.TaskConfig{}, UnknownArtifactSourceError{sourceName, configSource.ConfigPath}
}
// This context is not passed down yet because it would pollute the
// TaskConfigSource interface, as all the FetchConfigs would need to have this passed in.
stream, err := source.StreamFile(ctx, logger, filePath)
stream, err := configSource.Client.StreamFileFromArtifact(ctx, logger, artifact, filePath)
if err != nil {
if err == baggageclaim.ErrFileNotFound {
return atc.TaskConfig{}, fmt.Errorf("task config '%s/%s' not found", sourceName, filePath)
@ -118,7 +117,7 @@ type OverrideParamsConfigSource struct {
// FetchConfig overrides parameters, allowing the user to set params required by a task loaded
// from a file by providing them in static configuration.
func (configSource *OverrideParamsConfigSource) FetchConfig(ctx context.Context, logger lager.Logger, source *artifact.Repository) (atc.TaskConfig, error) {
func (configSource *OverrideParamsConfigSource) FetchConfig(ctx context.Context, logger lager.Logger, source *build.Repository) (atc.TaskConfig, error) {
taskConfig, err := configSource.ConfigSource.FetchConfig(ctx, logger, source)
if err != nil {
return atc.TaskConfig{}, err
@ -165,7 +164,7 @@ type InterpolateTemplateConfigSource struct {
}
// FetchConfig returns the interpolated configuration
func (configSource InterpolateTemplateConfigSource) FetchConfig(ctx context.Context, logger lager.Logger, source *artifact.Repository) (atc.TaskConfig, error) {
func (configSource InterpolateTemplateConfigSource) FetchConfig(ctx context.Context, logger lager.Logger, source *build.Repository) (atc.TaskConfig, error) {
taskConfig, err := configSource.ConfigSource.FetchConfig(ctx, logger, source)
if err != nil {
return atc.TaskConfig{}, err
@ -202,7 +201,7 @@ type ValidatingConfigSource struct {
// FetchConfig fetches the config using the underlying ConfigSource, and checks
// that it's valid.
func (configSource ValidatingConfigSource) FetchConfig(ctx context.Context, logger lager.Logger, source *artifact.Repository) (atc.TaskConfig, error) {
func (configSource ValidatingConfigSource) FetchConfig(ctx context.Context, logger lager.Logger, source *build.Repository) (atc.TaskConfig, error) {
config, err := configSource.ConfigSource.FetchConfig(ctx, logger, source)
if err != nil {
return atc.TaskConfig{}, err
@ -219,10 +218,10 @@ func (configSource ValidatingConfigSource) Warnings() []string {
return configSource.ConfigSource.Warnings()
}
// UnknownArtifactSourceError is returned when the artifact.Name specified by the
// UnknownArtifactSourceError is returned when the artifact.ArtifactName specified by the
// path does not exist in the artifact.Repository.
type UnknownArtifactSourceError struct {
SourceName artifact.Name
SourceName build.ArtifactName
ConfigPath string
}
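A hedged sketch of how the reworked FileConfigSource is meant to be wired, assuming a worker.Client and a populated build.Repository are already available to the caller: the artifact named by the first path segment is looked up in the repository and its file is streamed through the client's StreamFileFromArtifact rather than through the artifact source itself. The config path and package name below are illustrative only.

package example

import (
    "context"

    "code.cloudfoundry.org/lager"
    "github.com/concourse/concourse/atc"
    "github.com/concourse/concourse/atc/exec"
    "github.com/concourse/concourse/atc/exec/build"
    "github.com/concourse/concourse/atc/worker"
)

// fetchTaskConfig loads "task.yml" out of the "some-artifact" artifact
// registered in the given repository.
func fetchTaskConfig(
    ctx context.Context,
    logger lager.Logger,
    client worker.Client,
    repo *build.Repository,
) (atc.TaskConfig, error) {
    source := exec.FileConfigSource{
        ConfigPath: "some-artifact/task.yml",
        Client:     client,
    }
    return source.FetchConfig(ctx, logger, repo)
}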

View File

@ -8,7 +8,7 @@ import (
"github.com/concourse/baggageclaim"
"github.com/concourse/concourse/atc"
. "github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/exec/execfakes"
"github.com/concourse/concourse/atc/worker/workerfakes"
"github.com/concourse/concourse/vars"
@ -22,13 +22,13 @@ var _ = Describe("TaskConfigSource", func() {
var (
taskConfig atc.TaskConfig
taskVars atc.Params
repo *artifact.Repository
repo *build.Repository
logger *lagertest.TestLogger
)
BeforeEach(func() {
logger = lagertest.NewTestLogger("task-config-source-test")
repo = artifact.NewRepository()
repo = build.NewRepository()
taskConfig = atc.TaskConfig{
Platform: "some-platform",
RootfsURI: "some-image",

View File

@ -2,9 +2,7 @@ package exec
import (
"context"
"errors"
"fmt"
"io"
"path"
"path/filepath"
"strings"
@ -15,7 +13,7 @@ import (
"github.com/concourse/concourse/atc/creds"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/db/lock"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/runtime"
"github.com/concourse/concourse/atc/worker"
"github.com/concourse/concourse/vars"
@ -160,7 +158,7 @@ func (step *TaskStep) Run(ctx context.Context, state RunState) error {
// validate
taskConfigSource = ValidatingConfigSource{ConfigSource: taskConfigSource}
repository := state.Artifacts()
repository := state.ArtifactRepository()
config, err := taskConfigSource.FetchConfig(ctx, logger, repository)
@ -191,7 +189,7 @@ func (step *TaskStep) Run(ctx context.Context, state RunState) error {
return err
}
processSpec := worker.TaskProcessSpec{
processSpec := worker.ProcessSpec{
Path: config.Run.Path,
Args: config.Run.Args,
Dir: config.Run.Dir,
@ -266,7 +264,7 @@ func (step *TaskStep) Succeeded() bool {
return step.succeeded
}
func (step *TaskStep) imageSpec(logger lager.Logger, repository *artifact.Repository, config atc.TaskConfig) (worker.ImageSpec, error) {
func (step *TaskStep) imageSpec(logger lager.Logger, repository *build.Repository, config atc.TaskConfig) (worker.ImageSpec, error) {
imageSpec := worker.ImageSpec{
Privileged: bool(step.plan.Privileged),
}
@ -274,12 +272,12 @@ func (step *TaskStep) imageSpec(logger lager.Logger, repository *artifact.Reposi
// Determine the source of the container image
// a reference to an artifact (get step, task output) ?
if step.plan.ImageArtifactName != "" {
source, found := repository.SourceFor(artifact.Name(step.plan.ImageArtifactName))
art, found := repository.ArtifactFor(build.ArtifactName(step.plan.ImageArtifactName))
if !found {
return worker.ImageSpec{}, MissingTaskImageSourceError{step.plan.ImageArtifactName}
}
imageSpec.ImageArtifactSource = source
imageSpec.ImageArtifact = art
//an image_resource
} else if config.ImageResource != nil {
@ -297,8 +295,8 @@ func (step *TaskStep) imageSpec(logger lager.Logger, repository *artifact.Reposi
return imageSpec, nil
}
func (step *TaskStep) containerInputs(logger lager.Logger, repository *artifact.Repository, config atc.TaskConfig, metadata db.ContainerMetadata) ([]worker.InputSource, error) {
inputs := []worker.InputSource{}
func (step *TaskStep) containerInputs(logger lager.Logger, repository *build.Repository, config atc.TaskConfig, metadata db.ContainerMetadata) ([]worker.FooBarInput, error) {
inputs := []worker.FooBarInput{}
var missingRequiredInputs []string
for _, input := range config.Inputs {
@ -307,7 +305,7 @@ func (step *TaskStep) containerInputs(logger lager.Logger, repository *artifact.
inputName = sourceName
}
source, found := repository.SourceFor(artifact.Name(inputName))
art, found := repository.ArtifactFor(build.ArtifactName(inputName))
if !found {
if !input.Optional {
missingRequiredInputs = append(missingRequiredInputs, inputName)
@ -315,9 +313,9 @@ func (step *TaskStep) containerInputs(logger lager.Logger, repository *artifact.
continue
}
inputs = append(inputs, &taskInputSource{
inputs = append(inputs, &taskInput{
config: input,
source: source,
artifact: art,
artifactsRoot: metadata.WorkingDirectory,
})
}
@ -327,9 +325,14 @@ func (step *TaskStep) containerInputs(logger lager.Logger, repository *artifact.
}
for _, cacheConfig := range config.Caches {
source := newTaskCacheSource(logger, step.metadata.TeamID, step.metadata.JobID, step.plan.Name, cacheConfig.Path)
inputs = append(inputs, &taskCacheInputSource{
source: source,
cacheArt := &runtime.TaskCacheArtifact{
TeamID: step.metadata.TeamID,
JobID: step.metadata.JobID,
StepName: step.plan.Name,
Path: cacheConfig.Path,
}
inputs = append(inputs, &taskCacheInput{
artifact: cacheArt,
artifactsRoot: metadata.WorkingDirectory,
cachePath: cacheConfig.Path,
})
@ -338,7 +341,7 @@ func (step *TaskStep) containerInputs(logger lager.Logger, repository *artifact.
return inputs, nil
}
func (step *TaskStep) containerSpec(logger lager.Logger, repository *artifact.Repository, config atc.TaskConfig, metadata db.ContainerMetadata) (worker.ContainerSpec, error) {
func (step *TaskStep) containerSpec(logger lager.Logger, repository *build.Repository, config atc.TaskConfig, metadata db.ContainerMetadata) (worker.ContainerSpec, error) {
imageSpec, err := step.imageSpec(logger, repository, config)
if err != nil {
return worker.ContainerSpec{}, err
@ -355,11 +358,11 @@ func (step *TaskStep) containerSpec(logger lager.Logger, repository *artifact.Re
Env: config.Params.Env(),
Type: metadata.Type,
Inputs: []worker.InputSource{},
Outputs: worker.OutputPaths{},
InputFooBars: []worker.FooBarInput{},
Outputs: worker.OutputPaths{},
}
containerSpec.Inputs, err = step.containerInputs(logger, repository, config, metadata)
containerSpec.InputFooBars, err = step.containerInputs(logger, repository, config, metadata)
if err != nil {
return worker.ContainerSpec{}, err
}
@ -372,7 +375,7 @@ func (step *TaskStep) containerSpec(logger lager.Logger, repository *artifact.Re
return containerSpec, nil
}
func (step *TaskStep) workerSpec(logger lager.Logger, resourceTypes atc.VersionedResourceTypes, repository *artifact.Repository, config atc.TaskConfig) (worker.WorkerSpec, error) {
func (step *TaskStep) workerSpec(logger lager.Logger, resourceTypes atc.VersionedResourceTypes, repository *build.Repository, config atc.TaskConfig) (worker.WorkerSpec, error) {
workerSpec := worker.WorkerSpec{
Platform: config.Platform,
Tags: step.plan.Tags,
@ -392,7 +395,7 @@ func (step *TaskStep) workerSpec(logger lager.Logger, resourceTypes atc.Versione
return workerSpec, nil
}
func (step *TaskStep) registerOutputs(logger lager.Logger, repository *artifact.Repository, config atc.TaskConfig, volumeMounts []worker.VolumeMount, metadata db.ContainerMetadata) {
func (step *TaskStep) registerOutputs(logger lager.Logger, repository *build.Repository, config atc.TaskConfig, volumeMounts []worker.VolumeMount, metadata db.ContainerMetadata) {
logger.Debug("registering-outputs", lager.Data{"outputs": config.Outputs})
for _, output := range config.Outputs {
@ -405,14 +408,16 @@ func (step *TaskStep) registerOutputs(logger lager.Logger, repository *artifact.
for _, mount := range volumeMounts {
if filepath.Clean(mount.MountPath) == filepath.Clean(outputPath) {
source := NewTaskArtifactSource(mount.Volume)
repository.RegisterSource(artifact.Name(outputName), source)
art := &runtime.TaskArtifact{
VolumeHandle: mount.Volume.Handle(),
}
repository.RegisterArtifact(build.ArtifactName(outputName), art)
}
}
}
}
func (step *TaskStep) registerCaches(logger lager.Logger, repository *artifact.Repository, config atc.TaskConfig, volumeMounts []worker.VolumeMount, metadata db.ContainerMetadata) error {
func (step *TaskStep) registerCaches(logger lager.Logger, repository *build.Repository, config atc.TaskConfig, volumeMounts []worker.VolumeMount, metadata db.ContainerMetadata) error {
logger.Debug("initializing-caches", lager.Data{"caches": config.Caches})
for _, cacheConfig := range config.Caches {
@ -437,41 +442,15 @@ func (step *TaskStep) registerCaches(logger lager.Logger, repository *artifact.R
return nil
}
type taskArtifactSource struct {
worker.Volume
}
func NewTaskArtifactSource(volume worker.Volume) *taskArtifactSource {
return &taskArtifactSource{volume}
}
func (src *taskArtifactSource) StreamTo(ctx context.Context, logger lager.Logger, destination worker.ArtifactDestination) error {
logger = logger.Session("task-artifact-streaming", lager.Data{
"src-volume": src.Handle(),
"src-worker": src.WorkerName(),
})
return streamToHelper(ctx, src, logger, destination)
}
func (src *taskArtifactSource) StreamFile(ctx context.Context, logger lager.Logger, filename string) (io.ReadCloser, error) {
logger.Debug("streaming-file-from-volume")
return streamFileHelper(ctx, src, logger, filename)
}
func (src *taskArtifactSource) VolumeOn(logger lager.Logger, w worker.Worker) (worker.Volume, bool, error) {
return w.LookupVolume(logger, src.Handle())
}
type taskInputSource struct {
type taskInput struct {
config atc.TaskInputConfig
source worker.ArtifactSource
artifact runtime.Artifact
artifactsRoot string
}
func (s *taskInputSource) Source() worker.ArtifactSource { return s.source }
func (s *taskInput) Artifact() runtime.Artifact { return s.artifact }
func (s *taskInputSource) DestinationPath() string {
func (s *taskInput) DestinationPath() string {
subdir := s.config.Path
if s.config.Path == "" {
subdir = s.config.Name
@ -489,51 +468,14 @@ func artifactsPath(outputConfig atc.TaskOutputConfig, artifactsRoot string) stri
return path.Join(artifactsRoot, outputSrc) + "/"
}
type taskCacheInputSource struct {
source worker.ArtifactSource
type taskCacheInput struct {
artifact runtime.Artifact
artifactsRoot string
cachePath string
}
func (s *taskCacheInputSource) Source() worker.ArtifactSource { return s.source }
func (s *taskCacheInput) Artifact() runtime.Artifact { return s.artifact }
func (s *taskCacheInputSource) DestinationPath() string {
func (s *taskCacheInput) DestinationPath() string {
return filepath.Join(s.artifactsRoot, s.cachePath)
}
type taskCacheSource struct {
logger lager.Logger
teamID int
jobID int
stepName string
path string
}
func newTaskCacheSource(
logger lager.Logger,
teamID int,
jobID int,
stepName string,
path string,
) *taskCacheSource {
return &taskCacheSource{
logger: logger,
teamID: teamID,
jobID: jobID,
stepName: stepName,
path: path,
}
}
func (src *taskCacheSource) StreamTo(ctx context.Context, logger lager.Logger, destination worker.ArtifactDestination) error {
// cache will be initialized every time on a new worker
return nil
}
func (src *taskCacheSource) StreamFile(ctx context.Context, logger lager.Logger, filename string) (io.ReadCloser, error) {
return nil, errors.New("taskCacheSource.StreamFile not implemented")
}
func (src *taskCacheSource) VolumeOn(logger lager.Logger, w worker.Worker) (worker.Volume, bool, error) {
return w.FindVolumeForTaskCache(src.logger, src.teamID, src.jobID, src.stepName, src.path)
}

View File

@ -8,6 +8,8 @@ import (
"io/ioutil"
"strings"
"github.com/concourse/concourse/atc/runtime"
"code.cloudfoundry.org/lager"
"code.cloudfoundry.org/lager/lagertest"
"github.com/DataDog/zstd"
@ -19,7 +21,7 @@ import (
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/db/lock/lockfakes"
"github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/exec/execfakes"
"github.com/concourse/concourse/atc/worker"
"github.com/concourse/concourse/atc/worker/workerfakes"
@ -45,7 +47,7 @@ var _ = Describe("TaskStep", func() {
interpolatedResourceTypes atc.VersionedResourceTypes
repo *artifact.Repository
repo *build.Repository
state *execfakes.FakeRunState
taskStep exec.Step
@ -88,7 +90,7 @@ var _ = Describe("TaskStep", func() {
fakeDelegate.StdoutReturns(stdoutBuf)
fakeDelegate.StderrReturns(stderrBuf)
repo = artifact.NewRepository()
repo = build.NewRepository()
state = new(execfakes.FakeRunState)
state.ArtifactsReturns(repo)
@ -989,7 +991,7 @@ var _ = Describe("TaskStep", func() {
Context("but the stream is empty", func() {
It("returns ErrFileNotFound", func() {
_, err := artifactSource1.StreamFile(context.TODO(), logger, "some-path")
Expect(err).To(MatchError(exec.FileNotFoundError{Path: "some-path"}))
Expect(err).To(MatchError(runtime.FileNotFoundError{Path: "some-path"}))
})
})
})

View File

@ -6,7 +6,7 @@ import (
"time"
. "github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/exec/execfakes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -19,7 +19,7 @@ var _ = Describe("Timeout Step", func() {
fakeStep *execfakes.FakeStep
repo *artifact.Repository
repo *build.Repository
state *execfakes.FakeRunState
step Step
@ -34,7 +34,7 @@ var _ = Describe("Timeout Step", func() {
fakeStep = new(execfakes.FakeStep)
repo = artifact.NewRepository()
repo = build.NewRepository()
state = new(execfakes.FakeRunState)
state.ArtifactsReturns(repo)

View File

@ -5,7 +5,7 @@ import (
"errors"
. "github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/build"
"github.com/concourse/concourse/atc/exec/execfakes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -18,7 +18,7 @@ var _ = Describe("Try Step", func() {
runStep *execfakes.FakeStep
repo *artifact.Repository
repo *build.Repository
state *execfakes.FakeRunState
step Step
@ -29,7 +29,7 @@ var _ = Describe("Try Step", func() {
runStep = new(execfakes.FakeStep)
repo = artifact.NewRepository()
repo = build.NewRepository()
state = new(execfakes.FakeRunState)
state.ArtifactsReturns(repo)

View File

@ -2,6 +2,7 @@ package exec
import (
"errors"
"github.com/concourse/concourse/atc/runtime"
"github.com/concourse/concourse/atc"
)
@ -39,7 +40,7 @@ type PutStepVersionSource struct {
}
func (p *PutStepVersionSource) Version(state RunState) (atc.Version, error) {
var info VersionInfo
var info runtime.VersionResult
if !state.Result(p.planID, &info) {
return atc.Version{}, ErrPutStepVersionMissing
}

View File

@ -1,135 +0,0 @@
package fetcher
import (
"context"
"errors"
"io"
"time"
"code.cloudfoundry.org/clock"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/db/lock"
"github.com/concourse/concourse/atc/resource"
"github.com/concourse/concourse/atc/worker"
)
const GetResourceLockInterval = 5 * time.Second
var ErrFailedToGetLock = errors.New("failed to get lock")
var ErrInterrupted = errors.New("interrupted")
//go:generate counterfeiter . Fetcher
type Fetcher interface {
Fetch(
ctx context.Context,
logger lager.Logger,
containerMetadata db.ContainerMetadata,
gardenWorker worker.Worker,
containerSpec worker.ContainerSpec,
resourceTypes atc.VersionedResourceTypes,
resourceInstance resource.ResourceInstance,
imageFetchingDelegate worker.ImageFetchingDelegate,
) (resource.VersionedSource, error)
}
func NewFetcher(
clock clock.Clock,
lockFactory lock.LockFactory,
fetchSourceFactory FetchSourceFactory,
) Fetcher {
return &fetcher{
clock: clock,
lockFactory: lockFactory,
fetchSourceFactory: fetchSourceFactory,
}
}
type fetcher struct {
clock clock.Clock
lockFactory lock.LockFactory
fetchSourceFactory FetchSourceFactory
}
func (f *fetcher) Fetch(
ctx context.Context,
logger lager.Logger,
containerMetadata db.ContainerMetadata,
gardenWorker worker.Worker,
containerSpec worker.ContainerSpec,
resourceTypes atc.VersionedResourceTypes,
resourceInstance resource.ResourceInstance,
imageFetchingDelegate worker.ImageFetchingDelegate,
) (resource.VersionedSource, error) {
containerSpec.Outputs = map[string]string{
"resource": resource.ResourcesDir("get"),
}
source := f.fetchSourceFactory.NewFetchSource(logger, gardenWorker, resourceInstance, resourceTypes, containerSpec, containerMetadata, imageFetchingDelegate)
ticker := f.clock.NewTicker(GetResourceLockInterval)
defer ticker.Stop()
versionedSource, err := f.fetchWithLock(ctx, logger, source, imageFetchingDelegate.Stdout())
if err != ErrFailedToGetLock {
return versionedSource, err
}
for {
select {
case <-ticker.C():
versionedSource, err := f.fetchWithLock(ctx, logger, source, imageFetchingDelegate.Stdout())
if err != nil {
if err == ErrFailedToGetLock {
break
}
return nil, err
}
return versionedSource, nil
case <-ctx.Done():
return nil, ctx.Err()
}
}
}
func (f *fetcher) fetchWithLock(
ctx context.Context,
logger lager.Logger,
source FetchSource,
stdout io.Writer,
) (resource.VersionedSource, error) {
versionedSource, found, err := source.Find()
if err != nil {
return nil, err
}
if found {
return versionedSource, nil
}
lockName, err := source.LockName()
if err != nil {
return nil, err
}
lockLogger := logger.Session("lock-task", lager.Data{"lock-name": lockName})
lock, acquired, err := f.lockFactory.Acquire(lockLogger, lock.NewTaskLockID(lockName))
if err != nil {
lockLogger.Error("failed-to-get-lock", err)
return nil, ErrFailedToGetLock
}
if !acquired {
lockLogger.Debug("did-not-get-lock")
return nil, ErrFailedToGetLock
}
defer lock.Release()
return source.Create(ctx)
}
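The retry loop above is easy to misread: the break inside the select only exits the select statement, so the outer for keeps polling on the ticker until either the lock is acquired or the context is cancelled. Below is a self-contained sketch of that same pattern (this is the logic the commit is in the middle of moving from the fetcher package into worker); the names fetchWithRetry and fetchWithLock here are illustrative, not the real API.

package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

var errFailedToGetLock = errors.New("failed to get lock")

// fetchWithRetry tries once immediately; if the lock is held elsewhere it
// polls on a ticker until the fetch succeeds, fails for another reason, or
// the context is cancelled.
func fetchWithRetry(ctx context.Context, interval time.Duration, fetchWithLock func() (string, error)) (string, error) {
    if v, err := fetchWithLock(); err != errFailedToGetLock {
        return v, err
    }

    ticker := time.NewTicker(interval)
    defer ticker.Stop()

    for {
        select {
        case <-ticker.C:
            v, err := fetchWithLock()
            if err == errFailedToGetLock {
                continue // lock still held elsewhere; try again on the next tick
            }
            return v, err
        case <-ctx.Done():
            return "", ctx.Err()
        }
    }
}

func main() {
    attempts := 0
    result, err := fetchWithRetry(context.Background(), 10*time.Millisecond, func() (string, error) {
        attempts++
        if attempts < 3 {
            return "", errFailedToGetLock
        }
        return "fetched", nil
    })
    fmt.Println(result, err)
}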

View File

@ -5,7 +5,8 @@ import (
"context"
"sync"
"github.com/concourse/concourse/atc/fetcher"
"github.com/concourse/concourse/atc/worker"
"github.com/concourse/concourse/atc/resource"
)
@ -257,4 +258,4 @@ func (fake *FakeFetchSource) recordInvocation(key string, args []interface{}) {
fake.invocations[key] = append(fake.invocations[key], args)
}
var _ fetcher.FetchSource = new(FakeFetchSource)
var _ worker.FetchSource = new(FakeFetchSource)

View File

@ -7,13 +7,12 @@ import (
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/fetcher"
"github.com/concourse/concourse/atc/resource"
"github.com/concourse/concourse/atc/worker"
)
type FakeFetchSourceFactory struct {
NewFetchSourceStub func(lager.Logger, worker.Worker, resource.ResourceInstance, atc.VersionedResourceTypes, worker.ContainerSpec, db.ContainerMetadata, worker.ImageFetchingDelegate) fetcher.FetchSource
NewFetchSourceStub func(lager.Logger, worker.Worker, resource.ResourceInstance, atc.VersionedResourceTypes, worker.ContainerSpec, db.ContainerMetadata, worker.ImageFetchingDelegate) worker.FetchSource
newFetchSourceMutex sync.RWMutex
newFetchSourceArgsForCall []struct {
arg1 lager.Logger
@ -25,16 +24,16 @@ type FakeFetchSourceFactory struct {
arg7 worker.ImageFetchingDelegate
}
newFetchSourceReturns struct {
result1 fetcher.FetchSource
result1 worker.FetchSource
}
newFetchSourceReturnsOnCall map[int]struct {
result1 fetcher.FetchSource
result1 worker.FetchSource
}
invocations map[string][][]interface{}
invocationsMutex sync.RWMutex
}
func (fake *FakeFetchSourceFactory) NewFetchSource(arg1 lager.Logger, arg2 worker.Worker, arg3 resource.ResourceInstance, arg4 atc.VersionedResourceTypes, arg5 worker.ContainerSpec, arg6 db.ContainerMetadata, arg7 worker.ImageFetchingDelegate) fetcher.FetchSource {
func (fake *FakeFetchSourceFactory) NewFetchSource(arg1 lager.Logger, arg2 worker.Worker, arg3 resource.ResourceInstance, arg4 atc.VersionedResourceTypes, arg5 worker.ContainerSpec, arg6 db.ContainerMetadata, arg7 worker.ImageFetchingDelegate) worker.FetchSource {
fake.newFetchSourceMutex.Lock()
ret, specificReturn := fake.newFetchSourceReturnsOnCall[len(fake.newFetchSourceArgsForCall)]
fake.newFetchSourceArgsForCall = append(fake.newFetchSourceArgsForCall, struct {
@ -64,7 +63,7 @@ func (fake *FakeFetchSourceFactory) NewFetchSourceCallCount() int {
return len(fake.newFetchSourceArgsForCall)
}
func (fake *FakeFetchSourceFactory) NewFetchSourceCalls(stub func(lager.Logger, worker.Worker, resource.ResourceInstance, atc.VersionedResourceTypes, worker.ContainerSpec, db.ContainerMetadata, worker.ImageFetchingDelegate) fetcher.FetchSource) {
func (fake *FakeFetchSourceFactory) NewFetchSourceCalls(stub func(lager.Logger, worker.Worker, resource.ResourceInstance, atc.VersionedResourceTypes, worker.ContainerSpec, db.ContainerMetadata, worker.ImageFetchingDelegate) worker.FetchSource) {
fake.newFetchSourceMutex.Lock()
defer fake.newFetchSourceMutex.Unlock()
fake.NewFetchSourceStub = stub
@ -77,26 +76,26 @@ func (fake *FakeFetchSourceFactory) NewFetchSourceArgsForCall(i int) (lager.Logg
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6, argsForCall.arg7
}
func (fake *FakeFetchSourceFactory) NewFetchSourceReturns(result1 fetcher.FetchSource) {
func (fake *FakeFetchSourceFactory) NewFetchSourceReturns(result1 worker.FetchSource) {
fake.newFetchSourceMutex.Lock()
defer fake.newFetchSourceMutex.Unlock()
fake.NewFetchSourceStub = nil
fake.newFetchSourceReturns = struct {
result1 fetcher.FetchSource
result1 worker.FetchSource
}{result1}
}
func (fake *FakeFetchSourceFactory) NewFetchSourceReturnsOnCall(i int, result1 fetcher.FetchSource) {
func (fake *FakeFetchSourceFactory) NewFetchSourceReturnsOnCall(i int, result1 worker.FetchSource) {
fake.newFetchSourceMutex.Lock()
defer fake.newFetchSourceMutex.Unlock()
fake.NewFetchSourceStub = nil
if fake.newFetchSourceReturnsOnCall == nil {
fake.newFetchSourceReturnsOnCall = make(map[int]struct {
result1 fetcher.FetchSource
result1 worker.FetchSource
})
}
fake.newFetchSourceReturnsOnCall[i] = struct {
result1 fetcher.FetchSource
result1 worker.FetchSource
}{result1}
}
@ -124,4 +123,4 @@ func (fake *FakeFetchSourceFactory) recordInvocation(key string, args []interfac
fake.invocations[key] = append(fake.invocations[key], args)
}
var _ fetcher.FetchSourceFactory = new(FakeFetchSourceFactory)
var _ worker.FetchSourceFactory = new(FakeFetchSourceFactory)

View File

@ -8,7 +8,6 @@ import (
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/fetcher"
"github.com/concourse/concourse/atc/resource"
"github.com/concourse/concourse/atc/worker"
)
@ -132,4 +131,4 @@ func (fake *FakeFetcher) recordInvocation(key string, args []interface{}) {
fake.invocations[key] = append(fake.invocations[key], args)
}
var _ fetcher.Fetcher = new(FakeFetcher)
var _ worker.Fetcher = new(FakeFetcher)

View File

@ -1,197 +0,0 @@
package fetcher
import (
"context"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/resource"
"github.com/concourse/concourse/atc/worker"
)
//go:generate counterfeiter . FetchSource
type FetchSource interface {
LockName() (string, error)
Find() (resource.VersionedSource, bool, error)
Create(context.Context) (resource.VersionedSource, error)
}
//go:generate counterfeiter . FetchSourceFactory
type FetchSourceFactory interface {
NewFetchSource(
logger lager.Logger,
worker worker.Worker,
resourceInstance resource.ResourceInstance,
resourceTypes atc.VersionedResourceTypes,
containerSpec worker.ContainerSpec,
containerMetadata db.ContainerMetadata,
imageFetchingDelegate worker.ImageFetchingDelegate,
) FetchSource
}
type fetchSourceFactory struct {
resourceCacheFactory db.ResourceCacheFactory
resourceFactory resource.ResourceFactory
}
func NewFetchSourceFactory(
resourceCacheFactory db.ResourceCacheFactory,
resourceFactory resource.ResourceFactory,
) FetchSourceFactory {
return &fetchSourceFactory{
resourceCacheFactory: resourceCacheFactory,
resourceFactory: resourceFactory,
}
}
func (r *fetchSourceFactory) NewFetchSource(
logger lager.Logger,
worker worker.Worker,
resourceInstance resource.ResourceInstance,
resourceTypes atc.VersionedResourceTypes,
containerSpec worker.ContainerSpec,
containerMetadata db.ContainerMetadata,
imageFetchingDelegate worker.ImageFetchingDelegate,
) FetchSource {
return &resourceInstanceFetchSource{
logger: logger,
worker: worker,
resourceInstance: resourceInstance,
resourceTypes: resourceTypes,
containerSpec: containerSpec,
containerMetadata: containerMetadata,
imageFetchingDelegate: imageFetchingDelegate,
dbResourceCacheFactory: r.resourceCacheFactory,
resourceFactory: r.resourceFactory,
}
}
type resourceInstanceFetchSource struct {
logger lager.Logger
worker worker.Worker
resourceInstance resource.ResourceInstance
resourceTypes atc.VersionedResourceTypes
containerSpec worker.ContainerSpec
containerMetadata db.ContainerMetadata
imageFetchingDelegate worker.ImageFetchingDelegate
dbResourceCacheFactory db.ResourceCacheFactory
resourceFactory resource.ResourceFactory
}
func (s *resourceInstanceFetchSource) LockName() (string, error) {
return s.resourceInstance.LockName(s.worker.Name())
}
func (s *resourceInstanceFetchSource) Find() (resource.VersionedSource, bool, error) {
sLog := s.logger.Session("find")
volume, found, err := s.resourceInstance.FindOn(s.logger, s.worker)
if err != nil {
sLog.Error("failed-to-find-initialized-on", err)
return nil, false, err
}
if !found {
return nil, false, nil
}
metadata, err := s.dbResourceCacheFactory.ResourceCacheMetadata(s.resourceInstance.ResourceCache())
if err != nil {
sLog.Error("failed-to-get-resource-cache-metadata", err)
return nil, false, err
}
s.logger.Debug("found-initialized-versioned-source", lager.Data{"version": s.resourceInstance.Version(), "metadata": metadata.ToATCMetadata()})
return resource.NewGetVersionedSource(
volume,
s.resourceInstance.Version(),
metadata.ToATCMetadata(),
), true, nil
}
// Create runs under the lock, but we need to make sure the volume does not
// already exist before creating it under the lock
func (s *resourceInstanceFetchSource) Create(ctx context.Context) (resource.VersionedSource, error) {
sLog := s.logger.Session("create")
versionedSource, found, err := s.Find()
if err != nil {
return nil, err
}
if found {
return versionedSource, nil
}
s.containerSpec.BindMounts = []worker.BindMountSource{
&worker.CertsVolumeMount{Logger: s.logger},
}
container, err := s.worker.FindOrCreateContainer(
ctx,
s.logger,
s.imageFetchingDelegate,
s.resourceInstance.ContainerOwner(),
s.containerMetadata,
s.containerSpec,
s.resourceTypes,
)
if err != nil {
return nil, err
}
if err != nil {
sLog.Error("failed-to-construct-resource", err)
return nil, err
}
mountPath := resource.ResourcesDir("get")
var volume worker.Volume
for _, mount := range container.VolumeMounts() {
if mount.MountPath == mountPath {
volume = mount.Volume
break
}
}
res := s.resourceFactory.NewResourceForContainer(container)
versionedSource, err = res.Get(
ctx,
volume,
resource.IOConfig{
Stdout: s.imageFetchingDelegate.Stdout(),
Stderr: s.imageFetchingDelegate.Stderr(),
},
s.resourceInstance.Source(),
s.resourceInstance.Params(),
s.resourceInstance.Version(),
)
if err != nil {
sLog.Error("failed-to-fetch-resource", err)
return nil, err
}
err = volume.SetPrivileged(false)
if err != nil {
sLog.Error("failed-to-set-volume-unprivileged", err)
return nil, err
}
err = volume.InitializeResourceCache(s.resourceInstance.ResourceCache())
if err != nil {
sLog.Error("failed-to-initialize-cache", err)
return nil, err
}
err = s.dbResourceCacheFactory.UpdateResourceCacheMetadata(s.resourceInstance.ResourceCache(), versionedSource.Metadata())
if err != nil {
s.logger.Error("failed-to-update-resource-cache-metadata", err, lager.Data{"resource-cache": s.resourceInstance.ResourceCache()})
return nil, err
}
return versionedSource, nil
}

View File

@ -2,9 +2,10 @@ package resource
import (
"context"
"io"
"path/filepath"
"github.com/concourse/concourse/atc/runtime"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/worker"
)
@ -18,8 +19,8 @@ type ResourceFactory interface {
//go:generate counterfeiter . Resource
type Resource interface {
Get(context.Context, worker.Volume, IOConfig, atc.Source, atc.Params, atc.Version) (VersionedSource, error)
Put(context.Context, IOConfig, atc.Source, atc.Params) (VersionResult, error)
Get(context.Context, worker.Volume, runtime.IOConfig, atc.Source, atc.Params, atc.Version) (VersionedSource, error)
Put(context.Context, runtime.IOConfig, atc.Source, atc.Params) (runtime.VersionResult, error)
Check(context.Context, atc.Source, atc.Version) ([]atc.Version, error)
}
@ -29,12 +30,11 @@ type Metadata interface {
Env() []string
}
type IOConfig struct {
Stdout io.Writer
Stderr io.Writer
}
//type IOConfig struct {
// Stdout io.Writer
// Stderr io.Writer
//}
// TODO: check if we need it
func ResourcesDir(suffix string) string {
return filepath.Join("/tmp", "build", suffix)
}
@ -46,9 +46,8 @@ func NewResource(container worker.Container) *resource {
}
type resource struct {
// TODO make this wrap a Runnable instead of a container
container worker.Container
ScriptFailure bool
}
func NewResourceFactory() *resourceFactory {

View File

@ -3,8 +3,11 @@ package resource
import (
"context"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/worker"
"github.com/concourse/concourse/atc/runtime"
"github.com/concourse/concourse/atc"
)
type getRequest struct {
@ -16,13 +19,14 @@ type getRequest struct {
func (resource *resource) Get(
ctx context.Context,
volume worker.Volume,
ioConfig IOConfig,
ioConfig runtime.IOConfig,
source atc.Source,
params atc.Params,
version atc.Version,
) (VersionedSource, error) {
var vr VersionResult
var vr runtime.VersionResult
// TODO: this should be something on the worker client, not a direct runScript call
err := resource.runScript(
ctx,
"/opt/resource/in",

View File

@ -4,6 +4,7 @@ import (
"bytes"
"context"
"errors"
"github.com/concourse/concourse/atc/runtime"
"io"
"io/ioutil"
@ -34,7 +35,7 @@ var _ = Describe("Resource Get", func() {
versionedSource resource.VersionedSource
ioConfig resource.IOConfig
ioConfig runtime.IOConfig
stdoutBuf *gbytes.Buffer
stderrBuf *gbytes.Buffer
@ -69,7 +70,7 @@ var _ = Describe("Resource Get", func() {
stdoutBuf = gbytes.NewBuffer()
stderrBuf = gbytes.NewBuffer()
ioConfig = resource.IOConfig{
ioConfig = runtime.IOConfig{
Stdout: stdoutBuf,
Stderr: stderrBuf,
}

View File

@ -1,14 +1,8 @@
package resource
import (
"crypto/sha256"
"encoding/json"
"fmt"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/worker"
)
//go:generate counterfeiter . ResourceInstance
@ -23,9 +17,9 @@ type ResourceInstance interface {
ResourceCache() db.UsedResourceCache
ContainerOwner() db.ContainerOwner
LockName(string) (string, error)
//LockName(string) (string, error)
FindOn(lager.Logger, worker.Worker) (worker.Volume, bool, error)
//FindOn(lager.Logger, worker.Worker) (worker.Volume, bool, error)
}
type resourceInstance struct {
@ -86,28 +80,28 @@ func (instance resourceInstance) ResourceType() ResourceType {
}
// XXX: this is weird
func (instance resourceInstance) LockName(workerName string) (string, error) {
id := &resourceInstanceLockID{
Type: instance.resourceTypeName,
Version: instance.version,
Source: instance.source,
Params: instance.params,
WorkerName: workerName,
}
//func (instance resourceInstance) LockName(workerName string) (string, error) {
// id := &resourceInstanceLockID{
// Type: instance.resourceTypeName,
// Version: instance.version,
// Source: instance.source,
// Params: instance.params,
// WorkerName: workerName,
// }
//
// taskNameJSON, err := json.Marshal(id)
// if err != nil {
// return "", err
// }
// return fmt.Sprintf("%x", sha256.Sum256(taskNameJSON)), nil
//}
taskNameJSON, err := json.Marshal(id)
if err != nil {
return "", err
}
return fmt.Sprintf("%x", sha256.Sum256(taskNameJSON)), nil
}
func (instance resourceInstance) FindOn(logger lager.Logger, w worker.Worker) (worker.Volume, bool, error) {
return w.FindVolumeForResourceCache(
logger,
instance.resourceCache,
)
}
//func (instance resourceInstance) FindOn(logger lager.Logger, w worker.Worker) (worker.Volume, bool, error) {
// return w.FindVolumeForResourceCache(
// logger,
// instance.resourceCache,
// )
//}
type resourceInstanceLockID struct {
Type ResourceType `json:"type,omitempty"`

View File

@ -4,30 +4,32 @@ import (
"context"
"fmt"
"github.com/concourse/concourse/atc/runtime"
"github.com/concourse/concourse/atc"
)
type putRequest struct {
Source atc.Source `json:"source"`
Params atc.Params `json:"params,omitempty"`
}
//type putRequest struct {
// Source atc.Source `json:"source"`
// Params atc.Params `json:"params,omitempty"`
//}
func (resource *resource) Put(
ctx context.Context,
ioConfig IOConfig,
ioConfig runtime.IOConfig,
source atc.Source,
params atc.Params,
) (VersionResult, error) {
) (runtime.VersionResult, error) {
resourceDir := ResourcesDir("put")
vr := &VersionResult{}
vr := &runtime.VersionResult{}
path := "/opt/resource/out"
err := resource.runScript(
ctx,
path,
[]string{resourceDir},
putRequest{
runtime.PutRequest{
Params: params,
Source: source,
},
@ -36,7 +38,7 @@ func (resource *resource) Put(
true,
)
if err != nil {
return VersionResult{}, err
return runtime.VersionResult{}, err
}
if vr == nil {
return runtime.VersionResult{}, fmt.Errorf("resource script (%s %s) output a null version", path, resourceDir)

View File

@ -3,6 +3,7 @@ package resource_test
import (
"context"
"errors"
"github.com/concourse/concourse/atc/runtime"
"io/ioutil"
"code.cloudfoundry.org/garden"
@ -30,9 +31,9 @@ var _ = Describe("Resource Put", func() {
outScriptProcess *gfakes.FakeProcess
versionResult VersionResult
versionResult runtime.VersionResult
ioConfig IOConfig
ioConfig runtime.IOConfig
stdoutBuf *gbytes.Buffer
stderrBuf *gbytes.Buffer
@ -61,7 +62,7 @@ var _ = Describe("Resource Put", func() {
stdoutBuf = gbytes.NewBuffer()
stderrBuf = gbytes.NewBuffer()
ioConfig = IOConfig{
ioConfig = runtime.IOConfig{
Stdout: stdoutBuf,
Stderr: stderrBuf,
}

View File

@ -7,6 +7,7 @@ import (
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/resource"
"github.com/concourse/concourse/atc/runtime"
"github.com/concourse/concourse/atc/worker"
)
@ -26,12 +27,12 @@ type FakeResource struct {
result1 []atc.Version
result2 error
}
GetStub func(context.Context, worker.Volume, resource.IOConfig, atc.Source, atc.Params, atc.Version) (resource.VersionedSource, error)
GetStub func(context.Context, worker.Volume, runtime.IOConfig, atc.Source, atc.Params, atc.Version) (resource.VersionedSource, error)
getMutex sync.RWMutex
getArgsForCall []struct {
arg1 context.Context
arg2 worker.Volume
arg3 resource.IOConfig
arg3 runtime.IOConfig
arg4 atc.Source
arg5 atc.Params
arg6 atc.Version
@ -44,20 +45,20 @@ type FakeResource struct {
result1 resource.VersionedSource
result2 error
}
PutStub func(context.Context, resource.IOConfig, atc.Source, atc.Params) (resource.VersionResult, error)
PutStub func(context.Context, runtime.IOConfig, atc.Source, atc.Params) (runtime.VersionResult, error)
putMutex sync.RWMutex
putArgsForCall []struct {
arg1 context.Context
arg2 resource.IOConfig
arg2 runtime.IOConfig
arg3 atc.Source
arg4 atc.Params
}
putReturns struct {
result1 resource.VersionResult
result1 runtime.VersionResult
result2 error
}
putReturnsOnCall map[int]struct {
result1 resource.VersionResult
result1 runtime.VersionResult
result2 error
}
invocations map[string][][]interface{}
@ -129,13 +130,13 @@ func (fake *FakeResource) CheckReturnsOnCall(i int, result1 []atc.Version, resul
}{result1, result2}
}
func (fake *FakeResource) Get(arg1 context.Context, arg2 worker.Volume, arg3 resource.IOConfig, arg4 atc.Source, arg5 atc.Params, arg6 atc.Version) (resource.VersionedSource, error) {
func (fake *FakeResource) Get(arg1 context.Context, arg2 worker.Volume, arg3 runtime.IOConfig, arg4 atc.Source, arg5 atc.Params, arg6 atc.Version) (resource.VersionedSource, error) {
fake.getMutex.Lock()
ret, specificReturn := fake.getReturnsOnCall[len(fake.getArgsForCall)]
fake.getArgsForCall = append(fake.getArgsForCall, struct {
arg1 context.Context
arg2 worker.Volume
arg3 resource.IOConfig
arg3 runtime.IOConfig
arg4 atc.Source
arg5 atc.Params
arg6 atc.Version
@ -158,13 +159,13 @@ func (fake *FakeResource) GetCallCount() int {
return len(fake.getArgsForCall)
}
func (fake *FakeResource) GetCalls(stub func(context.Context, worker.Volume, resource.IOConfig, atc.Source, atc.Params, atc.Version) (resource.VersionedSource, error)) {
func (fake *FakeResource) GetCalls(stub func(context.Context, worker.Volume, runtime.IOConfig, atc.Source, atc.Params, atc.Version) (resource.VersionedSource, error)) {
fake.getMutex.Lock()
defer fake.getMutex.Unlock()
fake.GetStub = stub
}
func (fake *FakeResource) GetArgsForCall(i int) (context.Context, worker.Volume, resource.IOConfig, atc.Source, atc.Params, atc.Version) {
func (fake *FakeResource) GetArgsForCall(i int) (context.Context, worker.Volume, runtime.IOConfig, atc.Source, atc.Params, atc.Version) {
fake.getMutex.RLock()
defer fake.getMutex.RUnlock()
argsForCall := fake.getArgsForCall[i]
@ -197,12 +198,12 @@ func (fake *FakeResource) GetReturnsOnCall(i int, result1 resource.VersionedSour
}{result1, result2}
}
func (fake *FakeResource) Put(arg1 context.Context, arg2 resource.IOConfig, arg3 atc.Source, arg4 atc.Params) (resource.VersionResult, error) {
func (fake *FakeResource) Put(arg1 context.Context, arg2 runtime.IOConfig, arg3 atc.Source, arg4 atc.Params) (runtime.VersionResult, error) {
fake.putMutex.Lock()
ret, specificReturn := fake.putReturnsOnCall[len(fake.putArgsForCall)]
fake.putArgsForCall = append(fake.putArgsForCall, struct {
arg1 context.Context
arg2 resource.IOConfig
arg2 runtime.IOConfig
arg3 atc.Source
arg4 atc.Params
}{arg1, arg2, arg3, arg4})
@ -224,41 +225,41 @@ func (fake *FakeResource) PutCallCount() int {
return len(fake.putArgsForCall)
}
func (fake *FakeResource) PutCalls(stub func(context.Context, resource.IOConfig, atc.Source, atc.Params) (resource.VersionResult, error)) {
func (fake *FakeResource) PutCalls(stub func(context.Context, runtime.IOConfig, atc.Source, atc.Params) (runtime.VersionResult, error)) {
fake.putMutex.Lock()
defer fake.putMutex.Unlock()
fake.PutStub = stub
}
func (fake *FakeResource) PutArgsForCall(i int) (context.Context, resource.IOConfig, atc.Source, atc.Params) {
func (fake *FakeResource) PutArgsForCall(i int) (context.Context, runtime.IOConfig, atc.Source, atc.Params) {
fake.putMutex.RLock()
defer fake.putMutex.RUnlock()
argsForCall := fake.putArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4
}
func (fake *FakeResource) PutReturns(result1 resource.VersionResult, result2 error) {
func (fake *FakeResource) PutReturns(result1 runtime.VersionResult, result2 error) {
fake.putMutex.Lock()
defer fake.putMutex.Unlock()
fake.PutStub = nil
fake.putReturns = struct {
result1 resource.VersionResult
result1 runtime.VersionResult
result2 error
}{result1, result2}
}
func (fake *FakeResource) PutReturnsOnCall(i int, result1 resource.VersionResult, result2 error) {
func (fake *FakeResource) PutReturnsOnCall(i int, result1 runtime.VersionResult, result2 error) {
fake.putMutex.Lock()
defer fake.putMutex.Unlock()
fake.PutStub = nil
if fake.putReturnsOnCall == nil {
fake.putReturnsOnCall = make(map[int]struct {
result1 resource.VersionResult
result1 runtime.VersionResult
result2 error
})
}
fake.putReturnsOnCall[i] = struct {
result1 resource.VersionResult
result1 runtime.VersionResult
result2 error
}{result1, result2}
}

View File

@ -5,8 +5,10 @@ import (
"io"
"path"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/worker"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/runtime"
)
//go:generate counterfeiter . VersionedSource
@ -21,18 +23,18 @@ type VersionedSource interface {
Volume() worker.Volume
}
type VersionResult struct {
Version atc.Version `json:"version"`
Metadata []atc.MetadataField `json:"metadata,omitempty"`
}
//type VersionResult struct {
// Version atc.Version `json:"version"`
//
// Metadata []atc.MetadataField `json:"metadata,omitempty"`
//}
func NewGetVersionedSource(volume worker.Volume, version atc.Version, metadata []atc.MetadataField) VersionedSource {
return &getVersionedSource{
volume: volume,
resourceDir: ResourcesDir("get"),
versionResult: VersionResult{
versionResult: runtime.VersionResult{
Version: version,
Metadata: metadata,
},
@ -40,7 +42,7 @@ func NewGetVersionedSource(volume worker.Volume, version atc.Version, metadata [
}
type getVersionedSource struct {
versionResult VersionResult
versionResult runtime.VersionResult
volume worker.Volume
resourceDir string

View File

@ -1,4 +1,4 @@
package exec
package runtime
import "fmt"

View File

@ -1,5 +1,12 @@
package runtime
import (
"fmt"
"io"
"github.com/concourse/concourse/atc"
)
const (
InitializingEvent = "Initializing"
StartingEvent = "Starting"
@ -7,6 +14,67 @@ const (
)
type Event struct {
EventType string
ExitStatus int
EventType string
ExitStatus int
VersionResult VersionResult
}
type IOConfig struct {
Stdout io.Writer
Stderr io.Writer
}
type VersionResult struct {
Version atc.Version `json:"version"`
Metadata []atc.MetadataField `json:"metadata,omitempty"`
}
type PutRequest struct {
Source atc.Source `json:"source"`
Params atc.Params `json:"params,omitempty"`
}
type GetRequest struct {
Source atc.Source `json:"source"`
Params atc.Params `json:"params,omitempty"`
}
type Artifact interface {
ID() string
}
type TaskCacheArtifact struct {
TeamID int
JobID int
StepName string
Path string
}
func (art TaskCacheArtifact) ID() string {
return fmt.Sprintf("%d, %d, %s, %s", art.TeamID, art.JobID, art.StepName, art.Path)
}
type GetArtifact struct {
VolumeHandle string
}
func (art GetArtifact) ID() string {
return art.VolumeHandle
}
type TaskArtifact struct {
VolumeHandle string
}
func (art *TaskArtifact) ID() string {
return art.VolumeHandle
}
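
A minimal sketch, not part of this commit, of how these artifact IDs behave; the handle and cache values are made up, only the runtime types above are real:

package main

import (
	"fmt"

	"github.com/concourse/concourse/atc/runtime"
)

func main() {
	// Get and task artifacts are identified by the handle of the volume that
	// backs them; task caches by a (team, job, step, path) tuple instead.
	artifacts := []runtime.Artifact{
		runtime.GetArtifact{VolumeHandle: "get-volume-handle"},
		&runtime.TaskArtifact{VolumeHandle: "task-volume-handle"},
		runtime.TaskCacheArtifact{TeamID: 1, JobID: 2, StepName: "build", Path: ".gradle"},
	}

	for _, a := range artifacts {
		fmt.Printf("%T -> %s\n", a, a.ID())
	}
}
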
//type Runnable interface {
// Destroy() error
//
// VolumeMounts() []VolumeMount
//
// WorkerName() string
//
// MarkAsHijacked() error
//}
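
A hedged sketch of the event flow these types support: a producer sends Starting/Finished on a channel supplied by the caller, and the consumer reads the VersionResult off the Finished event. The channel size and the producer/consumer split are assumptions; only the runtime types and event-name constants come from this package:

package main

import (
	"fmt"

	"github.com/concourse/concourse/atc"
	"github.com/concourse/concourse/atc/runtime"
)

func main() {
	// Buffered so the producer does not block if the consumer is briefly behind.
	events := make(chan runtime.Event, 2)

	// Hypothetical producer: a step announces that it started, then finished.
	go func() {
		events <- runtime.Event{EventType: runtime.StartingEvent}
		events <- runtime.Event{
			EventType:  runtime.FinishedEvent,
			ExitStatus: 0,
			VersionResult: runtime.VersionResult{
				Version: atc.Version{"ref": "abc123"},
			},
		}
		close(events)
	}()

	// Consumer: react to the event types defined above.
	for ev := range events {
		switch ev.EventType {
		case runtime.StartingEvent:
			fmt.Println("step starting")
		case runtime.FinishedEvent:
			fmt.Printf("step finished: status=%d version=%v\n", ev.ExitStatus, ev.VersionResult.Version)
		}
	}
}
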

View File

@ -1,9 +1,15 @@
package worker
import (
"archive/tar"
"context"
"errors"
"io"
"github.com/DataDog/zstd"
"github.com/concourse/concourse/atc/runtime"
"github.com/hashicorp/go-multierror"
"code.cloudfoundry.org/lager"
)
@ -28,3 +34,142 @@ type ArtifactSource interface {
// `StreamTo` will be used to copy the data to the destination instead.
VolumeOn(lager.Logger, Worker) (Volume, bool, error)
}
type getArtifactSource struct {
artifact runtime.GetArtifact
volume Volume
}
func (source *getArtifactSource) StreamTo(ctx context.Context, logger lager.Logger, dest ArtifactDestination) error {
return streamToHelper(ctx, source.volume, logger, dest)
}
func (source *getArtifactSource) StreamFile(ctx context.Context, logger lager.Logger, path string) (io.ReadCloser, error) {
return streamFileHelper(ctx, source.volume, logger, path)
}
func (source *getArtifactSource) VolumeOn(logger lager.Logger, worker Worker) (Volume, bool, error) {
return worker.LookupVolume(logger, source.artifact.ID())
}
type taskArtifactSource struct {
artifact runtime.Artifact
volume Volume
}
func (source *taskArtifactSource) StreamTo(ctx context.Context, logger lager.Logger, dest ArtifactDestination) error {
return streamToHelper(ctx, source.volume, logger, dest)
}
func (source *taskArtifactSource) StreamFile(ctx context.Context, logger lager.Logger, path string) (io.ReadCloser, error) {
return streamFileHelper(ctx, source.volume, logger, path)
}
func (source *taskArtifactSource) VolumeOn(logger lager.Logger, worker Worker) (Volume, bool, error) {
return worker.LookupVolume(logger, source.artifact.ID())
}
type taskCacheArtifactSource struct {
artifact runtime.Artifact
volume Volume
}
//TODO: do we want these to be implemented?
// They were not used before.
func (source *taskCacheArtifactSource) StreamTo(ctx context.Context, logger lager.Logger, dest ArtifactDestination) error {
return streamToHelper(ctx, source.volume, logger, dest)
}
func (source *taskCacheArtifactSource) StreamFile(ctx context.Context, logger lager.Logger, path string) (io.ReadCloser, error) {
return streamFileHelper(ctx, source.volume, logger, path)
}
func (source *taskCacheArtifactSource) VolumeOn(logger lager.Logger, worker Worker) (Volume, bool, error) {
if taskCacheArt, ok := source.artifact.(runtime.TaskCacheArtifact); ok {
return worker.FindVolumeForTaskCache(logger, taskCacheArt.TeamID, taskCacheArt.JobID, taskCacheArt.StepName, taskCacheArt.Path)
} else {
logger.Fatal("incorrect-artifact-type-for-TaskCacheArtifactSource", errors.New("artifact is not a TaskCacheArtifact"), nil)
panic(source.artifact)
}
}
func streamToHelper(
ctx context.Context,
s interface {
StreamOut(context.Context, string) (io.ReadCloser, error)
},
logger lager.Logger,
destination ArtifactDestination,
) error {
logger.Debug("start")
defer logger.Debug("end")
out, err := s.StreamOut(ctx, ".")
if err != nil {
logger.Error("failed", err)
return err
}
defer out.Close()
err = destination.StreamIn(ctx, ".", out)
if err != nil {
logger.Error("failed", err)
return err
}
return nil
}
func streamFileHelper(
ctx context.Context,
s interface {
StreamOut(context.Context, string) (io.ReadCloser, error)
},
logger lager.Logger,
path string,
) (io.ReadCloser, error) {
out, err := s.StreamOut(ctx, path)
if err != nil {
return nil, err
}
zstdReader := zstd.NewReader(out)
tarReader := tar.NewReader(zstdReader)
_, err = tarReader.Next()
if err != nil {
return nil, runtime.FileNotFoundError{Path: path}
}
return fileReadMultiCloser{
reader: tarReader,
closers: []io.Closer{
out,
zstdReader,
},
}, nil
}
type fileReadMultiCloser struct {
reader io.Reader
closers []io.Closer
}
func (frc fileReadMultiCloser) Read(p []byte) (n int, err error) {
return frc.reader.Read(p)
}
func (frc fileReadMultiCloser) Close() error {
var closeErrors error
for _, closer := range frc.closers {
err := closer.Close()
if err != nil {
closeErrors = multierror.Append(closeErrors, err)
}
}
return closeErrors
}
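
These helpers assume StreamOut hands back a zstd-compressed tar stream; a self-contained round trip of that wire format (illustrative file name and contents, the real code gets the stream from a Volume):

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/DataDog/zstd"
)

func main() {
	// Produce what StreamOut is assumed to return: a zstd-compressed tar stream.
	var buf bytes.Buffer
	zw := zstd.NewWriter(&buf)
	tw := tar.NewWriter(zw)
	body := []byte("hello from a volume")
	tw.WriteHeader(&tar.Header{Name: "task.yml", Mode: 0644, Size: int64(len(body))})
	tw.Write(body)
	tw.Close()
	zw.Close()

	// Consume it the way streamFileHelper does: a zstd reader wrapping the
	// stream, then a tar reader positioned on the first entry.
	zr := zstd.NewReader(bytes.NewReader(buf.Bytes()))
	defer zr.Close()
	tr := tar.NewReader(zr)
	if _, err := tr.Next(); err != nil {
		panic(err) // streamFileHelper maps this to runtime.FileNotFoundError
	}
	contents, _ := ioutil.ReadAll(tr)
	fmt.Println(string(contents))
}
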

View File

@ -2,19 +2,23 @@ package worker
import (
"context"
"errors"
"fmt"
"io"
"path"
"strconv"
"time"
"github.com/concourse/concourse/atc/resource"
"code.cloudfoundry.org/garden"
"code.cloudfoundry.org/lager"
"github.com/concourse/baggageclaim"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/db/lock"
"github.com/concourse/concourse/atc/runtime"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-multierror"
)
const taskProcessID = "task"
@ -36,9 +40,41 @@ type Client interface {
ContainerPlacementStrategy,
db.ContainerMetadata,
ImageFetcherSpec,
TaskProcessSpec,
ProcessSpec,
chan runtime.Event,
) TaskResult
RunPutStep(
context.Context,
lager.Logger,
db.ContainerOwner,
ContainerSpec,
WorkerSpec,
atc.Source,
atc.Params,
ContainerPlacementStrategy,
db.ContainerMetadata,
ImageFetcherSpec,
string,
ProcessSpec,
chan runtime.Event,
) PutResult
RunGetStep(
context.Context,
lager.Logger,
db.ContainerOwner,
ContainerSpec,
WorkerSpec,
ContainerPlacementStrategy,
db.ContainerMetadata,
atc.VersionedResourceTypes,
resource.ResourceInstance,
Fetcher,
ImageFetchingDelegate,
db.UsedResourceCache,
ProcessSpec,
chan runtime.Event,
) (GetResult, error)
StreamFileFromArtifact(ctx context.Context, logger lager.Logger, artifact runtime.Artifact, filePath string) (io.ReadCloser, error)
}
func NewClient(pool Pool, provider WorkerProvider) *client {
@ -59,7 +95,20 @@ type TaskResult struct {
Err error
}
type TaskProcessSpec struct {
type PutResult struct {
Status int
VersionResult runtime.VersionResult
Err error
}
type GetResult struct {
Status int
VersionResult runtime.VersionResult
GetArtifact runtime.GetArtifact
Err error
}
type ProcessSpec struct {
Path string
Args []string
Dir string
@ -125,8 +174,8 @@ func (client *client) RunTaskStep(
workerSpec WorkerSpec,
strategy ContainerPlacementStrategy,
metadata db.ContainerMetadata,
imageSpec ImageFetcherSpec,
processSpec TaskProcessSpec,
imageFetcherSpec ImageFetcherSpec,
processSpec ProcessSpec,
events chan runtime.Event,
) TaskResult {
chosenWorker, err := client.chooseTaskWorker(
@ -150,11 +199,11 @@ func (client *client) RunTaskStep(
container, err := chosenWorker.FindOrCreateContainer(
ctx,
logger,
imageSpec.Delegate,
imageFetcherSpec.Delegate,
owner,
metadata,
containerSpec,
imageSpec.ResourceTypes,
imageFetcherSpec.ResourceTypes,
)
if err != nil {
@ -247,6 +296,71 @@ func (client *client) RunTaskStep(
return TaskResult{Status: status.processStatus, VolumeMounts: container.VolumeMounts(), Err: nil}
}
}
func (client *client) RunGetStep(
ctx context.Context,
logger lager.Logger,
owner db.ContainerOwner,
containerSpec ContainerSpec,
workerSpec WorkerSpec,
strategy ContainerPlacementStrategy,
containerMetadata db.ContainerMetadata,
resourceTypes atc.VersionedResourceTypes,
resourceInstance resource.ResourceInstance,
resourceFetcher Fetcher,
delegate ImageFetchingDelegate,
cache db.UsedResourceCache,
processSpec ProcessSpec,
events chan runtime.Event,
) (GetResult, error) {
vr := runtime.VersionResult{}
chosenWorker, err := client.pool.FindOrChooseWorkerForContainer(
ctx,
logger,
owner,
containerSpec,
workerSpec,
strategy,
)
if err != nil {
return GetResult{}, err
}
events <- runtime.Event{
EventType: runtime.StartingEvent,
}
// start of dependency on resource -> worker
getResult, err := resourceFetcher.Fetch(
ctx,
logger,
containerMetadata,
chosenWorker,
containerSpec,
processSpec,
resourceTypes,
resourceInstance,
delegate,
cache,
)
if err != nil {
logger.Error("failed-to-fetch-resource", err)
// TODO Define an error on Event for Concourse system errors, or define a Concourse system error exit status
events <- runtime.Event{
EventType: runtime.FinishedEvent,
ExitStatus: 500,
VersionResult: vr,
}
return GetResult{}, err
}
events <- runtime.Event{
EventType: runtime.FinishedEvent,
ExitStatus: getResult.Status,
VersionResult: getResult.VersionResult,
}
return getResult, nil
}
func (client *client) chooseTaskWorker(
ctx context.Context,
logger lager.Logger,
@ -369,3 +483,104 @@ type processStatus struct {
processStatus int
processErr error
}
func (client *client) RunPutStep(
ctx context.Context,
logger lager.Logger,
owner db.ContainerOwner,
containerSpec ContainerSpec,
workerSpec WorkerSpec,
source atc.Source,
params atc.Params,
strategy ContainerPlacementStrategy,
metadata db.ContainerMetadata,
imageFetcherSpec ImageFetcherSpec,
resourceDir string,
spec ProcessSpec,
events chan runtime.Event,
) PutResult {
vr := runtime.VersionResult{}
chosenWorker, err := client.pool.FindOrChooseWorkerForContainer(
ctx,
logger,
owner,
containerSpec,
workerSpec,
strategy,
)
if err != nil {
return PutResult{Status: -1, VersionResult: vr, Err: err}
}
container, err := chosenWorker.FindOrCreateContainer(
ctx,
logger,
imageFetcherSpec.Delegate,
owner,
metadata,
containerSpec,
imageFetcherSpec.ResourceTypes,
)
if err != nil {
return PutResult{Status: -1, VersionResult: vr, Err: err}
}
// container already exited
exitStatusProp, err := container.Property(taskExitStatusPropertyName)
if err == nil {
logger.Info("already-exited", lager.Data{"status": exitStatusProp})
return PutResult{Status: -1, VersionResult: vr, Err: nil}
}
var result PutResult
err = RunScript(
ctx,
container,
spec.Path,
spec.Args,
runtime.PutRequest{
Params: params,
Source: source,
},
&vr,
spec.StderrWriter,
true,
events,
)
if err != nil {
if failErr, ok := err.(ErrResourceScriptFailed); ok {
result = PutResult{failErr.ExitStatus, runtime.VersionResult{}, failErr}
} else {
result = PutResult{-1, runtime.VersionResult{}, err}
}
} else {
result = PutResult{0, vr, nil}
}
return result
}
func (client *client) StreamFileFromArtifact(ctx context.Context, logger lager.Logger, artifact runtime.Artifact, filePath string) (io.ReadCloser, error) {
var getArtifact runtime.GetArtifact
var ok bool
if getArtifact, ok = artifact.(runtime.GetArtifact); !ok {
return nil, errors.New("unrecognized task config artifact type")
}
artifactVolume, found, err := client.FindVolume(logger, 0, getArtifact.ID())
if err != nil {
return nil, err
}
if !found {
return nil, baggageclaim.ErrVolumeNotFound
}
source := getArtifactSource{
artifact: getArtifact,
volume: artifactVolume,
}
return source.StreamFile(ctx, logger, filePath)
}
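
A hedged usage sketch of StreamFileFromArtifact, using the generated FakeClient from this commit in place of a real worker client; the artifact handle, file path, and returned contents are made up:

package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"strings"

	"code.cloudfoundry.org/lager/lagertest"

	"github.com/concourse/concourse/atc/runtime"
	"github.com/concourse/concourse/atc/worker/workerfakes"
)

func main() {
	logger := lagertest.NewTestLogger("sketch")

	// Stand in for the real client with the counterfeiter fake from this commit.
	fakeClient := new(workerfakes.FakeClient)
	fakeClient.StreamFileFromArtifactReturns(ioutil.NopCloser(strings.NewReader("platform: linux\n")), nil)

	// A caller only needs the artifact (which carries its volume handle) and
	// the path of the file inside it.
	artifact := runtime.GetArtifact{VolumeHandle: "some-volume-handle"}
	rc, err := fakeClient.StreamFileFromArtifact(context.Background(), logger, artifact, "task.yml")
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	contents, _ := ioutil.ReadAll(rc)
	fmt.Print(string(contents))
}
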

View File

@ -9,6 +9,7 @@ import (
"code.cloudfoundry.org/garden"
"code.cloudfoundry.org/garden/gardenfakes"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db/dbfakes"
"github.com/concourse/concourse/atc/db/lock/lockfakes"
"github.com/concourse/concourse/atc/exec/execfakes"
"github.com/concourse/concourse/atc/runtime"
@ -227,7 +228,7 @@ var _ = Describe("Client", func() {
fakeMetadata db.ContainerMetadata
fakeDelegate *execfakes.FakeTaskDelegate
fakeImageFetcherSpec worker.ImageFetcherSpec
fakeTaskProcessSpec worker.TaskProcessSpec
fakeTaskProcessSpec worker.ProcessSpec
fakeContainer *workerfakes.FakeContainer
eventChan chan runtime.Event
ctx context.Context
@ -298,7 +299,7 @@ var _ = Describe("Client", func() {
Delegate: fakeDelegate,
ResourceTypes: atc.VersionedResourceTypes{},
}
fakeTaskProcessSpec = worker.TaskProcessSpec{
fakeTaskProcessSpec = worker.ProcessSpec{
Path: "/some/path",
Args: []string{"some", "args"},
Dir: "/some/dir",
@ -546,14 +547,14 @@ var _ = Describe("Client", func() {
stdoutBuf = new(gbytes.Buffer)
stderrBuf = new(gbytes.Buffer)
fakeTaskProcessSpec = worker.TaskProcessSpec{
fakeTaskProcessSpec = worker.ProcessSpec{
StdoutWriter: stdoutBuf,
StderrWriter: stderrBuf,
}
})
It("does not send a Starting event", func() {
Expect(eventChan).ToNot(Receive(Equal(runtime.Event{runtime.StartingEvent, 0})))
Expect(eventChan).ToNot(Receive(Equal(runtime.Event{EventType: runtime.StartingEvent, ExitStatus: 0})))
})
It("does not create a new container", func() {
@ -696,14 +697,14 @@ var _ = Describe("Client", func() {
stdoutBuf = new(gbytes.Buffer)
stderrBuf = new(gbytes.Buffer)
fakeTaskProcessSpec = worker.TaskProcessSpec{
fakeTaskProcessSpec = worker.ProcessSpec{
StdoutWriter: stdoutBuf,
StderrWriter: stderrBuf,
}
})
It("sends a Starting event", func() {
Expect(eventChan).To(Receive(Equal(runtime.Event{"Starting", 0})))
Expect(eventChan).To(Receive(Equal(runtime.Event{EventType: "Starting", ExitStatus: 0})))
})
It("runs a new process in the container", func() {
@ -972,4 +973,475 @@ var _ = Describe("Client", func() {
})
})
})
Describe("RunPutStep", func() {
var (
ctx context.Context
cancel func()
owner db.ContainerOwner
containerSpec worker.ContainerSpec
workerSpec worker.WorkerSpec
source atc.Source
params atc.Params
metadata db.ContainerMetadata
imageSpec worker.ImageFetcherSpec
events chan runtime.Event
fakeChosenWorker *workerfakes.FakeWorker
fakeStrategy *workerfakes.FakeContainerPlacementStrategy
fakeDelegate *workerfakes.FakeImageFetchingDelegate
fakeResourceTypes atc.VersionedResourceTypes
fakeContainer *workerfakes.FakeContainer
fakeProcessSpec worker.ProcessSpec
versionResult runtime.VersionResult
status int
err error
disasterErr error
)
BeforeEach(func() {
ctx, cancel = context.WithCancel(context.Background())
owner = new(dbfakes.FakeContainerOwner)
containerSpec = worker.ContainerSpec{}
fakeStrategy = new(workerfakes.FakeContainerPlacementStrategy)
workerSpec = worker.WorkerSpec{}
fakeChosenWorker = new(workerfakes.FakeWorker)
fakeDelegate = new(workerfakes.FakeImageFetchingDelegate)
fakeResourceTypes = atc.VersionedResourceTypes{}
imageSpec = worker.ImageFetcherSpec{
Delegate: fakeDelegate,
ResourceTypes: fakeResourceTypes,
}
fakeContainer = new(workerfakes.FakeContainer)
disasterErr = errors.New("oh no")
stdout := new(gbytes.Buffer)
stderr := new(gbytes.Buffer)
fakeProcessSpec = worker.ProcessSpec{
Path: "/opt/resource/out",
StdoutWriter: stdout,
StderrWriter: stderr,
}
events = make(chan runtime.Event, 1)
source = atc.Source{"some": "super-secret-source"}
params = atc.Params{"some-param": "some-value"}
fakeChosenWorker = new(workerfakes.FakeWorker)
fakeChosenWorker.NameReturns("some-worker")
fakeChosenWorker.SatisfiesReturns(true)
fakeChosenWorker.FindOrCreateContainerReturns(fakeContainer, nil)
fakePool.FindOrChooseWorkerForContainerReturns(fakeChosenWorker, nil)
})
JustBeforeEach(func() {
result := client.RunPutStep(
ctx,
logger,
owner,
containerSpec,
workerSpec,
source,
params,
fakeStrategy,
metadata,
imageSpec,
"/tmp/build/put",
fakeProcessSpec,
events,
)
versionResult = result.VersionResult
err = result.Err
status = result.Status
})
It("finds/chooses a worker", func() {
Expect(fakePool.FindOrChooseWorkerForContainerCallCount()).To(Equal(1))
_, _, actualOwner, actualContainerSpec, actualWorkerSpec, strategy := fakePool.FindOrChooseWorkerForContainerArgsForCall(0)
Expect(actualOwner).To(Equal(owner))
Expect(actualContainerSpec).To(Equal(containerSpec))
Expect(actualWorkerSpec).To(Equal(workerSpec))
Expect(strategy).To(Equal(fakeStrategy))
})
Context("worker is chosen", func() {
BeforeEach(func() {
fakePool.FindOrChooseWorkerReturns(fakeChosenWorker, nil)
})
It("finds or creates a put container on that worker", func() {
Expect(fakeChosenWorker.FindOrCreateContainerCallCount()).To(Equal(1))
_, _, actualDelegate, actualOwner, actualMetadata, actualContainerSpec, actualResourceTypes := fakeChosenWorker.FindOrCreateContainerArgsForCall(0)
Expect(actualDelegate).To(Equal(fakeDelegate))
Expect(actualOwner).To(Equal(owner))
Expect(actualContainerSpec).To(Equal(containerSpec))
Expect(actualMetadata).To(Equal(metadata))
Expect(actualResourceTypes).To(Equal(fakeResourceTypes))
})
})
Context("worker selection returns an error", func() {
BeforeEach(func() {
fakePool.FindOrChooseWorkerForContainerReturns(nil, disasterErr)
})
It("returns the error", func() {
Expect(err).To(HaveOccurred())
Expect(err).To(Equal(disasterErr))
Expect(versionResult).To(Equal(runtime.VersionResult{}))
})
})
Context("found a container that has already exited", func() {
var status int
BeforeEach(func() {
status = 8
fakeChosenWorker.FindOrCreateContainerReturns(fakeContainer, nil)
fakeContainer.PropertyStub = func(prop string) (result string, err error) {
if prop == "concourse:exit-status" {
return "8", nil
}
return "", errors.New("unhandled property")
}
})
It("does not attach to any process", func() {
Expect(fakeContainer.AttachCallCount()).To(BeZero())
})
It("returns result of container process", func() {
Expect(err).ToNot(HaveOccurred())
Expect(status).To(Equal(8))
})
})
Context("container has not already exited", func() {
var (
fakeProcess *gardenfakes.FakeProcess
fakeProcessExitCode int
stdoutBuf *gbytes.Buffer
stderrBuf *gbytes.Buffer
)
BeforeEach(func() {
stdoutBuf = new(gbytes.Buffer)
stderrBuf = new(gbytes.Buffer)
fakeProcess = new(gardenfakes.FakeProcess)
fakeContainer.PropertyReturns("", errors.New("not exited"))
})
Context("found container that is already running", func() {
var expectedVersionResult runtime.VersionResult
BeforeEach(func() {
fakeContainer.AttachStub = func(arg1 context.Context, arg2 string, arg3 garden.ProcessIO) (garden.Process, error) {
_, _ = arg3.Stdout.Write([]byte(`{"version": { "foo": "bar" }}`))
return fakeProcess, nil
}
expectedVersionResult = runtime.VersionResult{
Version: atc.Version(map[string]string{"foo": "bar"}),
Metadata: nil,
}
})
It("does not send a Starting event", func() {
Expect(events).ToNot(Receive(Equal(runtime.Event{EventType: runtime.StartingEvent, ExitStatus: 0})))
})
It("does not create a new container", func() {
Expect(fakeContainer.RunCallCount()).To(BeZero())
})
It("attaches to the running process", func() {
Expect(err).ToNot(HaveOccurred())
Expect(fakeContainer.AttachCallCount()).To(Equal(1))
Expect(fakeContainer.RunCallCount()).To(Equal(0))
_, _, actualProcessIO := fakeContainer.AttachArgsForCall(0)
Expect(actualProcessIO.Stderr).To(Equal(stderrBuf))
})
Context("when the process is interrupted", func() {
var stopped chan struct{}
BeforeEach(func() {
stopped = make(chan struct{})
fakeProcess.WaitStub = func() (int, error) {
defer GinkgoRecover()
<-stopped
return 128 + 15, nil
}
fakeContainer.StopStub = func(bool) error {
close(stopped)
return nil
}
cancel()
})
It("stops the container", func() {
Expect(fakeContainer.StopCallCount()).To(Equal(1))
Expect(fakeContainer.StopArgsForCall(0)).To(BeFalse())
Expect(err).To(Equal(context.Canceled))
})
Context("when container.stop returns an error", func() {
var disaster error
BeforeEach(func() {
disaster = errors.New("gotta get away")
fakeContainer.StopStub = func(bool) error {
close(stopped)
return disaster
}
})
It("doesn't return the error", func() {
Expect(err).To(Equal(context.Canceled))
})
})
})
Context("when the process exits successfully", func() {
BeforeEach(func() {
fakeProcessExitCode = 0
fakeProcess.WaitReturns(fakeProcessExitCode, nil)
})
It("returns a successful result", func() {
Expect(versionResult).To(Equal(expectedVersionResult))
Expect(status).To(BeZero())
Expect(err).ToNot(HaveOccurred())
})
})
Context("when the process exits with an error", func() {
disaster := errors.New("process failed")
BeforeEach(func() {
fakeProcessExitCode = 128 + 15
fakeProcess.WaitReturns(fakeProcessExitCode, disaster)
})
It("returns an unsuccessful result", func() {
Expect(status).To(Equal(-1))
Expect(err).To(HaveOccurred())
Expect(err).To(Equal(disaster))
Expect(versionResult).To(Equal(runtime.VersionResult{}))
})
It("returns no version results", func() {
Expect(versionResult).To(Equal(runtime.VersionResult{}))
})
})
Context("when the process exits with nonzero status", func() {
BeforeEach(func() {
fakeProcessExitCode = 128 + 15
fakeProcess.WaitReturns(fakeProcessExitCode, nil)
})
It("returns an unsuccessful result", func() {
Expect(status).To(Equal(fakeProcessExitCode))
Expect(err).To(HaveOccurred())
Expect(err).To(BeAssignableToTypeOf(worker.ErrResourceScriptFailed{}))
Expect(versionResult).To(Equal(runtime.VersionResult{}))
})
It("returns no version results", func() {
Expect(versionResult).To(Equal(runtime.VersionResult{}))
})
})
})
Context("created a new container", func() {
BeforeEach(func() {
fakeContainer.AttachReturns(nil, errors.New("container not running"))
fakeContainer.RunReturns(fakeProcess, nil)
stdoutBuf = new(gbytes.Buffer)
stderrBuf = new(gbytes.Buffer)
fakeProcessSpec = worker.ProcessSpec{
Path: "/opt/resource/out",
Args: []string{"/tmp/build/put"},
StdoutWriter: stdoutBuf,
StderrWriter: stderrBuf,
}
fakeContainer.RunStub = func(arg1 context.Context, arg2 garden.ProcessSpec, arg3 garden.ProcessIO) (garden.Process, error) {
_, _ = arg3.Stdout.Write([]byte(`{"version": { "foo": "bar" }}`))
return fakeProcess, nil
}
})
It("sends a Starting event", func() {
Expect(events).To(Receive(Equal(runtime.Event{EventType: "Starting", ExitStatus: 0})))
})
It("runs a new process in the container", func() {
Eventually(fakeContainer.RunCallCount()).Should(Equal(1))
_, gardenProcessSpec, actualProcessIO := fakeContainer.RunArgsForCall(0)
Expect(gardenProcessSpec.ID).To(Equal("resource"))
Expect(gardenProcessSpec.Path).To(Equal(fakeProcessSpec.Path))
Expect(gardenProcessSpec.Args).To(ConsistOf(fakeProcessSpec.Args))
Expect(actualProcessIO.Stdout).To(Not(Equal(stdoutBuf)))
Expect(actualProcessIO.Stderr).To(Equal(stderrBuf))
})
Context("when the process is interrupted", func() {
var stopped chan struct{}
BeforeEach(func() {
stopped = make(chan struct{})
fakeProcess.WaitStub = func() (int, error) {
defer GinkgoRecover()
<-stopped
return 128 + 15, nil // exit status of a process killed by SIGTERM
}
fakeContainer.StopStub = func(bool) error {
close(stopped)
return nil
}
cancel()
})
It("stops the container", func() {
Expect(fakeContainer.StopCallCount()).To(Equal(1))
Expect(fakeContainer.StopArgsForCall(0)).To(BeFalse())
Expect(err).To(Equal(context.Canceled))
})
Context("when container.stop returns an error", func() {
var disaster error
BeforeEach(func() {
disaster = errors.New("gotta get away")
fakeContainer.StopStub = func(bool) error {
close(stopped)
return disaster
}
})
It("doesn't return the error", func() {
Expect(err).To(Equal(context.Canceled))
})
})
})
Context("when the process exits successfully", func() {
// It("puts the resource with the given context", func() {
// Expect(fakeResource.PutCallCount()).To(Equal(1))
// putCtx, _, _, _ := fakeResource.PutArgsForCall(0)
// Expect(putCtx).To(Equal(ctx))
// })
// It("puts the resource with the correct source and params", func() {
// Expect(fakeResource.PutCallCount()).To(Equal(1))
//
// _, _, putSource, putParams := fakeResource.PutArgsForCall(0)
// Expect(putSource).To(Equal(atc.Source{"some": "super-secret-source"}))
// Expect(putParams).To(Equal(atc.Params{"some-param": "some-value"}))
// })
// It("puts the resource with the io config forwarded", func() {
// Expect(fakeResource.PutCallCount()).To(Equal(1))
//
// _, ioConfig, _, _ := fakeResource.PutArgsForCall(0)
// Expect(ioConfig.Stdout).To(Equal(stdoutBuf))
// Expect(ioConfig.Stderr).To(Equal(stderrBuf))
// })
// It("runs the get resource action", func() {
// Expect(fakeResource.PutCallCount()).To(Equal(1))
// })
It("returns a successful result", func() {
Expect(status).To(BeZero())
Expect(err).ToNot(HaveOccurred())
})
It("saves the exit status property", func() {
Expect(fakeContainer.SetPropertyCallCount()).To(Equal(1))
name, value := fakeContainer.SetPropertyArgsForCall(0)
Expect(name).To(Equal("concourse:resource-result"))
Expect(value).To(Equal(string(`{"version": { "foo": "bar" }}`)))
})
Context("when saving the exit status succeeds", func() {
BeforeEach(func() {
fakeContainer.SetPropertyReturns(nil)
})
It("returns successfully", func() {
Expect(err).ToNot(HaveOccurred())
})
})
Context("when saving the exit status fails", func() {
disaster := errors.New("nope")
BeforeEach(func() {
fakeContainer.SetPropertyStub = func(name string, value string) error {
defer GinkgoRecover()
if name == "concourse:resource-result" {
return disaster
}
return nil
}
})
It("returns the error", func() {
Expect(err).To(Equal(disaster))
})
})
})
Context("when the process exits on failure", func() {
BeforeEach(func() {
fakeProcessExitCode = 128 + 15
fakeProcess.WaitReturns(fakeProcessExitCode, nil)
})
It("returns an unsuccessful result", func() {
Expect(status).To(Equal(fakeProcessExitCode))
Expect(err).To(HaveOccurred())
Expect(err).To(BeAssignableToTypeOf(worker.ErrResourceScriptFailed{}))
})
})
Context("when running the container fails with an error", func() {
disaster := errors.New("nope")
BeforeEach(func() {
fakeContainer.RunReturns(nil, disaster)
})
It("returns the error", func() {
Expect(err).To(Equal(disaster))
})
})
})
})
Context("worker.FindOrCreateContainer errored", func() {
BeforeEach(func() {
fakeChosenWorker.FindOrCreateContainerReturns(nil, disasterErr)
})
It("returns the error immediately", func() {
Expect(err).To(HaveOccurred())
Expect(err).To(Equal(disasterErr))
Expect(versionResult).To(Equal(runtime.VersionResult{}))
})
})
})
})

View File

@ -4,6 +4,8 @@ import (
"fmt"
"strings"
"github.com/concourse/concourse/atc/runtime"
"code.cloudfoundry.org/garden"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db"
@ -28,6 +30,8 @@ type ContainerSpec struct {
// Working directory for processes run in the container.
Dir string
InputFooBars []FooBarInput
// Inputs to provide to the container. Inputs with a volume local to the
// selected worker will be made available via a COW volume; others will be
// streamed.
@ -46,6 +50,13 @@ type ContainerSpec struct {
User string
}
//go:generate counterfeiter . FooBarInput
type FooBarInput interface {
Artifact() runtime.Artifact
DestinationPath() string
}
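
A possible implementation of this (provisionally named) input interface, as a sketch: the getInput type, its fields, and the paths are assumptions; only FooBarInput and runtime.GetArtifact come from this commit:

package main

import (
	"fmt"

	"github.com/concourse/concourse/atc/runtime"
	"github.com/concourse/concourse/atc/worker"
)

// getInput is a hypothetical FooBarInput: it pairs an artifact produced by a
// get step with the path it should appear at inside the container.
type getInput struct {
	artifact        runtime.GetArtifact
	destinationPath string
}

// compile-time check that the sketch satisfies the interface
var _ worker.FooBarInput = getInput{}

func (i getInput) Artifact() runtime.Artifact { return i.artifact }
func (i getInput) DestinationPath() string    { return i.destinationPath }

func main() {
	input := getInput{
		artifact:        runtime.GetArtifact{VolumeHandle: "some-volume-handle"},
		destinationPath: "/tmp/build/get/some-input",
	}
	fmt.Println(input.Artifact().ID(), "->", input.DestinationPath())
}
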
//go:generate counterfeiter . InputSource
type InputSource interface {
@ -63,10 +74,12 @@ type BindMountSource interface {
type OutputPaths map[string]string
type ImageSpec struct {
ResourceType string
ImageURL string
ImageResource *ImageResource
ResourceType string
ImageURL string
ImageResource *ImageResource
// populate ImageArtifactSource if ImageArtifact is specified
ImageArtifactSource ArtifactSource
ImageArtifact runtime.Artifact
Privileged bool
}

View File

@ -77,8 +77,6 @@ func (provider *dbWorkerProvider) RunningWorkers(logger lager.Logger) ([]Worker,
return nil, err
}
tikTok := clock.NewClock()
workers := []Worker{}
for _, savedWorker := range savedWorkers {
@ -89,7 +87,6 @@ func (provider *dbWorkerProvider) RunningWorkers(logger lager.Logger) ([]Worker,
workerLog := logger.Session("running-worker")
worker := provider.NewGardenWorker(
workerLog,
tikTok,
savedWorker,
buildContainersCountPerWorker[savedWorker.Name()],
)
@ -115,7 +112,7 @@ func (provider *dbWorkerProvider) FindWorkersForContainerByOwner(
var workers []Worker
for _, w := range dbWorkers {
worker := provider.NewGardenWorker(logger, clock.NewClock(), w, 0)
worker := provider.NewGardenWorker(logger, w, 0)
if worker.IsVersionCompatible(logger, provider.workerVersion) {
workers = append(workers, worker)
}
@ -141,7 +138,7 @@ func (provider *dbWorkerProvider) FindWorkerForContainer(
return nil, false, nil
}
worker := provider.NewGardenWorker(logger, clock.NewClock(), dbWorker, 0)
worker := provider.NewGardenWorker(logger, dbWorker, 0)
if !worker.IsVersionCompatible(logger, provider.workerVersion) {
return nil, false, nil
}
@ -165,14 +162,14 @@ func (provider *dbWorkerProvider) FindWorkerForVolume(
return nil, false, nil
}
worker := provider.NewGardenWorker(logger, clock.NewClock(), dbWorker, 0)
worker := provider.NewGardenWorker(logger, dbWorker, 0)
if !worker.IsVersionCompatible(logger, provider.workerVersion) {
return nil, false, nil
}
return worker, true, err
}
func (provider *dbWorkerProvider) NewGardenWorker(logger lager.Logger, tikTok clock.Clock, savedWorker db.Worker, buildContainersCount int) Worker {
func (provider *dbWorkerProvider) NewGardenWorker(logger lager.Logger, savedWorker db.Worker, buildContainersCount int) Worker {
gcf := gclient.NewGardenClientFactory(
provider.dbWorkerFactory,
logger.Session("garden-connection"),

188
atc/worker/fetcher.go Normal file
View File

@ -0,0 +1,188 @@
package worker
import (
"context"
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"io"
"path/filepath"
"time"
"github.com/concourse/concourse/atc/runtime"
"code.cloudfoundry.org/clock"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/db/lock"
"github.com/concourse/concourse/atc/resource"
)
const GetResourceLockInterval = 5 * time.Second
var ErrFailedToGetLock = errors.New("failed to get lock")
var ErrInterrupted = errors.New("interrupted")
//go:generate counterfeiter . Fetcher
type Fetcher interface {
Fetch(
ctx context.Context,
logger lager.Logger,
containerMetadata db.ContainerMetadata,
gardenWorker Worker,
containerSpec ContainerSpec,
processSpec ProcessSpec,
resourceTypes atc.VersionedResourceTypes,
resourceInstance resource.ResourceInstance,
imageFetchingDelegate ImageFetchingDelegate,
cache db.UsedResourceCache,
) (GetResult, error)
}
func NewFetcher(
clock clock.Clock,
lockFactory lock.LockFactory,
fetchSourceFactory FetchSourceFactory,
) Fetcher {
return &fetcher{
clock: clock,
lockFactory: lockFactory,
fetchSourceFactory: fetchSourceFactory,
}
}
type fetcher struct {
clock clock.Clock
lockFactory lock.LockFactory
fetchSourceFactory FetchSourceFactory
}
func ResourcesDir(suffix string) string {
return filepath.Join("/tmp", "build", suffix)
}
func (f *fetcher) Fetch(
ctx context.Context,
logger lager.Logger,
containerMetadata db.ContainerMetadata,
gardenWorker Worker,
containerSpec ContainerSpec,
processSpec ProcessSpec,
resourceTypes atc.VersionedResourceTypes,
resourceInstance resource.ResourceInstance, // can we not use resource package here?
imageFetchingDelegate ImageFetchingDelegate,
cache db.UsedResourceCache,
//) (resource.VersionedSource, error) {
) (GetResult, error) {
containerSpec.Outputs = map[string]string{
"resource": ResourcesDir("get"),
}
source := f.fetchSourceFactory.NewFetchSource(logger, gardenWorker, resourceInstance, cache, resourceTypes, containerSpec, processSpec, containerMetadata, imageFetchingDelegate)
ticker := f.clock.NewTicker(GetResourceLockInterval)
defer ticker.Stop()
// figure out the lock name earlier, because we already have all the info
lockName, err := lockName(string(resourceInstance.ResourceType()),
resourceInstance.Version(),
resourceInstance.Source(),
resourceInstance.Params(),
gardenWorker.Name())
if err != nil {
return GetResult{}, err
}
versionedSource, err := f.fetchWithLock(ctx, logger, source, imageFetchingDelegate.Stdout(), cache, lockName)
if err != ErrFailedToGetLock {
return versionedSource, err
}
for {
select {
case <-ticker.C():
//TODO this is called redundantly?
result, err := f.fetchWithLock(ctx, logger, source, imageFetchingDelegate.Stdout(), cache, lockName)
if err != nil {
if err == ErrFailedToGetLock {
break
}
return GetResult{}, err
}
return result, nil
case <-ctx.Done():
return GetResult{}, ctx.Err()
}
}
}
type lockID struct {
Type string `json:"type,omitempty"`
Version atc.Version `json:"version,omitempty"`
Source atc.Source `json:"source,omitempty"`
Params atc.Params `json:"params,omitempty"`
WorkerName string `json:"worker_name,omitempty"`
}
func lockName(resourceType string, version atc.Version, source atc.Source, params atc.Params, workerName string) (string, error) {
id := &lockID{
Type: resourceType,
Version: version,
Source: source,
Params: params,
WorkerName: workerName,
}
taskNameJSON, err := json.Marshal(id)
if err != nil {
return "", err
}
return fmt.Sprintf("%x", sha256.Sum256(taskNameJSON)), nil
}
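
The lock name is just a SHA-256 over the JSON-encoded identity of the fetch, so identical fetches on the same worker contend on the same lock; a standalone sketch of that idea (plain maps instead of the atc types, illustrative values):

package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

type fetchIdentity struct {
	Type       string                 `json:"type,omitempty"`
	Version    map[string]string      `json:"version,omitempty"`
	Source     map[string]interface{} `json:"source,omitempty"`
	WorkerName string                 `json:"worker_name,omitempty"`
}

func nameFor(id fetchIdentity) (string, error) {
	payload, err := json.Marshal(id)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", sha256.Sum256(payload)), nil
}

func main() {
	id := fetchIdentity{
		Type:       "git",
		Version:    map[string]string{"ref": "abc123"},
		Source:     map[string]interface{}{"uri": "https://example.com/repo.git"},
		WorkerName: "worker-1",
	}

	a, _ := nameFor(id)
	b, _ := nameFor(id)
	// The same identity always hashes to the same lock name, so two builds
	// fetching the same version on the same worker take the same lock.
	fmt.Println(a == b, a)
}
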
func (f *fetcher) fetchWithLock(
ctx context.Context,
logger lager.Logger,
source FetchSource,
stdout io.Writer,
cache db.UsedResourceCache,
lockName string,
) (GetResult, error) {
volume, found, err := source.Find()
if err != nil {
return GetResult{}, err
}
if found {
result := GetResult{
0,
// todo: figure out what logically should be returned for VersionResult
runtime.VersionResult{},
runtime.GetArtifact{VolumeHandle: volume.Handle()},
nil,
}
return result, nil
}
lockLogger := logger.Session("lock-task", lager.Data{"lock-name": lockName})
lock, acquired, err := f.lockFactory.Acquire(lockLogger, lock.NewTaskLockID(lockName))
if err != nil {
lockLogger.Error("failed-to-get-lock", err)
return GetResult{}, ErrFailedToGetLock
}
if !acquired {
lockLogger.Debug("did-not-get-lock")
return GetResult{}, ErrFailedToGetLock
}
defer lock.Release()
return source.Create(ctx)
}
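
Fetch above tries once, then retries on a ticker until it either wins the lock or the context is cancelled; the same shape in isolation, with a stand-in tryOnce instead of fetchWithLock and the stdlib time package instead of the clock package:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errFailedToGetLock = errors.New("failed to get lock")

// tryOnce stands in for fetchWithLock: it fails a couple of times before the
// "lock" becomes available.
func tryOnce(attempt *int) (string, error) {
	*attempt++
	if *attempt < 3 {
		return "", errFailedToGetLock
	}
	return "fetched", nil
}

func fetch(ctx context.Context) (string, error) {
	attempt := 0

	// First attempt outside the loop, exactly like fetcher.Fetch.
	if result, err := tryOnce(&attempt); err != errFailedToGetLock {
		return result, err
	}

	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			result, err := tryOnce(&attempt)
			if err != nil {
				if err == errFailedToGetLock {
					break // only breaks the select; wait for the next tick
				}
				return "", err
			}
			return result, nil
		case <-ctx.Done():
			return "", ctx.Err()
		}
	}
}

func main() {
	result, err := fetch(context.Background())
	fmt.Println(result, err)
}
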

View File

@ -1,4 +1,4 @@
package fetcher_test
package worker_test
import (
"testing"

View File

@ -1,4 +1,4 @@
package fetcher_test
package worker_test
import (
"context"
@ -15,10 +15,8 @@ import (
"github.com/concourse/concourse/atc/fetcher/fetcherfakes"
"github.com/concourse/concourse/atc/resource"
"github.com/concourse/concourse/atc/resource/resourcefakes"
"github.com/concourse/concourse/atc/worker"
"github.com/concourse/concourse/atc/worker/workerfakes"
. "github.com/concourse/concourse/atc/fetcher"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@ -67,7 +65,7 @@ var _ = Describe("Fetcher", func() {
lagertest.NewTestLogger("test"),
db.ContainerMetadata{},
fakeWorker,
worker.ContainerSpec{
ContainerSpec{
TeamID: teamID,
},
atc.VersionedResourceTypes{},

View File

@ -13,7 +13,6 @@ import (
"github.com/DataDog/zstd"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/fetcher"
"github.com/concourse/concourse/atc/resource"
"github.com/concourse/concourse/atc/worker"
)
@ -53,14 +52,14 @@ type ImageResourceFetcher interface {
type imageResourceFetcherFactory struct {
dbResourceCacheFactory db.ResourceCacheFactory
dbResourceConfigFactory db.ResourceConfigFactory
resourceFetcher fetcher.Fetcher
resourceFetcher worker.Fetcher
resourceFactory resource.ResourceFactory
}
func NewImageResourceFetcherFactory(
dbResourceCacheFactory db.ResourceCacheFactory,
dbResourceConfigFactory db.ResourceConfigFactory,
resourceFetcher fetcher.Fetcher,
resourceFetcher worker.Fetcher,
resourceFactory resource.ResourceFactory,
) ImageResourceFetcherFactory {
return &imageResourceFetcherFactory{
@ -97,7 +96,7 @@ func (f *imageResourceFetcherFactory) NewImageResourceFetcher(
type imageResourceFetcher struct {
worker worker.Worker
resourceFactory resource.ResourceFactory
resourceFetcher fetcher.Fetcher
resourceFetcher worker.Fetcher
dbResourceCacheFactory db.ResourceCacheFactory
dbResourceConfigFactory db.ResourceConfigFactory

View File

@ -15,7 +15,6 @@ import (
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/db/dbfakes"
"github.com/concourse/concourse/atc/fetcher"
"github.com/concourse/concourse/atc/fetcher/fetcherfakes"
"github.com/concourse/concourse/atc/resource"
"github.com/concourse/concourse/atc/resource/resourcefakes"
@ -219,11 +218,11 @@ var _ = Describe("Image", func() {
Context("when fetching resource fails", func() {
BeforeEach(func() {
fakeResourceFetcher.FetchReturns(nil, fetcher.ErrInterrupted)
fakeResourceFetcher.FetchReturns(nil, worker.ErrInterrupted)
})
It("returns error", func() {
Expect(fetchErr).To(Equal(fetcher.ErrInterrupted))
Expect(fetchErr).To(Equal(worker.ErrInterrupted))
})
})
@ -486,11 +485,11 @@ var _ = Describe("Image", func() {
Context("when fetching resource fails", func() {
BeforeEach(func() {
fakeResourceFetcher.FetchReturns(nil, fetcher.ErrInterrupted)
fakeResourceFetcher.FetchReturns(nil, worker.ErrInterrupted)
})
It("returns error", func() {
Expect(fetchErr).To(Equal(fetcher.ErrInterrupted))
Expect(fetchErr).To(Equal(worker.ErrInterrupted))
})
})

View File

@ -7,7 +7,6 @@ import (
"math/rand"
"time"
"code.cloudfoundry.org/clock"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc/db"
@ -37,7 +36,6 @@ type WorkerProvider interface {
NewGardenWorker(
logger lager.Logger,
tikTok clock.Clock,
savedWorker db.Worker,
numBuildWorkers int,
) Worker

View File

@ -0,0 +1,263 @@
package worker
// This file takes in a resource and returns a source (Volume).
// We might not need to model it this way.
import (
"context"
"github.com/concourse/concourse/atc/runtime"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/resource"
)
//go:generate counterfeiter . FetchSource
type FetchSource interface {
//LockName() (string, error)
Find() (Volume, bool, error)
Create(context.Context) (GetResult, error)
}
//go:generate counterfeiter . FetchSourceFactory
type FetchSourceFactory interface {
NewFetchSource(
logger lager.Logger,
worker Worker,
resourceInstance resource.ResourceInstance,
cache db.UsedResourceCache,
resourceTypes atc.VersionedResourceTypes,
containerSpec ContainerSpec,
processSpec ProcessSpec,
containerMetadata db.ContainerMetadata,
imageFetchingDelegate ImageFetchingDelegate,
) FetchSource
}
type fetchSourceFactory struct {
resourceCacheFactory db.ResourceCacheFactory
resourceFactory resource.ResourceFactory
}
func NewFetchSourceFactory(
resourceCacheFactory db.ResourceCacheFactory,
resourceFactory resource.ResourceFactory,
) FetchSourceFactory {
return &fetchSourceFactory{
resourceCacheFactory: resourceCacheFactory,
resourceFactory: resourceFactory,
}
}
func (r *fetchSourceFactory) NewFetchSource(
logger lager.Logger,
worker Worker,
resourceInstance resource.ResourceInstance,
cache db.UsedResourceCache,
resourceTypes atc.VersionedResourceTypes,
containerSpec ContainerSpec,
processSpec ProcessSpec,
containerMetadata db.ContainerMetadata,
imageFetchingDelegate ImageFetchingDelegate,
) FetchSource {
return &resourceInstanceFetchSource{
logger: logger,
worker: worker,
resourceInstance: resourceInstance,
cache: cache,
resourceTypes: resourceTypes,
containerSpec: containerSpec,
processSpec: processSpec,
containerMetadata: containerMetadata,
imageFetchingDelegate: imageFetchingDelegate,
dbResourceCacheFactory: r.resourceCacheFactory,
resourceFactory: r.resourceFactory,
}
}
type resourceInstanceFetchSource struct {
logger lager.Logger
worker Worker
resourceInstance resource.ResourceInstance
cache db.UsedResourceCache
resourceTypes atc.VersionedResourceTypes
containerSpec ContainerSpec
processSpec ProcessSpec
containerMetadata db.ContainerMetadata
imageFetchingDelegate ImageFetchingDelegate
dbResourceCacheFactory db.ResourceCacheFactory
resourceFactory resource.ResourceFactory
}
//func (s *resourceInstanceFetchSource) LockName() (string, error) {
// return s.resourceInstance.LockName(s.worker.Name())
//}
func findOn(logger lager.Logger, w Worker, cache db.UsedResourceCache) (volume Volume, found bool, err error) {
return w.FindVolumeForResourceCache(
logger,
cache,
)
}
func (s *resourceInstanceFetchSource) Find() (Volume, bool, error) {
sLog := s.logger.Session("find")
volume, found, err := findOn(s.logger, s.worker, s.cache)
if err != nil {
sLog.Error("failed-to-find-initialized-on", err)
return nil, false, err
}
if !found {
return nil, false, nil
}
//metadata, err := s.dbResourceCacheFactory.ResourceCacheMetadata(s.resourceInstance.ResourceCache())
//if err != nil {
// sLog.Error("failed-to-get-resource-cache-metadata", err)
// return nil, false, err
//}
//
//s.logger.Debug("found-initialized-versioned-source", lager.Data{"version": s.resourceInstance.Version(), "metadata": metadata.ToATCMetadata()})
return volume,
true, nil
}
// Create runs under the lock, but we need to make sure the volume does not
// exist yet before creating it under the lock.
func (s *resourceInstanceFetchSource) Create(ctx context.Context) (GetResult, error) {
sLog := s.logger.Session("create")
result := GetResult{}
volume, found, err := s.Find()
if err != nil {
return result, err
}
if found {
result = GetResult{
0,
// todo: figure out what logically should be returned for VersionResult
runtime.VersionResult{},
runtime.GetArtifact{VolumeHandle: volume.Handle()},
nil,
}
return result, nil
}
s.containerSpec.BindMounts = []BindMountSource{
&CertsVolumeMount{Logger: s.logger},
}
container, err := s.worker.FindOrCreateContainer(
ctx,
s.logger,
s.imageFetchingDelegate,
s.resourceInstance.ContainerOwner(),
s.containerMetadata,
s.containerSpec,
s.resourceTypes,
)
if err != nil {
sLog.Error("failed-to-construct-resource", err)
result = GetResult{
1,
// todo: figure out what logically should be returned for VersionResult
runtime.VersionResult{},
runtime.GetArtifact{}, // no volume exists yet when container creation fails
err,
}
return result, err
}
mountPath := resource.ResourcesDir("get")
for _, mount := range container.VolumeMounts() {
if mount.MountPath == mountPath {
volume = mount.Volume
break
}
}
vr := runtime.VersionResult{}
events := make(chan runtime.Event)
// todo: we want to decouple this resource from the container
//res := s.resourceFactory.NewResourceForContainer(container)
//versionedSource, err = res.Get(
// ctx,
// volume,
// runtime.IOConfig{
// Stdout: s.imageFetchingDelegate.Stdout(),
// Stderr: s.imageFetchingDelegate.Stderr(),
// },
// s.resourceInstance.Source(),
// s.resourceInstance.Params(),
// s.resourceInstance.Version(),
//)
//if err != nil {
// sLog.Error("failed-to-fetch-resource", err)
// return nil, err
//}
err = RunScript(
ctx,
container,
s.processSpec.Path,
s.processSpec.Args,
runtime.GetRequest{
Params: s.resourceInstance.Params(),
Source: s.resourceInstance.Source(),
},
&vr,
s.processSpec.StderrWriter,
true,
events,
)
if err != nil {
sLog.Error("failed-to-fetch-resource", err)
// TODO Is this compatible with the previous behaviour of returning nil when the error
// type is NOT ErrResourceScriptFailed, i.e. when the error comes from running the actual script?
if failErr, ok := err.(ErrResourceScriptFailed); ok {
result = GetResult{failErr.ExitStatus, runtime.VersionResult{}, runtime.GetArtifact{}, failErr}
return result, nil
}
return result, err
}
err = volume.SetPrivileged(false)
if err != nil {
sLog.Error("failed-to-set-volume-unprivileged", err)
return result, err
}
// TODO this should happen in the get_step exec rather than here;
// it seems like core logic
//err = volume.InitializeResourceCache(s.resourceInstance.ResourceCache())
//if err != nil {
// sLog.Error("failed-to-initialize-cache", err)
// return nil, err
//}
//
//err = s.dbResourceCacheFactory.UpdateResourceCacheMetadata(s.resourceInstance.ResourceCache(), versionedSource.Metadata())
//if err != nil {
// s.logger.Error("failed-to-update-resource-cache-metadata", err, lager.Data{"resource-cache": s.resourceInstance.ResourceCache()})
// return nil, err
//}
return GetResult{
VersionResult: vr,
GetArtifact: runtime.GetArtifact{
VolumeHandle: volume.Handle(),
},
}, nil
}

View File

@ -1,4 +1,4 @@
package fetcher_test
package worker_test
import (
"context"
@ -12,10 +12,8 @@ import (
"github.com/concourse/concourse/atc/db/dbfakes"
"github.com/concourse/concourse/atc/resource"
"github.com/concourse/concourse/atc/resource/resourcefakes"
"github.com/concourse/concourse/atc/worker"
"github.com/concourse/concourse/atc/worker/workerfakes"
. "github.com/concourse/concourse/atc/fetcher"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@ -62,7 +60,7 @@ var _ = Describe("ResourceInstanceFetchSource", func() {
}
fakeVolume = new(workerfakes.FakeVolume)
fakeContainer.VolumeMountsReturns([]worker.VolumeMount{
fakeContainer.VolumeMountsReturns([]VolumeMount{
{
Volume: fakeVolume,
MountPath: resource.ResourcesDir("get"),
@ -107,10 +105,10 @@ var _ = Describe("ResourceInstanceFetchSource", func() {
fakeWorker,
fakeResourceInstance,
resourceTypes,
worker.ContainerSpec{
ContainerSpec{
TeamID: 42,
Tags: []string{},
ImageSpec: worker.ImageSpec{
ImageSpec: ImageSpec{
ResourceType: "fake-resource-type",
},
Outputs: map[string]string{
@ -207,13 +205,13 @@ var _ = Describe("ResourceInstanceFetchSource", func() {
Expect(delegate).To(Equal(fakeDelegate))
Expect(owner).To(Equal(db.NewBuildStepContainerOwner(43, atc.PlanID("some-plan-id"), 42)))
Expect(actualMetadata).To(Equal(metadata))
Expect(containerSpec).To(Equal(worker.ContainerSpec{
Expect(containerSpec).To(Equal(ContainerSpec{
TeamID: 42,
Tags: []string{},
ImageSpec: worker.ImageSpec{
ImageSpec: ImageSpec{
ResourceType: "fake-resource-type",
},
BindMounts: []worker.BindMountSource{&worker.CertsVolumeMount{Logger: logger}},
BindMounts: []BindMountSource{&CertsVolumeMount{Logger: logger}},
Outputs: map[string]string{
"resource": resource.ResourcesDir("get"),
},

152
atc/worker/run_script.go Normal file
View File

@ -0,0 +1,152 @@
package worker
import (
"bytes"
"context"
"encoding/json"
"fmt"
"github.com/concourse/concourse/atc/runtime"
"io"
"code.cloudfoundry.org/garden"
)
const resourceResultPropertyName = "concourse:resource-result"
const ResourceProcessID = "resource"
type ErrResourceScriptFailed struct {
Path string
Args []string
ExitStatus int
Stderr string
}
func (err ErrResourceScriptFailed) Error() string {
msg := fmt.Sprintf(
"resource script '%s %v' failed: exit status %d",
err.Path,
err.Args,
err.ExitStatus,
)
if len(err.Stderr) > 0 {
msg += "\n\nstderr:\n" + err.Stderr
}
return msg
}
func RunScript(
ctx context.Context,
container Container,
path string,
args []string,
input interface{},
output interface{},
logDest io.Writer,
recoverable bool,
events chan runtime.Event,
) error {
request, err := json.Marshal(input)
if err != nil {
return err
}
if recoverable {
result, err := container.Property(resourceResultPropertyName)
if err == nil {
return json.Unmarshal([]byte(result), &output)
}
}
stdout := new(bytes.Buffer)
stderr := new(bytes.Buffer)
processIO := garden.ProcessIO{
Stdin: bytes.NewBuffer(request),
Stdout: stdout,
}
if logDest != nil {
processIO.Stderr = logDest
} else {
processIO.Stderr = stderr
}
var process garden.Process
if recoverable {
process, err = container.Attach(ctx, ResourceProcessID, processIO)
if err != nil {
events <- runtime.Event{
EventType: runtime.StartingEvent,
}
process, err = container.Run(
ctx,
garden.ProcessSpec{
ID: ResourceProcessID,
Path: path,
Args: args,
}, processIO)
if err != nil {
return err
}
}
} else {
events <- runtime.Event{
EventType: runtime.StartingEvent,
}
process, err = container.Run(
ctx,
garden.ProcessSpec{
Path: path,
Args: args,
}, processIO)
if err != nil {
return err
}
}
processExited := make(chan struct{})
var processStatus int
var processErr error
go func() {
processStatus, processErr = process.Wait()
close(processExited)
}()
select {
case <-processExited:
if processErr != nil {
return processErr
}
if processStatus != 0 {
return ErrResourceScriptFailed{
Path: path,
Args: args,
ExitStatus: processStatus,
Stderr: stderr.String(),
}
}
if recoverable {
err := container.SetProperty(resourceResultPropertyName, stdout.String())
if err != nil {
return err
}
}
return json.Unmarshal(stdout.Bytes(), output)
case <-ctx.Done():
container.Stop(false)
<-processExited
return ctx.Err()
}
}
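
RunScript speaks the resource script protocol: the request goes to the script's stdin as JSON and the result comes back on stdout as JSON (and is cached in a container property when recoverable). A sketch of just the encode/decode halves, using the runtime types from this commit; all payload values are made up:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/concourse/concourse/atc"
	"github.com/concourse/concourse/atc/runtime"
)

func main() {
	// What RunScript writes to the script's stdin for a put step.
	request, _ := json.Marshal(runtime.PutRequest{
		Source: atc.Source{"uri": "https://example.com/repo.git"},
		Params: atc.Params{"path": "repo"},
	})
	fmt.Println("stdin:", string(request))

	// What the script is expected to print on stdout, and what RunScript
	// unmarshals into the caller-supplied output (here a VersionResult).
	stdout := []byte(`{"version": {"ref": "abc123"}, "metadata": [{"name": "author", "value": "someone"}]}`)

	var vr runtime.VersionResult
	if err := json.Unmarshal(stdout, &vr); err != nil {
		panic(err)
	}
	fmt.Printf("stdout -> version=%v metadata=%v\n", vr.Version, vr.Metadata)
}
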

View File

@ -3,9 +3,11 @@ package workerfakes
import (
"context"
"io"
"sync"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/db/lock"
"github.com/concourse/concourse/atc/runtime"
@ -63,7 +65,30 @@ type FakeClient struct {
result2 bool
result3 error
}
RunTaskStepStub func(context.Context, lager.Logger, lock.LockFactory, db.ContainerOwner, worker.ContainerSpec, worker.WorkerSpec, worker.ContainerPlacementStrategy, db.ContainerMetadata, worker.ImageFetcherSpec, worker.TaskProcessSpec, chan runtime.Event) worker.TaskResult
RunPutStepStub func(context.Context, lager.Logger, db.ContainerOwner, worker.ContainerSpec, worker.WorkerSpec, atc.Source, atc.Params, worker.ContainerPlacementStrategy, db.ContainerMetadata, worker.ImageFetcherSpec, string, worker.ProcessSpec, chan runtime.Event) worker.PutResult
runPutStepMutex sync.RWMutex
runPutStepArgsForCall []struct {
arg1 context.Context
arg2 lager.Logger
arg3 db.ContainerOwner
arg4 worker.ContainerSpec
arg5 worker.WorkerSpec
arg6 atc.Source
arg7 atc.Params
arg8 worker.ContainerPlacementStrategy
arg9 db.ContainerMetadata
arg10 worker.ImageFetcherSpec
arg11 string
arg12 worker.ProcessSpec
arg13 chan runtime.Event
}
runPutStepReturns struct {
result1 worker.PutResult
}
runPutStepReturnsOnCall map[int]struct {
result1 worker.PutResult
}
RunTaskStepStub func(context.Context, lager.Logger, lock.LockFactory, db.ContainerOwner, worker.ContainerSpec, worker.WorkerSpec, worker.ContainerPlacementStrategy, db.ContainerMetadata, worker.ImageFetcherSpec, worker.ProcessSpec, chan runtime.Event) worker.TaskResult
runTaskStepMutex sync.RWMutex
runTaskStepArgsForCall []struct {
arg1 context.Context
@ -75,7 +100,7 @@ type FakeClient struct {
arg7 worker.ContainerPlacementStrategy
arg8 db.ContainerMetadata
arg9 worker.ImageFetcherSpec
arg10 worker.TaskProcessSpec
arg10 worker.ProcessSpec
arg11 chan runtime.Event
}
runTaskStepReturns struct {
@ -84,6 +109,22 @@ type FakeClient struct {
runTaskStepReturnsOnCall map[int]struct {
result1 worker.TaskResult
}
StreamFileFromArtifactStub func(context.Context, lager.Logger, runtime.Artifact, string) (io.ReadCloser, error)
streamFileFromArtifactMutex sync.RWMutex
streamFileFromArtifactArgsForCall []struct {
arg1 context.Context
arg2 lager.Logger
arg3 runtime.Artifact
arg4 string
}
streamFileFromArtifactReturns struct {
result1 io.ReadCloser
result2 error
}
streamFileFromArtifactReturnsOnCall map[int]struct {
result1 io.ReadCloser
result2 error
}
invocations map[string][][]interface{}
invocationsMutex sync.RWMutex
}
@ -290,7 +331,79 @@ func (fake *FakeClient) FindVolumeReturnsOnCall(i int, result1 worker.Volume, re
}{result1, result2, result3}
}
func (fake *FakeClient) RunTaskStep(arg1 context.Context, arg2 lager.Logger, arg3 lock.LockFactory, arg4 db.ContainerOwner, arg5 worker.ContainerSpec, arg6 worker.WorkerSpec, arg7 worker.ContainerPlacementStrategy, arg8 db.ContainerMetadata, arg9 worker.ImageFetcherSpec, arg10 worker.TaskProcessSpec, arg11 chan runtime.Event) worker.TaskResult {
func (fake *FakeClient) RunPutStep(arg1 context.Context, arg2 lager.Logger, arg3 db.ContainerOwner, arg4 worker.ContainerSpec, arg5 worker.WorkerSpec, arg6 atc.Source, arg7 atc.Params, arg8 worker.ContainerPlacementStrategy, arg9 db.ContainerMetadata, arg10 worker.ImageFetcherSpec, arg11 string, arg12 worker.ProcessSpec, arg13 chan runtime.Event) worker.PutResult {
fake.runPutStepMutex.Lock()
ret, specificReturn := fake.runPutStepReturnsOnCall[len(fake.runPutStepArgsForCall)]
fake.runPutStepArgsForCall = append(fake.runPutStepArgsForCall, struct {
arg1 context.Context
arg2 lager.Logger
arg3 db.ContainerOwner
arg4 worker.ContainerSpec
arg5 worker.WorkerSpec
arg6 atc.Source
arg7 atc.Params
arg8 worker.ContainerPlacementStrategy
arg9 db.ContainerMetadata
arg10 worker.ImageFetcherSpec
arg11 string
arg12 worker.ProcessSpec
arg13 chan runtime.Event
}{arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13})
fake.recordInvocation("RunPutStep", []interface{}{arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13})
fake.runPutStepMutex.Unlock()
if fake.RunPutStepStub != nil {
return fake.RunPutStepStub(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13)
}
if specificReturn {
return ret.result1
}
fakeReturns := fake.runPutStepReturns
return fakeReturns.result1
}
func (fake *FakeClient) RunPutStepCallCount() int {
fake.runPutStepMutex.RLock()
defer fake.runPutStepMutex.RUnlock()
return len(fake.runPutStepArgsForCall)
}
func (fake *FakeClient) RunPutStepCalls(stub func(context.Context, lager.Logger, db.ContainerOwner, worker.ContainerSpec, worker.WorkerSpec, atc.Source, atc.Params, worker.ContainerPlacementStrategy, db.ContainerMetadata, worker.ImageFetcherSpec, string, worker.ProcessSpec, chan runtime.Event) worker.PutResult) {
fake.runPutStepMutex.Lock()
defer fake.runPutStepMutex.Unlock()
fake.RunPutStepStub = stub
}
func (fake *FakeClient) RunPutStepArgsForCall(i int) (context.Context, lager.Logger, db.ContainerOwner, worker.ContainerSpec, worker.WorkerSpec, atc.Source, atc.Params, worker.ContainerPlacementStrategy, db.ContainerMetadata, worker.ImageFetcherSpec, string, worker.ProcessSpec, chan runtime.Event) {
fake.runPutStepMutex.RLock()
defer fake.runPutStepMutex.RUnlock()
argsForCall := fake.runPutStepArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6, argsForCall.arg7, argsForCall.arg8, argsForCall.arg9, argsForCall.arg10, argsForCall.arg11, argsForCall.arg12, argsForCall.arg13
}
func (fake *FakeClient) RunPutStepReturns(result1 worker.PutResult) {
fake.runPutStepMutex.Lock()
defer fake.runPutStepMutex.Unlock()
fake.RunPutStepStub = nil
fake.runPutStepReturns = struct {
result1 worker.PutResult
}{result1}
}
func (fake *FakeClient) RunPutStepReturnsOnCall(i int, result1 worker.PutResult) {
fake.runPutStepMutex.Lock()
defer fake.runPutStepMutex.Unlock()
fake.RunPutStepStub = nil
if fake.runPutStepReturnsOnCall == nil {
fake.runPutStepReturnsOnCall = make(map[int]struct {
result1 worker.PutResult
})
}
fake.runPutStepReturnsOnCall[i] = struct {
result1 worker.PutResult
}{result1}
}
func (fake *FakeClient) RunTaskStep(arg1 context.Context, arg2 lager.Logger, arg3 lock.LockFactory, arg4 db.ContainerOwner, arg5 worker.ContainerSpec, arg6 worker.WorkerSpec, arg7 worker.ContainerPlacementStrategy, arg8 db.ContainerMetadata, arg9 worker.ImageFetcherSpec, arg10 worker.ProcessSpec, arg11 chan runtime.Event) worker.TaskResult {
fake.runTaskStepMutex.Lock()
ret, specificReturn := fake.runTaskStepReturnsOnCall[len(fake.runTaskStepArgsForCall)]
fake.runTaskStepArgsForCall = append(fake.runTaskStepArgsForCall, struct {
@ -303,7 +416,7 @@ func (fake *FakeClient) RunTaskStep(arg1 context.Context, arg2 lager.Logger, arg
arg7 worker.ContainerPlacementStrategy
arg8 db.ContainerMetadata
arg9 worker.ImageFetcherSpec
arg10 worker.TaskProcessSpec
arg10 worker.ProcessSpec
arg11 chan runtime.Event
}{arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11})
fake.recordInvocation("RunTaskStep", []interface{}{arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11})
@ -324,13 +437,13 @@ func (fake *FakeClient) RunTaskStepCallCount() int {
return len(fake.runTaskStepArgsForCall)
}
func (fake *FakeClient) RunTaskStepCalls(stub func(context.Context, lager.Logger, lock.LockFactory, db.ContainerOwner, worker.ContainerSpec, worker.WorkerSpec, worker.ContainerPlacementStrategy, db.ContainerMetadata, worker.ImageFetcherSpec, worker.TaskProcessSpec, chan runtime.Event) worker.TaskResult) {
func (fake *FakeClient) RunTaskStepCalls(stub func(context.Context, lager.Logger, lock.LockFactory, db.ContainerOwner, worker.ContainerSpec, worker.WorkerSpec, worker.ContainerPlacementStrategy, db.ContainerMetadata, worker.ImageFetcherSpec, worker.ProcessSpec, chan runtime.Event) worker.TaskResult) {
fake.runTaskStepMutex.Lock()
defer fake.runTaskStepMutex.Unlock()
fake.RunTaskStepStub = stub
}
func (fake *FakeClient) RunTaskStepArgsForCall(i int) (context.Context, lager.Logger, lock.LockFactory, db.ContainerOwner, worker.ContainerSpec, worker.WorkerSpec, worker.ContainerPlacementStrategy, db.ContainerMetadata, worker.ImageFetcherSpec, worker.TaskProcessSpec, chan runtime.Event) {
func (fake *FakeClient) RunTaskStepArgsForCall(i int) (context.Context, lager.Logger, lock.LockFactory, db.ContainerOwner, worker.ContainerSpec, worker.WorkerSpec, worker.ContainerPlacementStrategy, db.ContainerMetadata, worker.ImageFetcherSpec, worker.ProcessSpec, chan runtime.Event) {
fake.runTaskStepMutex.RLock()
defer fake.runTaskStepMutex.RUnlock()
argsForCall := fake.runTaskStepArgsForCall[i]
@ -360,6 +473,72 @@ func (fake *FakeClient) RunTaskStepReturnsOnCall(i int, result1 worker.TaskResul
}{result1}
}
func (fake *FakeClient) StreamFileFromArtifact(arg1 context.Context, arg2 lager.Logger, arg3 runtime.Artifact, arg4 string) (io.ReadCloser, error) {
fake.streamFileFromArtifactMutex.Lock()
ret, specificReturn := fake.streamFileFromArtifactReturnsOnCall[len(fake.streamFileFromArtifactArgsForCall)]
fake.streamFileFromArtifactArgsForCall = append(fake.streamFileFromArtifactArgsForCall, struct {
arg1 context.Context
arg2 lager.Logger
arg3 runtime.Artifact
arg4 string
}{arg1, arg2, arg3, arg4})
fake.recordInvocation("StreamFileFromArtifact", []interface{}{arg1, arg2, arg3, arg4})
fake.streamFileFromArtifactMutex.Unlock()
if fake.StreamFileFromArtifactStub != nil {
return fake.StreamFileFromArtifactStub(arg1, arg2, arg3, arg4)
}
if specificReturn {
return ret.result1, ret.result2
}
fakeReturns := fake.streamFileFromArtifactReturns
return fakeReturns.result1, fakeReturns.result2
}
func (fake *FakeClient) StreamFileFromArtifactCallCount() int {
fake.streamFileFromArtifactMutex.RLock()
defer fake.streamFileFromArtifactMutex.RUnlock()
return len(fake.streamFileFromArtifactArgsForCall)
}
func (fake *FakeClient) StreamFileFromArtifactCalls(stub func(context.Context, lager.Logger, runtime.Artifact, string) (io.ReadCloser, error)) {
fake.streamFileFromArtifactMutex.Lock()
defer fake.streamFileFromArtifactMutex.Unlock()
fake.StreamFileFromArtifactStub = stub
}
func (fake *FakeClient) StreamFileFromArtifactArgsForCall(i int) (context.Context, lager.Logger, runtime.Artifact, string) {
fake.streamFileFromArtifactMutex.RLock()
defer fake.streamFileFromArtifactMutex.RUnlock()
argsForCall := fake.streamFileFromArtifactArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4
}
func (fake *FakeClient) StreamFileFromArtifactReturns(result1 io.ReadCloser, result2 error) {
fake.streamFileFromArtifactMutex.Lock()
defer fake.streamFileFromArtifactMutex.Unlock()
fake.StreamFileFromArtifactStub = nil
fake.streamFileFromArtifactReturns = struct {
result1 io.ReadCloser
result2 error
}{result1, result2}
}
func (fake *FakeClient) StreamFileFromArtifactReturnsOnCall(i int, result1 io.ReadCloser, result2 error) {
fake.streamFileFromArtifactMutex.Lock()
defer fake.streamFileFromArtifactMutex.Unlock()
fake.StreamFileFromArtifactStub = nil
if fake.streamFileFromArtifactReturnsOnCall == nil {
fake.streamFileFromArtifactReturnsOnCall = make(map[int]struct {
result1 io.ReadCloser
result2 error
})
}
fake.streamFileFromArtifactReturnsOnCall[i] = struct {
result1 io.ReadCloser
result2 error
}{result1, result2}
}
func (fake *FakeClient) Invocations() map[string][][]interface{} {
fake.invocationsMutex.RLock()
defer fake.invocationsMutex.RUnlock()
@ -369,8 +548,12 @@ func (fake *FakeClient) Invocations() map[string][][]interface{} {
defer fake.findContainerMutex.RUnlock()
fake.findVolumeMutex.RLock()
defer fake.findVolumeMutex.RUnlock()
fake.runPutStepMutex.RLock()
defer fake.runPutStepMutex.RUnlock()
fake.runTaskStepMutex.RLock()
defer fake.runTaskStepMutex.RUnlock()
fake.streamFileFromArtifactMutex.RLock()
defer fake.streamFileFromArtifactMutex.RUnlock()
copiedInvocations := map[string][][]interface{}{}
for key, value := range fake.invocations {
copiedInvocations[key] = value

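As a hedged usage sketch (not part of this diff), a test handed the regenerated FakeClient could stub the ProcessSpec-based methods roughly as below; the stubbed results are zero-value placeholders and the assertion assumes a dot-imported Gomega, as in the surrounding test suites:

fakeClient := new(workerfakes.FakeClient)
fakeClient.RunTaskStepReturns(worker.TaskResult{}) // zero-value placeholder result
fakeClient.RunPutStepReturns(worker.PutResult{})   // zero-value placeholder result
fakeClient.StreamFileFromArtifactReturns(ioutil.NopCloser(strings.NewReader("file contents")), nil)

// ...exercise the step implementation under test with fakeClient...

Expect(fakeClient.RunTaskStepCallCount()).To(Equal(1))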
View File

@ -0,0 +1,166 @@
// Code generated by counterfeiter. DO NOT EDIT.
package workerfakes
import (
"sync"
"github.com/concourse/concourse/atc/runtime"
"github.com/concourse/concourse/atc/worker"
)
type FakeFooBarInput struct {
ArtifactStub func() runtime.Artifact
artifactMutex sync.RWMutex
artifactArgsForCall []struct {
}
artifactReturns struct {
result1 runtime.Artifact
}
artifactReturnsOnCall map[int]struct {
result1 runtime.Artifact
}
DestinationPathStub func() string
destinationPathMutex sync.RWMutex
destinationPathArgsForCall []struct {
}
destinationPathReturns struct {
result1 string
}
destinationPathReturnsOnCall map[int]struct {
result1 string
}
invocations map[string][][]interface{}
invocationsMutex sync.RWMutex
}
func (fake *FakeFooBarInput) Artifact() runtime.Artifact {
fake.artifactMutex.Lock()
ret, specificReturn := fake.artifactReturnsOnCall[len(fake.artifactArgsForCall)]
fake.artifactArgsForCall = append(fake.artifactArgsForCall, struct {
}{})
fake.recordInvocation("Artifact", []interface{}{})
fake.artifactMutex.Unlock()
if fake.ArtifactStub != nil {
return fake.ArtifactStub()
}
if specificReturn {
return ret.result1
}
fakeReturns := fake.artifactReturns
return fakeReturns.result1
}
func (fake *FakeFooBarInput) ArtifactCallCount() int {
fake.artifactMutex.RLock()
defer fake.artifactMutex.RUnlock()
return len(fake.artifactArgsForCall)
}
func (fake *FakeFooBarInput) ArtifactCalls(stub func() runtime.Artifact) {
fake.artifactMutex.Lock()
defer fake.artifactMutex.Unlock()
fake.ArtifactStub = stub
}
func (fake *FakeFooBarInput) ArtifactReturns(result1 runtime.Artifact) {
fake.artifactMutex.Lock()
defer fake.artifactMutex.Unlock()
fake.ArtifactStub = nil
fake.artifactReturns = struct {
result1 runtime.Artifact
}{result1}
}
func (fake *FakeFooBarInput) ArtifactReturnsOnCall(i int, result1 runtime.Artifact) {
fake.artifactMutex.Lock()
defer fake.artifactMutex.Unlock()
fake.ArtifactStub = nil
if fake.artifactReturnsOnCall == nil {
fake.artifactReturnsOnCall = make(map[int]struct {
result1 runtime.Artifact
})
}
fake.artifactReturnsOnCall[i] = struct {
result1 runtime.Artifact
}{result1}
}
func (fake *FakeFooBarInput) DestinationPath() string {
fake.destinationPathMutex.Lock()
ret, specificReturn := fake.destinationPathReturnsOnCall[len(fake.destinationPathArgsForCall)]
fake.destinationPathArgsForCall = append(fake.destinationPathArgsForCall, struct {
}{})
fake.recordInvocation("DestinationPath", []interface{}{})
fake.destinationPathMutex.Unlock()
if fake.DestinationPathStub != nil {
return fake.DestinationPathStub()
}
if specificReturn {
return ret.result1
}
fakeReturns := fake.destinationPathReturns
return fakeReturns.result1
}
func (fake *FakeFooBarInput) DestinationPathCallCount() int {
fake.destinationPathMutex.RLock()
defer fake.destinationPathMutex.RUnlock()
return len(fake.destinationPathArgsForCall)
}
func (fake *FakeFooBarInput) DestinationPathCalls(stub func() string) {
fake.destinationPathMutex.Lock()
defer fake.destinationPathMutex.Unlock()
fake.DestinationPathStub = stub
}
func (fake *FakeFooBarInput) DestinationPathReturns(result1 string) {
fake.destinationPathMutex.Lock()
defer fake.destinationPathMutex.Unlock()
fake.DestinationPathStub = nil
fake.destinationPathReturns = struct {
result1 string
}{result1}
}
func (fake *FakeFooBarInput) DestinationPathReturnsOnCall(i int, result1 string) {
fake.destinationPathMutex.Lock()
defer fake.destinationPathMutex.Unlock()
fake.DestinationPathStub = nil
if fake.destinationPathReturnsOnCall == nil {
fake.destinationPathReturnsOnCall = make(map[int]struct {
result1 string
})
}
fake.destinationPathReturnsOnCall[i] = struct {
result1 string
}{result1}
}
func (fake *FakeFooBarInput) Invocations() map[string][][]interface{} {
fake.invocationsMutex.RLock()
defer fake.invocationsMutex.RUnlock()
fake.artifactMutex.RLock()
defer fake.artifactMutex.RUnlock()
fake.destinationPathMutex.RLock()
defer fake.destinationPathMutex.RUnlock()
copiedInvocations := map[string][][]interface{}{}
for key, value := range fake.invocations {
copiedInvocations[key] = value
}
return copiedInvocations
}
func (fake *FakeFooBarInput) recordInvocation(key string, args []interface{}) {
fake.invocationsMutex.Lock()
defer fake.invocationsMutex.Unlock()
if fake.invocations == nil {
fake.invocations = map[string][][]interface{}{}
}
if fake.invocations[key] == nil {
fake.invocations[key] = [][]interface{}{}
}
fake.invocations[key] = append(fake.invocations[key], args)
}
var _ worker.FooBarInput = new(FakeFooBarInput)
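A hedged sketch of how this new fake might be exercised in a test (not part of this commit); the destination path string is a placeholder and the nil artifact stands in for any runtime.Artifact implementation:

input := new(workerfakes.FakeFooBarInput)
input.ArtifactReturns(nil) // any runtime.Artifact would do here
input.DestinationPathReturns("some-input-dir")

Expect(input.DestinationPath()).To(Equal("some-input-dir"))
Expect(input.DestinationPathCallCount()).To(Equal(1))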

View File

@ -4,7 +4,6 @@ package workerfakes
import (
"sync"
"code.cloudfoundry.org/clock"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/worker"
@ -59,13 +58,12 @@ type FakeWorkerProvider struct {
result1 []worker.Worker
result2 error
}
NewGardenWorkerStub func(lager.Logger, clock.Clock, db.Worker, int) worker.Worker
NewGardenWorkerStub func(lager.Logger, db.Worker, int) worker.Worker
newGardenWorkerMutex sync.RWMutex
newGardenWorkerArgsForCall []struct {
arg1 lager.Logger
arg2 clock.Clock
arg3 db.Worker
arg4 int
arg2 db.Worker
arg3 int
}
newGardenWorkerReturns struct {
result1 worker.Worker
@ -290,19 +288,18 @@ func (fake *FakeWorkerProvider) FindWorkersForContainerByOwnerReturnsOnCall(i in
}{result1, result2}
}
func (fake *FakeWorkerProvider) NewGardenWorker(arg1 lager.Logger, arg2 clock.Clock, arg3 db.Worker, arg4 int) worker.Worker {
func (fake *FakeWorkerProvider) NewGardenWorker(arg1 lager.Logger, arg2 db.Worker, arg3 int) worker.Worker {
fake.newGardenWorkerMutex.Lock()
ret, specificReturn := fake.newGardenWorkerReturnsOnCall[len(fake.newGardenWorkerArgsForCall)]
fake.newGardenWorkerArgsForCall = append(fake.newGardenWorkerArgsForCall, struct {
arg1 lager.Logger
arg2 clock.Clock
arg3 db.Worker
arg4 int
}{arg1, arg2, arg3, arg4})
fake.recordInvocation("NewGardenWorker", []interface{}{arg1, arg2, arg3, arg4})
arg2 db.Worker
arg3 int
}{arg1, arg2, arg3})
fake.recordInvocation("NewGardenWorker", []interface{}{arg1, arg2, arg3})
fake.newGardenWorkerMutex.Unlock()
if fake.NewGardenWorkerStub != nil {
return fake.NewGardenWorkerStub(arg1, arg2, arg3, arg4)
return fake.NewGardenWorkerStub(arg1, arg2, arg3)
}
if specificReturn {
return ret.result1
@ -317,17 +314,17 @@ func (fake *FakeWorkerProvider) NewGardenWorkerCallCount() int {
return len(fake.newGardenWorkerArgsForCall)
}
func (fake *FakeWorkerProvider) NewGardenWorkerCalls(stub func(lager.Logger, clock.Clock, db.Worker, int) worker.Worker) {
func (fake *FakeWorkerProvider) NewGardenWorkerCalls(stub func(lager.Logger, db.Worker, int) worker.Worker) {
fake.newGardenWorkerMutex.Lock()
defer fake.newGardenWorkerMutex.Unlock()
fake.NewGardenWorkerStub = stub
}
func (fake *FakeWorkerProvider) NewGardenWorkerArgsForCall(i int) (lager.Logger, clock.Clock, db.Worker, int) {
func (fake *FakeWorkerProvider) NewGardenWorkerArgsForCall(i int) (lager.Logger, db.Worker, int) {
fake.newGardenWorkerMutex.RLock()
defer fake.newGardenWorkerMutex.RUnlock()
argsForCall := fake.newGardenWorkerArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3
}
func (fake *FakeWorkerProvider) NewGardenWorkerReturns(result1 worker.Worker) {

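With the narrowed NewGardenWorker signature (the clock.Clock argument is gone), a test could stub the provider roughly as in this hedged sketch (not part of this diff); logger, dbWorker, and gardenWorker are placeholder values assumed to be set up elsewhere in the test:

fakeProvider := new(workerfakes.FakeWorkerProvider)
fakeProvider.NewGardenWorkerReturns(gardenWorker)

got := fakeProvider.NewGardenWorker(logger, dbWorker, 1)
Expect(got).To(Equal(gardenWorker))
Expect(fakeProvider.NewGardenWorkerCallCount()).To(Equal(1))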
View File

@ -11,7 +11,7 @@ import (
"github.com/onsi/gomega/ghttp"
)
var _ = Describe("Artifacts", func() {
var _ = Describe("ArtifactRepository", func() {
Describe("CreateArtifact", func() {
Context("when creating the artifact fails", func() {