Merge pull request #4118 from Pix4D/active_tasks_lock_task_step

Implement new experimental placement strategy: 'limit-active-tasks'
Nader Ziada 2019-07-19 10:33:40 -04:00 committed by GitHub
commit aab8f4a85d
43 changed files with 2056 additions and 184 deletions

View File

@ -115,7 +115,8 @@ type RunCommand struct {
ResourceCheckingInterval time.Duration `long:"resource-checking-interval" default:"1m" description:"Interval on which to check for new versions of resources."`
ResourceTypeCheckingInterval time.Duration `long:"resource-type-checking-interval" default:"1m" description:"Interval on which to check for new versions of resource types."`
ContainerPlacementStrategy string `long:"container-placement-strategy" default:"volume-locality" choice:"volume-locality" choice:"random" choice:"fewest-build-containers" description:"Method by which a worker is selected during container placement."`
ContainerPlacementStrategy string `long:"container-placement-strategy" default:"volume-locality" choice:"volume-locality" choice:"random" choice:"fewest-build-containers" choice:"limit-active-tasks" description:"Method by which a worker is selected during container placement."`
MaxActiveTasksPerWorker int `long:"max-active-tasks-per-worker" default:"0" description:"Maximum allowed number of active build tasks per worker. Has effect only when used with limit-active-tasks placement strategy. 0 means no limit."`
BaggageclaimResponseHeaderTimeout time.Duration `long:"baggageclaim-response-header-timeout" default:"1m" description:"How long to wait for Baggageclaim to send the response header."`
CLIArtifactsDir flag.Dir `long:"cli-artifacts-dir" description:"Directory containing downloadable CLI binaries."`
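For illustration, the new strategy is opted into by combining the two flags above, e.g. starting the web node with --container-placement-strategy=limit-active-tasks --max-active-tasks-per-worker=5 (the value 5 is arbitrary). As chooseBuildContainerStrategy below enforces, a non-zero max-active-tasks-per-worker is rejected under any other strategy, and the default of 0 leaves tasks per worker unlimited.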
@ -747,7 +748,10 @@ func (cmd *RunCommand) constructBackendMembers(
return nil, err
}
buildContainerStrategy := cmd.chooseBuildContainerStrategy()
buildContainerStrategy, err := cmd.chooseBuildContainerStrategy()
if err != nil {
return nil, err
}
checkContainerStrategy := worker.NewRandomPlacementStrategy()
engine := cmd.constructEngine(
@ -760,6 +764,7 @@ func (cmd *RunCommand) constructBackendMembers(
defaultLimits,
buildContainerStrategy,
resourceFactory,
lockFactory,
)
radarSchedulerFactory := pipelines.NewRadarSchedulerFactory(
@ -1186,18 +1191,26 @@ func (cmd *RunCommand) constructLockConn(driverName string) (*sql.DB, error) {
return dbConn, nil
}
func (cmd *RunCommand) chooseBuildContainerStrategy() worker.ContainerPlacementStrategy {
func (cmd *RunCommand) chooseBuildContainerStrategy() (worker.ContainerPlacementStrategy, error) {
var strategy worker.ContainerPlacementStrategy
if cmd.ContainerPlacementStrategy != "limit-active-tasks" && cmd.MaxActiveTasksPerWorker != 0 {
return nil, errors.New("max-active-tasks-per-worker has only effect with limit-active-tasks strategy")
}
if cmd.MaxActiveTasksPerWorker < 0 {
return nil, errors.New("max-active-tasks-per-worker must be greater or equal than 0")
}
switch cmd.ContainerPlacementStrategy {
case "random":
strategy = worker.NewRandomPlacementStrategy()
case "fewest-build-containers":
strategy = worker.NewFewestBuildContainersPlacementStrategy()
case "limit-active-tasks":
strategy = worker.NewLimitActiveTasksPlacementStrategy(cmd.MaxActiveTasksPerWorker)
default:
strategy = worker.NewVolumeLocalityPlacementStrategy()
}
return strategy
return strategy, nil
}
func (cmd *RunCommand) configureAuthForDefaultTeam(teamFactory db.TeamFactory) error {
@ -1233,6 +1246,7 @@ func (cmd *RunCommand) constructEngine(
defaultLimits atc.ContainerLimits,
strategy worker.ContainerPlacementStrategy,
resourceFactory resource.ResourceFactory,
lockFactory lock.LockFactory,
) engine.Engine {
stepFactory := builder.NewStepFactory(
@ -1251,6 +1265,7 @@ func (cmd *RunCommand) constructEngine(
stepFactory,
builder.NewDelegateFactory(),
cmd.ExternalURL.String(),
lockFactory,
)
return engine.NewEngine(stepBuilder)
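Plumbing note: the lock factory the run command already constructs is now threaded through constructEngine into the step builder and from there into each task step (see the engine/builder and exec/task_step changes below), so that task steps can take the new task-step lock.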

View File

@ -2,9 +2,9 @@
package dbfakes
import (
sync "sync"
"sync"
db "github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/db"
)
type FakeTaskCacheFactory struct {

View File

@ -2,10 +2,10 @@
package dbfakes
import (
sync "sync"
time "time"
"sync"
"time"
db "github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/db"
)
type FakeVolumeRepository struct {

View File

@ -20,6 +20,18 @@ type FakeWorker struct {
activeContainersReturnsOnCall map[int]struct {
result1 int
}
ActiveTasksStub func() (int, error)
activeTasksMutex sync.RWMutex
activeTasksArgsForCall []struct {
}
activeTasksReturns struct {
result1 int
result2 error
}
activeTasksReturnsOnCall map[int]struct {
result1 int
result2 error
}
ActiveVolumesStub func() int
activeVolumesMutex sync.RWMutex
activeVolumesArgsForCall []struct {
@ -64,6 +76,16 @@ type FakeWorker struct {
result1 db.CreatingContainer
result2 error
}
DecreaseActiveTasksStub func() error
decreaseActiveTasksMutex sync.RWMutex
decreaseActiveTasksArgsForCall []struct {
}
decreaseActiveTasksReturns struct {
result1 error
}
decreaseActiveTasksReturnsOnCall map[int]struct {
result1 error
}
DeleteStub func() error
deleteMutex sync.RWMutex
deleteArgsForCall []struct {
@ -139,6 +161,16 @@ type FakeWorker struct {
hTTPSProxyURLReturnsOnCall map[int]struct {
result1 string
}
IncreaseActiveTasksStub func() error
increaseActiveTasksMutex sync.RWMutex
increaseActiveTasksArgsForCall []struct {
}
increaseActiveTasksReturns struct {
result1 error
}
increaseActiveTasksReturnsOnCall map[int]struct {
result1 error
}
LandStub func() error
landMutex sync.RWMutex
landArgsForCall []struct {
@ -351,6 +383,61 @@ func (fake *FakeWorker) ActiveContainersReturnsOnCall(i int, result1 int) {
}{result1}
}
func (fake *FakeWorker) ActiveTasks() (int, error) {
fake.activeTasksMutex.Lock()
ret, specificReturn := fake.activeTasksReturnsOnCall[len(fake.activeTasksArgsForCall)]
fake.activeTasksArgsForCall = append(fake.activeTasksArgsForCall, struct {
}{})
fake.recordInvocation("ActiveTasks", []interface{}{})
fake.activeTasksMutex.Unlock()
if fake.ActiveTasksStub != nil {
return fake.ActiveTasksStub()
}
if specificReturn {
return ret.result1, ret.result2
}
fakeReturns := fake.activeTasksReturns
return fakeReturns.result1, fakeReturns.result2
}
func (fake *FakeWorker) ActiveTasksCallCount() int {
fake.activeTasksMutex.RLock()
defer fake.activeTasksMutex.RUnlock()
return len(fake.activeTasksArgsForCall)
}
func (fake *FakeWorker) ActiveTasksCalls(stub func() (int, error)) {
fake.activeTasksMutex.Lock()
defer fake.activeTasksMutex.Unlock()
fake.ActiveTasksStub = stub
}
func (fake *FakeWorker) ActiveTasksReturns(result1 int, result2 error) {
fake.activeTasksMutex.Lock()
defer fake.activeTasksMutex.Unlock()
fake.ActiveTasksStub = nil
fake.activeTasksReturns = struct {
result1 int
result2 error
}{result1, result2}
}
func (fake *FakeWorker) ActiveTasksReturnsOnCall(i int, result1 int, result2 error) {
fake.activeTasksMutex.Lock()
defer fake.activeTasksMutex.Unlock()
fake.ActiveTasksStub = nil
if fake.activeTasksReturnsOnCall == nil {
fake.activeTasksReturnsOnCall = make(map[int]struct {
result1 int
result2 error
})
}
fake.activeTasksReturnsOnCall[i] = struct {
result1 int
result2 error
}{result1, result2}
}
func (fake *FakeWorker) ActiveVolumes() int {
fake.activeVolumesMutex.Lock()
ret, specificReturn := fake.activeVolumesReturnsOnCall[len(fake.activeVolumesArgsForCall)]
@ -571,6 +658,58 @@ func (fake *FakeWorker) CreateContainerReturnsOnCall(i int, result1 db.CreatingC
}{result1, result2}
}
func (fake *FakeWorker) DecreaseActiveTasks() error {
fake.decreaseActiveTasksMutex.Lock()
ret, specificReturn := fake.decreaseActiveTasksReturnsOnCall[len(fake.decreaseActiveTasksArgsForCall)]
fake.decreaseActiveTasksArgsForCall = append(fake.decreaseActiveTasksArgsForCall, struct {
}{})
fake.recordInvocation("DecreaseActiveTasks", []interface{}{})
fake.decreaseActiveTasksMutex.Unlock()
if fake.DecreaseActiveTasksStub != nil {
return fake.DecreaseActiveTasksStub()
}
if specificReturn {
return ret.result1
}
fakeReturns := fake.decreaseActiveTasksReturns
return fakeReturns.result1
}
func (fake *FakeWorker) DecreaseActiveTasksCallCount() int {
fake.decreaseActiveTasksMutex.RLock()
defer fake.decreaseActiveTasksMutex.RUnlock()
return len(fake.decreaseActiveTasksArgsForCall)
}
func (fake *FakeWorker) DecreaseActiveTasksCalls(stub func() error) {
fake.decreaseActiveTasksMutex.Lock()
defer fake.decreaseActiveTasksMutex.Unlock()
fake.DecreaseActiveTasksStub = stub
}
func (fake *FakeWorker) DecreaseActiveTasksReturns(result1 error) {
fake.decreaseActiveTasksMutex.Lock()
defer fake.decreaseActiveTasksMutex.Unlock()
fake.DecreaseActiveTasksStub = nil
fake.decreaseActiveTasksReturns = struct {
result1 error
}{result1}
}
func (fake *FakeWorker) DecreaseActiveTasksReturnsOnCall(i int, result1 error) {
fake.decreaseActiveTasksMutex.Lock()
defer fake.decreaseActiveTasksMutex.Unlock()
fake.DecreaseActiveTasksStub = nil
if fake.decreaseActiveTasksReturnsOnCall == nil {
fake.decreaseActiveTasksReturnsOnCall = make(map[int]struct {
result1 error
})
}
fake.decreaseActiveTasksReturnsOnCall[i] = struct {
result1 error
}{result1}
}
func (fake *FakeWorker) Delete() error {
fake.deleteMutex.Lock()
ret, specificReturn := fake.deleteReturnsOnCall[len(fake.deleteArgsForCall)]
@ -949,6 +1088,58 @@ func (fake *FakeWorker) HTTPSProxyURLReturnsOnCall(i int, result1 string) {
}{result1}
}
func (fake *FakeWorker) IncreaseActiveTasks() error {
fake.increaseActiveTasksMutex.Lock()
ret, specificReturn := fake.increaseActiveTasksReturnsOnCall[len(fake.increaseActiveTasksArgsForCall)]
fake.increaseActiveTasksArgsForCall = append(fake.increaseActiveTasksArgsForCall, struct {
}{})
fake.recordInvocation("IncreaseActiveTasks", []interface{}{})
fake.increaseActiveTasksMutex.Unlock()
if fake.IncreaseActiveTasksStub != nil {
return fake.IncreaseActiveTasksStub()
}
if specificReturn {
return ret.result1
}
fakeReturns := fake.increaseActiveTasksReturns
return fakeReturns.result1
}
func (fake *FakeWorker) IncreaseActiveTasksCallCount() int {
fake.increaseActiveTasksMutex.RLock()
defer fake.increaseActiveTasksMutex.RUnlock()
return len(fake.increaseActiveTasksArgsForCall)
}
func (fake *FakeWorker) IncreaseActiveTasksCalls(stub func() error) {
fake.increaseActiveTasksMutex.Lock()
defer fake.increaseActiveTasksMutex.Unlock()
fake.IncreaseActiveTasksStub = stub
}
func (fake *FakeWorker) IncreaseActiveTasksReturns(result1 error) {
fake.increaseActiveTasksMutex.Lock()
defer fake.increaseActiveTasksMutex.Unlock()
fake.IncreaseActiveTasksStub = nil
fake.increaseActiveTasksReturns = struct {
result1 error
}{result1}
}
func (fake *FakeWorker) IncreaseActiveTasksReturnsOnCall(i int, result1 error) {
fake.increaseActiveTasksMutex.Lock()
defer fake.increaseActiveTasksMutex.Unlock()
fake.IncreaseActiveTasksStub = nil
if fake.increaseActiveTasksReturnsOnCall == nil {
fake.increaseActiveTasksReturnsOnCall = make(map[int]struct {
result1 error
})
}
fake.increaseActiveTasksReturnsOnCall[i] = struct {
result1 error
}{result1}
}
func (fake *FakeWorker) Land() error {
fake.landMutex.Lock()
ret, specificReturn := fake.landReturnsOnCall[len(fake.landArgsForCall)]
@ -1743,6 +1934,8 @@ func (fake *FakeWorker) Invocations() map[string][][]interface{} {
defer fake.invocationsMutex.RUnlock()
fake.activeContainersMutex.RLock()
defer fake.activeContainersMutex.RUnlock()
fake.activeTasksMutex.RLock()
defer fake.activeTasksMutex.RUnlock()
fake.activeVolumesMutex.RLock()
defer fake.activeVolumesMutex.RUnlock()
fake.baggageclaimURLMutex.RLock()
@ -1751,6 +1944,8 @@ func (fake *FakeWorker) Invocations() map[string][][]interface{} {
defer fake.certsPathMutex.RUnlock()
fake.createContainerMutex.RLock()
defer fake.createContainerMutex.RUnlock()
fake.decreaseActiveTasksMutex.RLock()
defer fake.decreaseActiveTasksMutex.RUnlock()
fake.deleteMutex.RLock()
defer fake.deleteMutex.RUnlock()
fake.ephemeralMutex.RLock()
@ -1765,6 +1960,8 @@ func (fake *FakeWorker) Invocations() map[string][][]interface{} {
defer fake.hTTPProxyURLMutex.RUnlock()
fake.hTTPSProxyURLMutex.RLock()
defer fake.hTTPSProxyURLMutex.RUnlock()
fake.increaseActiveTasksMutex.RLock()
defer fake.increaseActiveTasksMutex.RUnlock()
fake.landMutex.RLock()
defer fake.landMutex.RUnlock()
fake.nameMutex.RLock()

View File

@ -2,9 +2,9 @@
package dbfakes
import (
sync "sync"
"sync"
db "github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/db"
)
type FakeWorkerTaskCacheFactory struct {

View File

@ -20,6 +20,7 @@ const (
LockTypeVolumeCreating
LockTypeContainerCreating
LockTypeDatabaseMigration
LockTypeTaskStep
)
var ErrLostLock = errors.New("lock was lost while held, possibly due to connection breakage")
@ -52,6 +53,10 @@ func NewDatabaseMigrationLockID() LockID {
return LockID{LockTypeDatabaseMigration}
}
func NewTaskStepLockID() LockID {
return LockID{LockTypeTaskStep}
}
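NewTaskStepLockID introduces a single global lock identifier, acquired non-blockingly through the LockFactory. A minimal sketch of that Acquire/Release pattern, mirroring what TaskStep.Run does further down in this diff; the withTaskStepLock helper is hypothetical and not part of the commit:

package example

import (
	"code.cloudfoundry.org/lager"

	"github.com/concourse/concourse/atc/db/lock"
)

// withTaskStepLock runs fn only while holding the global task-step lock and
// reports whether fn ran; callers are expected to retry later when the lock
// could not be acquired. Hypothetical helper, for illustration only.
func withTaskStepLock(logger lager.Logger, lockFactory lock.LockFactory, fn func() error) (bool, error) {
	taskLock, acquired, err := lockFactory.Acquire(logger, lock.NewTaskStepLockID())
	if err != nil {
		return false, err
	}
	if !acquired {
		// another ATC (or another task step) currently holds the lock
		return false, nil
	}
	defer taskLock.Release()

	return true, fn()
}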
//go:generate counterfeiter . LockFactory
type LockFactory interface {

View File

@ -0,0 +1,3 @@
BEGIN;
ALTER TABLE workers DROP COLUMN active_tasks;
COMMIT;

View File

@ -0,0 +1,3 @@
BEGIN;
ALTER TABLE workers ADD COLUMN active_tasks integer DEFAULT 0 CHECK (active_tasks >= 0);
COMMIT;

View File

@ -67,6 +67,10 @@ type Worker interface {
Prune() error
Delete() error
ActiveTasks() (int, error)
IncreaseActiveTasks() error
DecreaseActiveTasks() error
FindContainer(owner ContainerOwner) (CreatingContainer, CreatedContainer, error)
CreateContainer(owner ContainerOwner, meta ContainerMetadata) (CreatingContainer, error)
}
@ -84,6 +88,7 @@ type worker struct {
noProxy string
activeContainers int
activeVolumes int
activeTasks int
resourceTypes []atc.WorkerResourceType
platform string
tags []string
@ -352,3 +357,58 @@ func (worker *worker) findContainer(whereClause sq.Sqlizer) (CreatingContainer,
return creating, created, nil
}
func (worker *worker) ActiveTasks() (int, error) {
err := psql.Select("active_tasks").From("workers").Where(sq.Eq{"name": worker.name}).
RunWith(worker.conn).
QueryRow().
Scan(&worker.activeTasks)
if err != nil {
return 0, err
}
return worker.activeTasks, nil
}
func (worker *worker) IncreaseActiveTasks() error {
result, err := psql.Update("workers").
Set("active_tasks", sq.Expr("active_tasks+1")).
Where(sq.Eq{"name": worker.name}).
RunWith(worker.conn).
Exec()
if err != nil {
return err
}
count, err := result.RowsAffected()
if err != nil {
return err
}
if count == 0 {
return ErrWorkerNotPresent
}
return nil
}
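// DecreaseActiveTasks relies on the active_tasks CHECK constraint added by the
// migration earlier in this diff: decrementing a worker that is already at 0
// fails with a constraint violation rather than driving the counter negative.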
func (worker *worker) DecreaseActiveTasks() error {
result, err := psql.Update("workers").
Set("active_tasks", sq.Expr("active_tasks-1")).
Where(sq.Eq{"name": worker.name}).
RunWith(worker.conn).
Exec()
if err != nil {
return err
}
count, err := result.RowsAffected()
if err != nil {
return err
}
if count == 0 {
return ErrWorkerNotPresent
}
return nil
}

View File

@ -385,4 +385,51 @@ var _ = Describe("Worker", func() {
})
})
})
Describe("Active tasks", func() {
BeforeEach(func() {
var err error
worker, err = workerFactory.SaveWorker(atcWorker, 5*time.Minute)
Expect(err).NotTo(HaveOccurred())
})
Context("when the worker registers", func() {
It("has no active tasks", func() {
at, err := worker.ActiveTasks()
Expect(err).ToNot(HaveOccurred())
Expect(at).To(Equal(0))
})
})
Context("when the active task is increased", func() {
BeforeEach(func() {
err := worker.IncreaseActiveTasks()
Expect(err).ToNot(HaveOccurred())
})
It("increase the active tasks counter", func() {
at, err := worker.ActiveTasks()
Expect(err).ToNot(HaveOccurred())
Expect(at).To(Equal(1))
})
Context("when the active task is decreased", func() {
BeforeEach(func() {
err := worker.DecreaseActiveTasks()
Expect(err).ToNot(HaveOccurred())
})
It("reset the active tasks to 0", func() {
at, err := worker.ActiveTasks()
Expect(err).ToNot(HaveOccurred())
Expect(at).To(Equal(0))
})
})
})
Context("when the active task is decreased below 0", func() {
It("raise an error", func() {
err := worker.DecreaseActiveTasks()
Expect(err).To(HaveOccurred())
})
})
})
})

View File

@ -7,6 +7,7 @@ import (
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/db/lock"
"github.com/concourse/concourse/atc/exec"
)
@ -17,7 +18,7 @@ const supportedSchema = "exec.v2"
type StepFactory interface {
GetStep(atc.Plan, exec.StepMetadata, db.ContainerMetadata, exec.GetDelegate) exec.Step
PutStep(atc.Plan, exec.StepMetadata, db.ContainerMetadata, exec.PutDelegate) exec.Step
TaskStep(atc.Plan, exec.StepMetadata, db.ContainerMetadata, exec.TaskDelegate) exec.Step
TaskStep(atc.Plan, exec.StepMetadata, db.ContainerMetadata, exec.TaskDelegate, lock.LockFactory) exec.Step
ArtifactInputStep(atc.Plan, db.Build, exec.BuildStepDelegate) exec.Step
ArtifactOutputStep(atc.Plan, db.Build, exec.BuildStepDelegate) exec.Step
}
@ -35,11 +36,13 @@ func NewStepBuilder(
stepFactory StepFactory,
delegateFactory DelegateFactory,
externalURL string,
lockFactory lock.LockFactory,
) *stepBuilder {
return &stepBuilder{
stepFactory: stepFactory,
delegateFactory: delegateFactory,
externalURL: externalURL,
lockFactory: lockFactory,
}
}
@ -47,6 +50,7 @@ type stepBuilder struct {
stepFactory StepFactory
delegateFactory DelegateFactory
externalURL string
lockFactory lock.LockFactory
}
func (builder *stepBuilder) BuildStep(build db.Build) (exec.Step, error) {
@ -300,6 +304,7 @@ func (builder *stepBuilder) buildTaskStep(build db.Build, plan atc.Plan) exec.St
stepMetadata,
containerMetadata,
builder.delegateFactory.TaskDelegate(build, plan.ID),
builder.lockFactory,
)
}

View File

@ -4,6 +4,8 @@ import (
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/db/dbfakes"
"github.com/concourse/concourse/atc/db/lock"
"github.com/concourse/concourse/atc/db/lock/lockfakes"
"github.com/concourse/concourse/atc/engine/builder"
"github.com/concourse/concourse/atc/engine/builder/builderfakes"
"github.com/concourse/concourse/atc/exec"
@ -25,6 +27,8 @@ var _ = Describe("Builder", func() {
fakeStepFactory *builderfakes.FakeStepFactory
fakeDelegateFactory *builderfakes.FakeDelegateFactory
fakeLockDB *lockfakes.FakeLockDB
fakeLockFactory lock.LockFactory
planFactory atc.PlanFactory
stepBuilder StepBuilder
@ -33,11 +37,13 @@ var _ = Describe("Builder", func() {
BeforeEach(func() {
fakeStepFactory = new(builderfakes.FakeStepFactory)
fakeDelegateFactory = new(builderfakes.FakeDelegateFactory)
fakeLockFactory = lock.NewTestLockFactory(fakeLockDB)
stepBuilder = builder.NewStepBuilder(
fakeStepFactory,
fakeDelegateFactory,
"http://example.com",
fakeLockFactory,
)
planFactory = atc.NewPlanFactory(123)
@ -355,7 +361,7 @@ var _ = Describe("Builder", func() {
})
It("constructs nested steps correctly", func() {
plan, stepMetadata, containerMetadata, _ := fakeStepFactory.TaskStepArgsForCall(0)
plan, stepMetadata, containerMetadata, _, _ := fakeStepFactory.TaskStepArgsForCall(0)
expectedPlan := taskPlan
expectedPlan.Attempts = []int{2, 1}
Expect(plan).To(Equal(expectedPlan))
@ -372,7 +378,7 @@ var _ = Describe("Builder", func() {
Attempt: "2.1",
}))
plan, stepMetadata, containerMetadata, _ = fakeStepFactory.TaskStepArgsForCall(1)
plan, stepMetadata, containerMetadata, _, _ = fakeStepFactory.TaskStepArgsForCall(1)
expectedPlan = taskPlan
expectedPlan.Attempts = []int{2, 2}
Expect(plan).To(Equal(expectedPlan))
@ -442,15 +448,15 @@ var _ = Describe("Builder", func() {
It("constructs nested steps correctly", func() {
Expect(fakeStepFactory.TaskStepCallCount()).To(Equal(6))
_, _, containerMetadata, _ := fakeStepFactory.TaskStepArgsForCall(0)
_, _, containerMetadata, _, _ := fakeStepFactory.TaskStepArgsForCall(0)
Expect(containerMetadata.Attempt).To(Equal("1"))
_, _, containerMetadata, _ = fakeStepFactory.TaskStepArgsForCall(1)
_, _, containerMetadata, _, _ = fakeStepFactory.TaskStepArgsForCall(1)
Expect(containerMetadata.Attempt).To(Equal("1"))
_, _, containerMetadata, _ = fakeStepFactory.TaskStepArgsForCall(2)
_, _, containerMetadata, _, _ = fakeStepFactory.TaskStepArgsForCall(2)
Expect(containerMetadata.Attempt).To(Equal("1"))
_, _, containerMetadata, _ = fakeStepFactory.TaskStepArgsForCall(3)
_, _, containerMetadata, _, _ = fakeStepFactory.TaskStepArgsForCall(3)
Expect(containerMetadata.Attempt).To(Equal("1"))
_, _, containerMetadata, _ = fakeStepFactory.TaskStepArgsForCall(4)
_, _, containerMetadata, _, _ = fakeStepFactory.TaskStepArgsForCall(4)
Expect(containerMetadata.Attempt).To(Equal("1"))
})
})
@ -498,7 +504,7 @@ var _ = Describe("Builder", func() {
})
It("constructs tasks correctly", func() {
plan, stepMetadata, containerMetadata, _ := fakeStepFactory.TaskStepArgsForCall(0)
plan, stepMetadata, containerMetadata, _, _ := fakeStepFactory.TaskStepArgsForCall(0)
Expect(plan).To(Equal(expectedPlan))
Expect(stepMetadata).To(Equal(expectedMetadata))
Expect(containerMetadata).To(Equal(db.ContainerMetadata{
@ -645,7 +651,7 @@ var _ = Describe("Builder", func() {
It("constructs the completion hook correctly", func() {
Expect(fakeStepFactory.TaskStepCallCount()).To(Equal(4))
plan, stepMetadata, containerMetadata, _ := fakeStepFactory.TaskStepArgsForCall(2)
plan, stepMetadata, containerMetadata, _, _ := fakeStepFactory.TaskStepArgsForCall(2)
Expect(plan).To(Equal(completionTaskPlan))
Expect(stepMetadata).To(Equal(expectedMetadata))
Expect(containerMetadata).To(Equal(db.ContainerMetadata{
@ -662,7 +668,7 @@ var _ = Describe("Builder", func() {
It("constructs the failure hook correctly", func() {
Expect(fakeStepFactory.TaskStepCallCount()).To(Equal(4))
plan, stepMetadata, containerMetadata, _ := fakeStepFactory.TaskStepArgsForCall(0)
plan, stepMetadata, containerMetadata, _, _ := fakeStepFactory.TaskStepArgsForCall(0)
Expect(plan).To(Equal(failureTaskPlan))
Expect(stepMetadata).To(Equal(expectedMetadata))
Expect(containerMetadata).To(Equal(db.ContainerMetadata{
@ -679,7 +685,7 @@ var _ = Describe("Builder", func() {
It("constructs the success hook correctly", func() {
Expect(fakeStepFactory.TaskStepCallCount()).To(Equal(4))
plan, stepMetadata, containerMetadata, _ := fakeStepFactory.TaskStepArgsForCall(1)
plan, stepMetadata, containerMetadata, _, _ := fakeStepFactory.TaskStepArgsForCall(1)
Expect(plan).To(Equal(successTaskPlan))
Expect(stepMetadata).To(Equal(expectedMetadata))
Expect(containerMetadata).To(Equal(db.ContainerMetadata{
@ -696,7 +702,7 @@ var _ = Describe("Builder", func() {
It("constructs the next step correctly", func() {
Expect(fakeStepFactory.TaskStepCallCount()).To(Equal(4))
plan, stepMetadata, containerMetadata, _ := fakeStepFactory.TaskStepArgsForCall(3)
plan, stepMetadata, containerMetadata, _, _ := fakeStepFactory.TaskStepArgsForCall(3)
Expect(plan).To(Equal(nextTaskPlan))
Expect(stepMetadata).To(Equal(expectedMetadata))
Expect(containerMetadata).To(Equal(db.ContainerMetadata{

View File

@ -6,6 +6,7 @@ import (
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/db/lock"
"github.com/concourse/concourse/atc/engine/builder"
"github.com/concourse/concourse/atc/exec"
)
@ -65,13 +66,14 @@ type FakeStepFactory struct {
putStepReturnsOnCall map[int]struct {
result1 exec.Step
}
TaskStepStub func(atc.Plan, exec.StepMetadata, db.ContainerMetadata, exec.TaskDelegate) exec.Step
TaskStepStub func(atc.Plan, exec.StepMetadata, db.ContainerMetadata, exec.TaskDelegate, lock.LockFactory) exec.Step
taskStepMutex sync.RWMutex
taskStepArgsForCall []struct {
arg1 atc.Plan
arg2 exec.StepMetadata
arg3 db.ContainerMetadata
arg4 exec.TaskDelegate
arg5 lock.LockFactory
}
taskStepReturns struct {
result1 exec.Step
@ -333,7 +335,7 @@ func (fake *FakeStepFactory) PutStepReturnsOnCall(i int, result1 exec.Step) {
}{result1}
}
func (fake *FakeStepFactory) TaskStep(arg1 atc.Plan, arg2 exec.StepMetadata, arg3 db.ContainerMetadata, arg4 exec.TaskDelegate) exec.Step {
func (fake *FakeStepFactory) TaskStep(arg1 atc.Plan, arg2 exec.StepMetadata, arg3 db.ContainerMetadata, arg4 exec.TaskDelegate, arg5 lock.LockFactory) exec.Step {
fake.taskStepMutex.Lock()
ret, specificReturn := fake.taskStepReturnsOnCall[len(fake.taskStepArgsForCall)]
fake.taskStepArgsForCall = append(fake.taskStepArgsForCall, struct {
@ -341,11 +343,12 @@ func (fake *FakeStepFactory) TaskStep(arg1 atc.Plan, arg2 exec.StepMetadata, arg
arg2 exec.StepMetadata
arg3 db.ContainerMetadata
arg4 exec.TaskDelegate
}{arg1, arg2, arg3, arg4})
fake.recordInvocation("TaskStep", []interface{}{arg1, arg2, arg3, arg4})
arg5 lock.LockFactory
}{arg1, arg2, arg3, arg4, arg5})
fake.recordInvocation("TaskStep", []interface{}{arg1, arg2, arg3, arg4, arg5})
fake.taskStepMutex.Unlock()
if fake.TaskStepStub != nil {
return fake.TaskStepStub(arg1, arg2, arg3, arg4)
return fake.TaskStepStub(arg1, arg2, arg3, arg4, arg5)
}
if specificReturn {
return ret.result1
@ -360,17 +363,17 @@ func (fake *FakeStepFactory) TaskStepCallCount() int {
return len(fake.taskStepArgsForCall)
}
func (fake *FakeStepFactory) TaskStepCalls(stub func(atc.Plan, exec.StepMetadata, db.ContainerMetadata, exec.TaskDelegate) exec.Step) {
func (fake *FakeStepFactory) TaskStepCalls(stub func(atc.Plan, exec.StepMetadata, db.ContainerMetadata, exec.TaskDelegate, lock.LockFactory) exec.Step) {
fake.taskStepMutex.Lock()
defer fake.taskStepMutex.Unlock()
fake.TaskStepStub = stub
}
func (fake *FakeStepFactory) TaskStepArgsForCall(i int) (atc.Plan, exec.StepMetadata, db.ContainerMetadata, exec.TaskDelegate) {
func (fake *FakeStepFactory) TaskStepArgsForCall(i int) (atc.Plan, exec.StepMetadata, db.ContainerMetadata, exec.TaskDelegate, lock.LockFactory) {
fake.taskStepMutex.RLock()
defer fake.taskStepMutex.RUnlock()
argsForCall := fake.taskStepArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5
}
func (fake *FakeStepFactory) TaskStepReturns(result1 exec.Step) {

View File

@ -8,6 +8,7 @@ import (
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/creds"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/db/lock"
"github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/resource"
"github.com/concourse/concourse/atc/worker"
@ -102,6 +103,7 @@ func (factory *stepFactory) TaskStep(
stepMetadata exec.StepMetadata,
containerMetadata db.ContainerMetadata,
delegate exec.TaskDelegate,
lockFactory lock.LockFactory,
) exec.Step {
sum := sha1.Sum([]byte(plan.Task.Name))
containerMetadata.WorkingDirectory = filepath.Join("/tmp", "build", fmt.Sprintf("%x", sum[:4]))
@ -116,6 +118,7 @@ func (factory *stepFactory) TaskStep(
factory.strategy,
factory.pool,
delegate,
lockFactory,
)
return exec.LogError(taskStep, delegate)

View File

@ -41,13 +41,6 @@ type FakeGetDelegate struct {
initializingArgsForCall []struct {
arg1 lager.Logger
}
SaveVersionStub func(lager.Logger, string, exec.VersionInfo)
saveVersionMutex sync.RWMutex
saveVersionArgsForCall []struct {
arg1 lager.Logger
arg2 string
arg3 exec.VersionInfo
}
StartingStub func(lager.Logger)
startingMutex sync.RWMutex
startingArgsForCall []struct {
@ -240,39 +233,6 @@ func (fake *FakeGetDelegate) InitializingArgsForCall(i int) lager.Logger {
return argsForCall.arg1
}
func (fake *FakeGetDelegate) SaveVersion(arg1 lager.Logger, arg2 string, arg3 exec.VersionInfo) {
fake.saveVersionMutex.Lock()
fake.saveVersionArgsForCall = append(fake.saveVersionArgsForCall, struct {
arg1 lager.Logger
arg2 string
arg3 exec.VersionInfo
}{arg1, arg2, arg3})
fake.recordInvocation("SaveVersion", []interface{}{arg1, arg2, arg3})
fake.saveVersionMutex.Unlock()
if fake.SaveVersionStub != nil {
fake.SaveVersionStub(arg1, arg2, arg3)
}
}
func (fake *FakeGetDelegate) SaveVersionCallCount() int {
fake.saveVersionMutex.RLock()
defer fake.saveVersionMutex.RUnlock()
return len(fake.saveVersionArgsForCall)
}
func (fake *FakeGetDelegate) SaveVersionCalls(stub func(lager.Logger, string, exec.VersionInfo)) {
fake.saveVersionMutex.Lock()
defer fake.saveVersionMutex.Unlock()
fake.SaveVersionStub = stub
}
func (fake *FakeGetDelegate) SaveVersionArgsForCall(i int) (lager.Logger, string, exec.VersionInfo) {
fake.saveVersionMutex.RLock()
defer fake.saveVersionMutex.RUnlock()
argsForCall := fake.saveVersionArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3
}
func (fake *FakeGetDelegate) Starting(arg1 lager.Logger) {
fake.startingMutex.Lock()
fake.startingArgsForCall = append(fake.startingArgsForCall, struct {
@ -452,8 +412,6 @@ func (fake *FakeGetDelegate) Invocations() map[string][][]interface{} {
defer fake.imageVersionDeterminedMutex.RUnlock()
fake.initializingMutex.RLock()
defer fake.initializingMutex.RUnlock()
fake.saveVersionMutex.RLock()
defer fake.saveVersionMutex.RUnlock()
fake.startingMutex.RLock()
defer fake.startingMutex.RUnlock()
fake.stderrMutex.RLock()

View File

@ -9,6 +9,7 @@ import (
"path/filepath"
"strconv"
"strings"
"time"
"code.cloudfoundry.org/garden"
"code.cloudfoundry.org/lager"
@ -17,6 +18,7 @@ import (
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/creds"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/db/lock"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/worker"
)
@ -75,6 +77,7 @@ type TaskStep struct {
strategy worker.ContainerPlacementStrategy
workerPool worker.Pool
delegate TaskDelegate
lockFactory lock.LockFactory
succeeded bool
}
@ -88,6 +91,7 @@ func NewTaskStep(
strategy worker.ContainerPlacementStrategy,
workerPool worker.Pool,
delegate TaskDelegate,
lockFactory lock.LockFactory,
) Step {
return &TaskStep{
planID: planID,
@ -99,6 +103,7 @@ func NewTaskStep(
strategy: strategy,
workerPool: workerPool,
delegate: delegate,
lockFactory: lockFactory,
}
}
@ -190,16 +195,57 @@ func (step *TaskStep) Run(ctx context.Context, state RunState) error {
owner := db.NewBuildStepContainerOwner(step.metadata.BuildID, step.planID, step.metadata.TeamID)
chosenWorker, err := step.workerPool.FindOrChooseWorkerForContainer(
ctx,
logger,
owner,
containerSpec,
workerSpec,
step.strategy,
)
if err != nil {
return err
var chosenWorker worker.Worker
var activeTasksLock lock.Lock
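// With the limit-active-tasks strategy, worker selection is serialized through
// the global task-step lock: acquire the lock, pick a worker, bump its
// active_tasks counter, then release. If no worker currently has capacity the
// lock is released and the step sleeps before retrying; the counter is
// decremented again by the deferred decreaseActiveTasks once the step finishes.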
for {
if step.strategy.ModifiesActiveTasks() {
var acquired bool
activeTasksLock, acquired, err = step.lockFactory.Acquire(logger, lock.NewTaskStepLockID())
if err != nil {
return err
}
if !acquired {
time.Sleep(time.Second)
continue
}
}
chosenWorker, err = step.workerPool.FindOrChooseWorkerForContainer(
ctx,
logger,
owner,
containerSpec,
workerSpec,
step.strategy,
)
if err != nil {
return err
}
if step.strategy.ModifiesActiveTasks() {
if chosenWorker == nil {
logger.Info("no-worker-available")
err = activeTasksLock.Release()
if err != nil {
return err
}
time.Sleep(5 * time.Second)
continue
}
err = chosenWorker.IncreaseActiveTasks()
if err != nil {
logger.Error("Failed to increase active tasks.", err)
}
logger.Info("increase-active-tasks")
err = activeTasksLock.Release()
if err != nil {
return err
}
defer step.decreaseActiveTasks(logger, chosenWorker)
}
break
}
container, err := chosenWorker.FindOrCreateContainer(
@ -410,6 +456,7 @@ func (step *TaskStep) containerSpec(logger lager.Logger, repository *artifact.Re
User: config.Run.User,
Dir: metadata.WorkingDirectory,
Env: step.envForParams(config.Params),
Type: metadata.Type,
Inputs: []worker.InputSource{},
Outputs: worker.OutputPaths{},
@ -607,3 +654,11 @@ func (src *taskCacheSource) StreamFile(logger lager.Logger, filename string) (io
func (src *taskCacheSource) VolumeOn(logger lager.Logger, w worker.Worker) (worker.Volume, bool, error) {
return w.FindVolumeForTaskCache(src.logger, src.teamID, src.jobID, src.stepName, src.path)
}
func (step *TaskStep) decreaseActiveTasks(logger lager.Logger, w worker.Worker) {
logger.Info("decrease-active-tasks")
err := w.DecreaseActiveTasks()
if err != nil {
logger.Error("Error decreasing active tasks.", err)
}
}

View File

@ -16,6 +16,7 @@ import (
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/creds/credsfakes"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/db/lock/lockfakes"
"github.com/concourse/concourse/atc/exec"
"github.com/concourse/concourse/atc/exec/artifact"
"github.com/concourse/concourse/atc/exec/execfakes"
@ -39,6 +40,8 @@ var _ = Describe("TaskStep", func() {
fakeWorker *workerfakes.FakeWorker
fakeStrategy *workerfakes.FakeContainerPlacementStrategy
fakeLockFactory *lockfakes.FakeLockFactory
fakeSecretManager *credsfakes.FakeSecrets
fakeDelegate *execfakes.FakeTaskDelegate
taskPlan *atc.TaskPlan
@ -77,6 +80,11 @@ var _ = Describe("TaskStep", func() {
fakePool = new(workerfakes.FakePool)
fakeStrategy = new(workerfakes.FakeContainerPlacementStrategy)
fakeLock := new(lockfakes.FakeLock)
fakeLockFactory = new(lockfakes.FakeLockFactory)
fakeLockFactory.AcquireReturns(fakeLock, true, nil)
fakeSecretManager = new(credsfakes.FakeSecrets)
fakeSecretManager.GetReturns("super-secret-source", nil, true, nil)
@ -88,6 +96,11 @@ var _ = Describe("TaskStep", func() {
state = new(execfakes.FakeRunState)
state.ArtifactsReturns(repo)
fakeWorker.IncreaseActiveTasksStub = func() error {
fakeWorker.ActiveTasksReturns(1, nil)
return nil
}
uninterpolatedResourceTypes := atc.VersionedResourceTypes{
{
ResourceType: atc.ResourceType{
@ -136,6 +149,7 @@ var _ = Describe("TaskStep", func() {
fakeStrategy,
fakePool,
fakeDelegate,
fakeLockFactory,
)
stepErr = taskStep.Run(ctx, state)
@ -169,6 +183,21 @@ var _ = Describe("TaskStep", func() {
}
})
Context("when 'limit-active-tasks' strategy is chosen and a worker found", func() {
BeforeEach(func() {
fakeWorker.NameReturns("some-worker")
fakePool.FindOrChooseWorkerForContainerReturns(fakeWorker, nil)
fakeContainer := new(workerfakes.FakeContainer)
fakeWorker.FindOrCreateContainerReturns(fakeContainer, nil)
fakeStrategy.ModifiesActiveTasksReturns(true)
})
It("increase the active tasks on the worker", func() {
Expect(fakeWorker.ActiveTasks()).To(Equal(1))
})
})
Context("when the worker is either found or chosen", func() {
BeforeEach(func() {
fakeWorker.NameReturns("some-worker")
@ -176,6 +205,11 @@ var _ = Describe("TaskStep", func() {
fakeContainer := new(workerfakes.FakeContainer)
fakeWorker.FindOrCreateContainerReturns(fakeContainer, nil)
fakeWorker.DecreaseActiveTasksStub = func() error {
fakeWorker.ActiveTasksReturns(0, nil)
return nil
}
})
It("finds or chooses a worker", func() {
@ -203,6 +237,7 @@ var _ = Describe("TaskStep", func() {
},
Dir: "some-artifact-root",
Env: []string{"SECURE=secret-task-param"},
Type: "task",
Inputs: []worker.InputSource{},
Outputs: worker.OutputPaths{},
}))
@ -253,7 +288,6 @@ var _ = Describe("TaskStep", func() {
StepName: "some-step",
}))
cpu := uint64(1024)
memory := uint64(1024)
Expect(containerSpec).To(Equal(worker.ContainerSpec{
@ -275,6 +309,7 @@ var _ = Describe("TaskStep", func() {
},
Dir: "some-artifact-root",
Env: []string{"SECURE=secret-task-param"},
Type: "task",
Inputs: []worker.InputSource{},
Outputs: worker.OutputPaths{},
}))
@ -306,7 +341,6 @@ var _ = Describe("TaskStep", func() {
StepName: "some-step",
}))
Expect(containerSpec).To(Equal(worker.ContainerSpec{
Platform: "some-platform",
Tags: []string{"step", "tags"},
@ -317,6 +351,7 @@ var _ = Describe("TaskStep", func() {
},
Dir: "some-artifact-root",
Env: []string{"SOME=params"},
Type: "task",
Inputs: []worker.InputSource{},
Outputs: worker.OutputPaths{},
}))
@ -1025,6 +1060,16 @@ var _ = Describe("TaskStep", func() {
Expect(taskStep.Succeeded()).To(BeFalse())
})
})
Context("when 'limit-active-tasks' strategy is chosen", func() {
BeforeEach(func() {
fakeStrategy.ModifiesActiveTasksReturns(true)
})
It("decrements the active tasks counter on the worker", func() {
Expect(fakeWorker.ActiveTasks()).To(Equal(0))
})
})
})
Context("when the process is interrupted", func() {
@ -1141,6 +1186,15 @@ var _ = Describe("TaskStep", func() {
Expect(sourceMap).To(ConsistOf(artifactSource1, artifactSource2, artifactSource3))
})
})
Context("when 'limit-active-tasks' strategy is chosen", func() {
BeforeEach(func() {
fakeStrategy.ModifiesActiveTasksReturns(true)
})
It("decrements the active tasks counter on the worker", func() {
Expect(fakeWorker.ActiveTasks()).To(Equal(0))
})
})
})
})
@ -1527,6 +1581,15 @@ var _ = Describe("TaskStep", func() {
Expect(taskStep.Succeeded()).To(BeFalse())
})
})
Context("when 'limit-active-tasks' strategy is chosen", func() {
BeforeEach(func() {
fakeStrategy.ModifiesActiveTasksReturns(true)
})
It("decrements the active tasks counter on the worker", func() {
Expect(fakeWorker.ActiveTasks()).To(Equal(0))
})
})
})
Context("when waiting on the process fails", func() {
@ -1543,6 +1606,15 @@ var _ = Describe("TaskStep", func() {
It("is not successful", func() {
Expect(taskStep.Succeeded()).To(BeFalse())
})
Context("when 'limit-active-tasks' strategy is chosen", func() {
BeforeEach(func() {
fakeStrategy.ModifiesActiveTasksReturns(true)
})
It("decrements the active tasks counter on the worker", func() {
Expect(fakeWorker.ActiveTasks()).To(Equal(0))
})
})
})
Context("when the process is interrupted", func() {
@ -1600,6 +1672,15 @@ var _ = Describe("TaskStep", func() {
sourceMap := repo.AsMap()
Expect(sourceMap).To(BeEmpty())
})
Context("when 'limit-active-tasks' strategy is chosen", func() {
BeforeEach(func() {
fakeStrategy.ModifiesActiveTasksReturns(true)
})
It("decrements the active tasks counter on the worker", func() {
Expect(fakeWorker.ActiveTasks()).To(Equal(0))
})
})
})
Context("when running the task's script fails", func() {
@ -1616,6 +1697,16 @@ var _ = Describe("TaskStep", func() {
It("is not successful", func() {
Expect(taskStep.Succeeded()).To(BeFalse())
})
Context("when 'limit-active-tasks' strategy is chosen", func() {
BeforeEach(func() {
fakeStrategy.ModifiesActiveTasksReturns(true)
})
It("decrements the active tasks counter on the worker", func() {
Expect(fakeWorker.ActiveTasks()).To(Equal(0))
})
})
})
})
})
@ -1634,6 +1725,15 @@ var _ = Describe("TaskStep", func() {
It("is not successful", func() {
Expect(taskStep.Succeeded()).To(BeFalse())
})
Context("when 'limit-active-tasks' strategy is chosen", func() {
BeforeEach(func() {
fakeStrategy.ModifiesActiveTasksReturns(true)
})
It("decrements the active tasks counter on the worker", func() {
Expect(fakeWorker.ActiveTasks()).To(Equal(0))
})
})
})
})

View File

@ -4,7 +4,7 @@ package pipelinesfakes
import (
"sync"
"github.com/concourse/concourse/atc/creds"
"github.com/cloudfoundry/bosh-cli/director/template"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/pipelines"
"github.com/concourse/concourse/atc/radar"
@ -12,12 +12,12 @@ import (
)
type FakeRadarSchedulerFactory struct {
BuildScanRunnerFactoryStub func(db.Pipeline, string, creds.Variables, radar.Notifications) radar.ScanRunnerFactory
BuildScanRunnerFactoryStub func(db.Pipeline, string, template.Variables, radar.Notifications) radar.ScanRunnerFactory
buildScanRunnerFactoryMutex sync.RWMutex
buildScanRunnerFactoryArgsForCall []struct {
arg1 db.Pipeline
arg2 string
arg3 creds.Variables
arg3 template.Variables
arg4 radar.Notifications
}
buildScanRunnerFactoryReturns struct {
@ -41,13 +41,13 @@ type FakeRadarSchedulerFactory struct {
invocationsMutex sync.RWMutex
}
func (fake *FakeRadarSchedulerFactory) BuildScanRunnerFactory(arg1 db.Pipeline, arg2 string, arg3 creds.Variables, arg4 radar.Notifications) radar.ScanRunnerFactory {
func (fake *FakeRadarSchedulerFactory) BuildScanRunnerFactory(arg1 db.Pipeline, arg2 string, arg3 template.Variables, arg4 radar.Notifications) radar.ScanRunnerFactory {
fake.buildScanRunnerFactoryMutex.Lock()
ret, specificReturn := fake.buildScanRunnerFactoryReturnsOnCall[len(fake.buildScanRunnerFactoryArgsForCall)]
fake.buildScanRunnerFactoryArgsForCall = append(fake.buildScanRunnerFactoryArgsForCall, struct {
arg1 db.Pipeline
arg2 string
arg3 creds.Variables
arg3 template.Variables
arg4 radar.Notifications
}{arg1, arg2, arg3, arg4})
fake.recordInvocation("BuildScanRunnerFactory", []interface{}{arg1, arg2, arg3, arg4})
@ -68,13 +68,13 @@ func (fake *FakeRadarSchedulerFactory) BuildScanRunnerFactoryCallCount() int {
return len(fake.buildScanRunnerFactoryArgsForCall)
}
func (fake *FakeRadarSchedulerFactory) BuildScanRunnerFactoryCalls(stub func(db.Pipeline, string, creds.Variables, radar.Notifications) radar.ScanRunnerFactory) {
func (fake *FakeRadarSchedulerFactory) BuildScanRunnerFactoryCalls(stub func(db.Pipeline, string, template.Variables, radar.Notifications) radar.ScanRunnerFactory) {
fake.buildScanRunnerFactoryMutex.Lock()
defer fake.buildScanRunnerFactoryMutex.Unlock()
fake.BuildScanRunnerFactoryStub = stub
}
func (fake *FakeRadarSchedulerFactory) BuildScanRunnerFactoryArgsForCall(i int) (db.Pipeline, string, creds.Variables, radar.Notifications) {
func (fake *FakeRadarSchedulerFactory) BuildScanRunnerFactoryArgsForCall(i int) (db.Pipeline, string, template.Variables, radar.Notifications) {
fake.buildScanRunnerFactoryMutex.RLock()
defer fake.buildScanRunnerFactoryMutex.RUnlock()
argsForCall := fake.buildScanRunnerFactoryArgsForCall[i]

View File

@ -6,6 +6,7 @@ import (
"code.cloudfoundry.org/garden"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db"
)
type WorkerSpec struct {
@ -22,6 +23,7 @@ type ContainerSpec struct {
TeamID int
ImageSpec ImageSpec
Env []string
Type db.ContainerType
// Working directory for processes run in the container.
Dir string
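The new Type field lets a placement strategy see what kind of container it is placing; limit-active-tasks (further down in this diff) uses it so that only task containers are subject to the per-worker limit, while containers of other types are still spread across the least-busy workers but are never rejected by it.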

View File

@ -2,15 +2,15 @@
package imagefakes
import (
context "context"
io "io"
sync "sync"
"context"
"io"
"sync"
lager "code.cloudfoundry.org/lager"
atc "github.com/concourse/concourse/atc"
db "github.com/concourse/concourse/atc/db"
worker "github.com/concourse/concourse/atc/worker"
image "github.com/concourse/concourse/atc/worker/image"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/worker"
"github.com/concourse/concourse/atc/worker/image"
)
type FakeImageResourceFetcher struct {
@ -65,6 +65,12 @@ func (fake *FakeImageResourceFetcher) FetchCallCount() int {
return len(fake.fetchArgsForCall)
}
func (fake *FakeImageResourceFetcher) FetchCalls(stub func(context.Context, lager.Logger, db.CreatingContainer, bool) (worker.Volume, io.ReadCloser, atc.Version, error)) {
fake.fetchMutex.Lock()
defer fake.fetchMutex.Unlock()
fake.FetchStub = stub
}
func (fake *FakeImageResourceFetcher) FetchArgsForCall(i int) (context.Context, lager.Logger, db.CreatingContainer, bool) {
fake.fetchMutex.RLock()
defer fake.fetchMutex.RUnlock()
@ -73,6 +79,8 @@ func (fake *FakeImageResourceFetcher) FetchArgsForCall(i int) (context.Context,
}
func (fake *FakeImageResourceFetcher) FetchReturns(result1 worker.Volume, result2 io.ReadCloser, result3 atc.Version, result4 error) {
fake.fetchMutex.Lock()
defer fake.fetchMutex.Unlock()
fake.FetchStub = nil
fake.fetchReturns = struct {
result1 worker.Volume
@ -83,6 +91,8 @@ func (fake *FakeImageResourceFetcher) FetchReturns(result1 worker.Volume, result
}
func (fake *FakeImageResourceFetcher) FetchReturnsOnCall(i int, result1 worker.Volume, result2 io.ReadCloser, result3 atc.Version, result4 error) {
fake.fetchMutex.Lock()
defer fake.fetchMutex.Unlock()
fake.FetchStub = nil
if fake.fetchReturnsOnCall == nil {
fake.fetchReturnsOnCall = make(map[int]struct {

View File

@ -2,11 +2,11 @@
package imagefakes
import (
sync "sync"
"sync"
atc "github.com/concourse/concourse/atc"
worker "github.com/concourse/concourse/atc/worker"
image "github.com/concourse/concourse/atc/worker/image"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/worker"
"github.com/concourse/concourse/atc/worker/image"
)
type FakeImageResourceFetcherFactory struct {
@ -59,6 +59,12 @@ func (fake *FakeImageResourceFetcherFactory) NewImageResourceFetcherCallCount()
return len(fake.newImageResourceFetcherArgsForCall)
}
func (fake *FakeImageResourceFetcherFactory) NewImageResourceFetcherCalls(stub func(worker.Worker, worker.ImageResource, atc.Version, int, atc.VersionedResourceTypes, worker.ImageFetchingDelegate) image.ImageResourceFetcher) {
fake.newImageResourceFetcherMutex.Lock()
defer fake.newImageResourceFetcherMutex.Unlock()
fake.NewImageResourceFetcherStub = stub
}
func (fake *FakeImageResourceFetcherFactory) NewImageResourceFetcherArgsForCall(i int) (worker.Worker, worker.ImageResource, atc.Version, int, atc.VersionedResourceTypes, worker.ImageFetchingDelegate) {
fake.newImageResourceFetcherMutex.RLock()
defer fake.newImageResourceFetcherMutex.RUnlock()
@ -67,6 +73,8 @@ func (fake *FakeImageResourceFetcherFactory) NewImageResourceFetcherArgsForCall(
}
func (fake *FakeImageResourceFetcherFactory) NewImageResourceFetcherReturns(result1 image.ImageResourceFetcher) {
fake.newImageResourceFetcherMutex.Lock()
defer fake.newImageResourceFetcherMutex.Unlock()
fake.NewImageResourceFetcherStub = nil
fake.newImageResourceFetcherReturns = struct {
result1 image.ImageResourceFetcher
@ -74,6 +82,8 @@ func (fake *FakeImageResourceFetcherFactory) NewImageResourceFetcherReturns(resu
}
func (fake *FakeImageResourceFetcherFactory) NewImageResourceFetcherReturnsOnCall(i int, result1 image.ImageResourceFetcher) {
fake.newImageResourceFetcherMutex.Lock()
defer fake.newImageResourceFetcherMutex.Unlock()
fake.NewImageResourceFetcherStub = nil
if fake.newImageResourceFetcherReturnsOnCall == nil {
fake.newImageResourceFetcherReturnsOnCall = make(map[int]struct {

View File

@ -5,12 +5,14 @@ import (
"time"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc/db"
)
type ContainerPlacementStrategy interface {
//TODO: Don't pass around container metadata since it's not guaranteed to be deterministic.
// Change this after check containers stop being reused
Choose(lager.Logger, []Worker, ContainerSpec) (Worker, error)
ModifiesActiveTasks() bool
}
type VolumeLocalityPlacementStrategy struct {
@ -52,6 +54,10 @@ func (strategy *VolumeLocalityPlacementStrategy) Choose(logger lager.Logger, wor
return highestLocalityWorkers[strategy.rand.Intn(len(highestLocalityWorkers))], nil
}
func (strategy *VolumeLocalityPlacementStrategy) ModifiesActiveTasks() bool {
return false
}
type FewestBuildContainersPlacementStrategy struct {
rand *rand.Rand
}
@ -78,6 +84,56 @@ func (strategy *FewestBuildContainersPlacementStrategy) Choose(logger lager.Logg
return leastBusyWorkers[strategy.rand.Intn(len(leastBusyWorkers))], nil
}
func (strategy *FewestBuildContainersPlacementStrategy) ModifiesActiveTasks() bool {
return false
}
type LimitActiveTasksPlacementStrategy struct {
rand *rand.Rand
maxTasks int
}
func NewLimitActiveTasksPlacementStrategy(maxTasks int) ContainerPlacementStrategy {
return &LimitActiveTasksPlacementStrategy{
rand: rand.New(rand.NewSource(time.Now().UnixNano())),
maxTasks: maxTasks,
}
}
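// Choose buckets the candidate workers by their current active task count and
// picks at random among the least busy. When maxTasks > 0 and the container is
// a task, workers already at the limit are skipped entirely; if every worker is
// at the limit, Choose returns nil and the caller (TaskStep.Run above) waits
// and retries.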
func (strategy *LimitActiveTasksPlacementStrategy) Choose(logger lager.Logger, workers []Worker, spec ContainerSpec) (Worker, error) {
workersByWork := map[int][]Worker{}
minActiveTasks := -1
for _, w := range workers {
activeTasks, err := w.ActiveTasks()
if err != nil {
logger.Error("Cannot retrive active tasks on worker. Skipping.", err)
continue
}
// If maxTasks == 0 or the step is not a task, ignore the number of active tasks and distribute the work evenly
if strategy.maxTasks > 0 && activeTasks >= strategy.maxTasks && spec.Type == db.ContainerTypeTask {
logger.Info("worker-busy")
continue
}
workersByWork[activeTasks] = append(workersByWork[activeTasks], w)
if minActiveTasks == -1 || activeTasks < minActiveTasks {
minActiveTasks = activeTasks
}
}
leastBusyWorkers := workersByWork[minActiveTasks]
if len(leastBusyWorkers) < 1 {
return nil, nil
}
return leastBusyWorkers[strategy.rand.Intn(len(leastBusyWorkers))], nil
}
func (strategy *LimitActiveTasksPlacementStrategy) ModifiesActiveTasks() bool {
return true
}
type RandomPlacementStrategy struct {
rand *rand.Rand
}
@ -91,3 +147,7 @@ func NewRandomPlacementStrategy() ContainerPlacementStrategy {
func (strategy *RandomPlacementStrategy) Choose(logger lager.Logger, workers []Worker, spec ContainerSpec) (Worker, error) {
return workers[strategy.rand.Intn(len(workers))], nil
}
func (strategy *RandomPlacementStrategy) ModifiesActiveTasks() bool {
return false
}

View File

@ -305,3 +305,148 @@ var _ = Describe("RandomPlacementStrategy", func() {
})
})
})
var _ = Describe("LimitActiveTasksPlacementStrategy", func() {
Describe("Choose", func() {
var compatibleWorker1 *workerfakes.FakeWorker
var compatibleWorker2 *workerfakes.FakeWorker
var compatibleWorker3 *workerfakes.FakeWorker
BeforeEach(func() {
logger = lagertest.NewTestLogger("active-tasks-equal-placement-test")
strategy = NewLimitActiveTasksPlacementStrategy(0)
compatibleWorker1 = new(workerfakes.FakeWorker)
compatibleWorker2 = new(workerfakes.FakeWorker)
compatibleWorker3 = new(workerfakes.FakeWorker)
spec = ContainerSpec{
ImageSpec: ImageSpec{ResourceType: "some-type"},
Type: "task",
TeamID: 4567,
Inputs: []InputSource{},
}
})
Context("when there is only one worker with any amount of running tasks", func() {
BeforeEach(func() {
workers = []Worker{compatibleWorker1}
compatibleWorker1.ActiveTasksReturns(42, nil)
})
It("picks that worker", func() {
chosenWorker, chooseErr = strategy.Choose(
logger,
workers,
spec,
)
Expect(chooseErr).ToNot(HaveOccurred())
Expect(chosenWorker).To(Equal(compatibleWorker1))
})
})
Context("when there are multiple workers", func() {
BeforeEach(func() {
workers = []Worker{compatibleWorker1, compatibleWorker2, compatibleWorker3}
compatibleWorker1.ActiveTasksReturns(2, nil)
compatibleWorker2.ActiveTasksReturns(1, nil)
compatibleWorker3.ActiveTasksReturns(2, nil)
})
It("a task picks the one with least amount of active tasks", func() {
Consistently(func() Worker {
chosenWorker, chooseErr = strategy.Choose(
logger,
workers,
spec,
)
Expect(chooseErr).ToNot(HaveOccurred())
return chosenWorker
}).Should(Equal(compatibleWorker2))
})
Context("when all the workers have the same number of active tasks", func() {
BeforeEach(func() {
workers = []Worker{compatibleWorker1, compatibleWorker2, compatibleWorker3}
compatibleWorker1.ActiveTasksReturns(1, nil)
compatibleWorker3.ActiveTasksReturns(1, nil)
})
It("a task picks any of them", func() {
Consistently(func() Worker {
chosenWorker, chooseErr = strategy.Choose(
logger,
workers,
spec,
)
Expect(chooseErr).ToNot(HaveOccurred())
return chosenWorker
}).Should(Or(Equal(compatibleWorker1), Equal(compatibleWorker3)))
})
})
})
Context("when max-tasks-per-worker is set to 1", func() {
BeforeEach(func() {
strategy = NewLimitActiveTasksPlacementStrategy(1)
})
Context("when there are multiple workers", func() {
BeforeEach(func() {
workers = []Worker{compatibleWorker1, compatibleWorker2, compatibleWorker3}
compatibleWorker1.ActiveTasksReturns(1, nil)
compatibleWorker2.ActiveTasksReturns(0, nil)
compatibleWorker3.ActiveTasksReturns(1, nil)
})
It("picks the worker with no active tasks", func() {
chosenWorker, chooseErr = strategy.Choose(
logger,
workers,
spec,
)
Expect(chooseErr).ToNot(HaveOccurred())
Expect(chosenWorker).To(Equal(compatibleWorker2))
})
})
Context("when all workers have active tasks", func() {
BeforeEach(func() {
workers = []Worker{compatibleWorker1, compatibleWorker2, compatibleWorker3}
compatibleWorker1.ActiveTasksReturns(1, nil)
compatibleWorker2.ActiveTasksReturns(1, nil)
compatibleWorker3.ActiveTasksReturns(1, nil)
})
It("picks no worker", func() {
chosenWorker, chooseErr = strategy.Choose(
logger,
workers,
spec,
)
Expect(chooseErr).ToNot(HaveOccurred())
Expect(chosenWorker).To(BeNil())
})
Context("when the container is not of type 'task'", func() {
BeforeEach(func() {
spec.Type = ""
})
It("picks any worker", func() {
Consistently(func() Worker {
chosenWorker, chooseErr = strategy.Choose(
logger,
workers,
spec,
)
Expect(chooseErr).ToNot(HaveOccurred())
return chosenWorker
}).Should(Or(Equal(compatibleWorker1), Equal(compatibleWorker3)))
})
})
})
})
})
})

View File

@ -2,9 +2,9 @@
package transportfakes
import (
sync "sync"
"sync"
transport "github.com/concourse/concourse/atc/worker/transport"
"github.com/concourse/concourse/atc/worker/transport"
)
type FakeReadCloser struct {
@ -58,7 +58,15 @@ func (fake *FakeReadCloser) CloseCallCount() int {
return len(fake.closeArgsForCall)
}
func (fake *FakeReadCloser) CloseCalls(stub func() error) {
fake.closeMutex.Lock()
defer fake.closeMutex.Unlock()
fake.CloseStub = stub
}
func (fake *FakeReadCloser) CloseReturns(result1 error) {
fake.closeMutex.Lock()
defer fake.closeMutex.Unlock()
fake.CloseStub = nil
fake.closeReturns = struct {
result1 error
@ -66,6 +74,8 @@ func (fake *FakeReadCloser) CloseReturns(result1 error) {
}
func (fake *FakeReadCloser) CloseReturnsOnCall(i int, result1 error) {
fake.closeMutex.Lock()
defer fake.closeMutex.Unlock()
fake.CloseStub = nil
if fake.closeReturnsOnCall == nil {
fake.closeReturnsOnCall = make(map[int]struct {
@ -106,6 +116,12 @@ func (fake *FakeReadCloser) ReadCallCount() int {
return len(fake.readArgsForCall)
}
func (fake *FakeReadCloser) ReadCalls(stub func([]byte) (int, error)) {
fake.readMutex.Lock()
defer fake.readMutex.Unlock()
fake.ReadStub = stub
}
func (fake *FakeReadCloser) ReadArgsForCall(i int) []byte {
fake.readMutex.RLock()
defer fake.readMutex.RUnlock()
@ -114,6 +130,8 @@ func (fake *FakeReadCloser) ReadArgsForCall(i int) []byte {
}
func (fake *FakeReadCloser) ReadReturns(result1 int, result2 error) {
fake.readMutex.Lock()
defer fake.readMutex.Unlock()
fake.ReadStub = nil
fake.readReturns = struct {
result1 int
@ -122,6 +140,8 @@ func (fake *FakeReadCloser) ReadReturns(result1 int, result2 error) {
}
func (fake *FakeReadCloser) ReadReturnsOnCall(i int, result1 int, result2 error) {
fake.readMutex.Lock()
defer fake.readMutex.Unlock()
fake.ReadStub = nil
if fake.readReturnsOnCall == nil {
fake.readReturnsOnCall = make(map[int]struct {

View File

@ -2,12 +2,12 @@
package transportfakes
import (
io "io"
http "net/http"
sync "sync"
"io"
"net/http"
"sync"
transport "github.com/concourse/concourse/atc/worker/transport"
rata "github.com/tedsuo/rata"
"github.com/concourse/concourse/atc/worker/transport"
"github.com/tedsuo/rata"
)
type FakeRequestGenerator struct {
@ -56,6 +56,12 @@ func (fake *FakeRequestGenerator) CreateRequestCallCount() int {
return len(fake.createRequestArgsForCall)
}
func (fake *FakeRequestGenerator) CreateRequestCalls(stub func(string, rata.Params, io.Reader) (*http.Request, error)) {
fake.createRequestMutex.Lock()
defer fake.createRequestMutex.Unlock()
fake.CreateRequestStub = stub
}
func (fake *FakeRequestGenerator) CreateRequestArgsForCall(i int) (string, rata.Params, io.Reader) {
fake.createRequestMutex.RLock()
defer fake.createRequestMutex.RUnlock()
@ -64,6 +70,8 @@ func (fake *FakeRequestGenerator) CreateRequestArgsForCall(i int) (string, rata.
}
func (fake *FakeRequestGenerator) CreateRequestReturns(result1 *http.Request, result2 error) {
fake.createRequestMutex.Lock()
defer fake.createRequestMutex.Unlock()
fake.CreateRequestStub = nil
fake.createRequestReturns = struct {
result1 *http.Request
@ -72,6 +80,8 @@ func (fake *FakeRequestGenerator) CreateRequestReturns(result1 *http.Request, re
}
func (fake *FakeRequestGenerator) CreateRequestReturnsOnCall(i int, result1 *http.Request, result2 error) {
fake.createRequestMutex.Lock()
defer fake.createRequestMutex.Unlock()
fake.CreateRequestStub = nil
if fake.createRequestReturnsOnCall == nil {
fake.createRequestReturnsOnCall = make(map[int]struct {

View File

@ -2,10 +2,10 @@
package transportfakes
import (
http "net/http"
sync "sync"
"net/http"
"sync"
transport "github.com/concourse/concourse/atc/worker/transport"
"github.com/concourse/concourse/atc/worker/transport"
)
type FakeRoundTripper struct {
@ -50,6 +50,12 @@ func (fake *FakeRoundTripper) RoundTripCallCount() int {
return len(fake.roundTripArgsForCall)
}
func (fake *FakeRoundTripper) RoundTripCalls(stub func(*http.Request) (*http.Response, error)) {
fake.roundTripMutex.Lock()
defer fake.roundTripMutex.Unlock()
fake.RoundTripStub = stub
}
func (fake *FakeRoundTripper) RoundTripArgsForCall(i int) *http.Request {
fake.roundTripMutex.RLock()
defer fake.roundTripMutex.RUnlock()
@ -58,6 +64,8 @@ func (fake *FakeRoundTripper) RoundTripArgsForCall(i int) *http.Request {
}
func (fake *FakeRoundTripper) RoundTripReturns(result1 *http.Response, result2 error) {
fake.roundTripMutex.Lock()
defer fake.roundTripMutex.Unlock()
fake.RoundTripStub = nil
fake.roundTripReturns = struct {
result1 *http.Response
@ -66,6 +74,8 @@ func (fake *FakeRoundTripper) RoundTripReturns(result1 *http.Response, result2 e
}
func (fake *FakeRoundTripper) RoundTripReturnsOnCall(i int, result1 *http.Response, result2 error) {
fake.roundTripMutex.Lock()
defer fake.roundTripMutex.Unlock()
fake.RoundTripStub = nil
if fake.roundTripReturnsOnCall == nil {
fake.roundTripReturnsOnCall = make(map[int]struct {

View File

@ -2,10 +2,10 @@
package transportfakes
import (
sync "sync"
"sync"
db "github.com/concourse/concourse/atc/db"
transport "github.com/concourse/concourse/atc/worker/transport"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/worker/transport"
)
type FakeTransportDB struct {
@ -52,6 +52,12 @@ func (fake *FakeTransportDB) GetWorkerCallCount() int {
return len(fake.getWorkerArgsForCall)
}
func (fake *FakeTransportDB) GetWorkerCalls(stub func(string) (db.Worker, bool, error)) {
fake.getWorkerMutex.Lock()
defer fake.getWorkerMutex.Unlock()
fake.GetWorkerStub = stub
}
func (fake *FakeTransportDB) GetWorkerArgsForCall(i int) string {
fake.getWorkerMutex.RLock()
defer fake.getWorkerMutex.RUnlock()
@ -60,6 +66,8 @@ func (fake *FakeTransportDB) GetWorkerArgsForCall(i int) string {
}
func (fake *FakeTransportDB) GetWorkerReturns(result1 db.Worker, result2 bool, result3 error) {
fake.getWorkerMutex.Lock()
defer fake.getWorkerMutex.Unlock()
fake.GetWorkerStub = nil
fake.getWorkerReturns = struct {
result1 db.Worker
@ -69,6 +77,8 @@ func (fake *FakeTransportDB) GetWorkerReturns(result1 db.Worker, result2 bool, r
}
func (fake *FakeTransportDB) GetWorkerReturnsOnCall(i int, result1 db.Worker, result2 bool, result3 error) {
fake.getWorkerMutex.Lock()
defer fake.getWorkerMutex.Unlock()
fake.GetWorkerStub = nil
if fake.getWorkerReturnsOnCall == nil {
fake.getWorkerReturnsOnCall = make(map[int]struct {

View File

@ -65,6 +65,10 @@ type Worker interface {
CreateVolume(logger lager.Logger, spec VolumeSpec, teamID int, volumeType db.VolumeType) (Volume, error)
GardenClient() garden.Client
ActiveTasks() (int, error)
IncreaseActiveTasks() error
DecreaseActiveTasks() error
}
type gardenWorker struct {
@ -767,3 +771,13 @@ insert_coin:
return true
}
func (worker *gardenWorker) ActiveTasks() (int, error) {
return worker.dbWorker.ActiveTasks()
}
func (worker *gardenWorker) IncreaseActiveTasks() error {
return worker.dbWorker.IncreaseActiveTasks()
}
func (worker *gardenWorker) DecreaseActiveTasks() error {
return worker.dbWorker.DecreaseActiveTasks()
}
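These three methods are thin pass-throughs to the db.Worker counters, and they are the only surface the new placement logic needs: a limit-active-tasks strategy can rank candidates by ActiveTasks and skip anyone already at the cap, then bump the counter once a task is placed and release it when the task finishes. A minimal sketch of that selection step, assuming a hypothetical chooseLeastBusy helper (this is not the LimitActiveTasksPlacementStrategy shipped in this commit):

package example

import (
	"errors"

	"github.com/concourse/concourse/atc/worker"
)

// chooseLeastBusy is illustrative only: it returns the candidate reporting the
// fewest active tasks, skipping any worker already at maxTasks (0 = no limit).
func chooseLeastBusy(workers []worker.Worker, maxTasks int) (worker.Worker, error) {
	var best worker.Worker
	bestCount := -1
	for _, w := range workers {
		count, err := w.ActiveTasks()
		if err != nil {
			return nil, err
		}
		if maxTasks > 0 && count >= maxTasks {
			continue // already at the configured per-worker cap
		}
		if bestCount == -1 || count < bestCount {
			best, bestCount = w, count
		}
	}
	if best == nil {
		return nil, errors.New("no worker below the active-task limit")
	}
	return best, nil
}

The caller would still be responsible for incrementing the count on the chosen worker (IncreaseActiveTasks) and decrementing it when the task completes (DecreaseActiveTasks), which is exactly what the delegating methods above expose.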

View File

@ -2,10 +2,10 @@
package workerfakes
import (
io "io"
sync "sync"
"io"
"sync"
worker "github.com/concourse/concourse/atc/worker"
"github.com/concourse/concourse/atc/worker"
)
type FakeArtifactDestination struct {
@ -50,6 +50,12 @@ func (fake *FakeArtifactDestination) StreamInCallCount() int {
return len(fake.streamInArgsForCall)
}
func (fake *FakeArtifactDestination) StreamInCalls(stub func(string, io.Reader) error) {
fake.streamInMutex.Lock()
defer fake.streamInMutex.Unlock()
fake.StreamInStub = stub
}
func (fake *FakeArtifactDestination) StreamInArgsForCall(i int) (string, io.Reader) {
fake.streamInMutex.RLock()
defer fake.streamInMutex.RUnlock()
@ -58,6 +64,8 @@ func (fake *FakeArtifactDestination) StreamInArgsForCall(i int) (string, io.Read
}
func (fake *FakeArtifactDestination) StreamInReturns(result1 error) {
fake.streamInMutex.Lock()
defer fake.streamInMutex.Unlock()
fake.StreamInStub = nil
fake.streamInReturns = struct {
result1 error
@ -65,6 +73,8 @@ func (fake *FakeArtifactDestination) StreamInReturns(result1 error) {
}
func (fake *FakeArtifactDestination) StreamInReturnsOnCall(i int, result1 error) {
fake.streamInMutex.Lock()
defer fake.streamInMutex.Unlock()
fake.StreamInStub = nil
if fake.streamInReturnsOnCall == nil {
fake.streamInReturnsOnCall = make(map[int]struct {

View File

@ -2,11 +2,11 @@
package workerfakes
import (
io "io"
sync "sync"
"io"
"sync"
lager "code.cloudfoundry.org/lager"
worker "github.com/concourse/concourse/atc/worker"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc/worker"
)
type FakeArtifactSource struct {
@ -81,6 +81,12 @@ func (fake *FakeArtifactSource) StreamFileCallCount() int {
return len(fake.streamFileArgsForCall)
}
func (fake *FakeArtifactSource) StreamFileCalls(stub func(lager.Logger, string) (io.ReadCloser, error)) {
fake.streamFileMutex.Lock()
defer fake.streamFileMutex.Unlock()
fake.StreamFileStub = stub
}
func (fake *FakeArtifactSource) StreamFileArgsForCall(i int) (lager.Logger, string) {
fake.streamFileMutex.RLock()
defer fake.streamFileMutex.RUnlock()
@ -89,6 +95,8 @@ func (fake *FakeArtifactSource) StreamFileArgsForCall(i int) (lager.Logger, stri
}
func (fake *FakeArtifactSource) StreamFileReturns(result1 io.ReadCloser, result2 error) {
fake.streamFileMutex.Lock()
defer fake.streamFileMutex.Unlock()
fake.StreamFileStub = nil
fake.streamFileReturns = struct {
result1 io.ReadCloser
@ -97,6 +105,8 @@ func (fake *FakeArtifactSource) StreamFileReturns(result1 io.ReadCloser, result2
}
func (fake *FakeArtifactSource) StreamFileReturnsOnCall(i int, result1 io.ReadCloser, result2 error) {
fake.streamFileMutex.Lock()
defer fake.streamFileMutex.Unlock()
fake.StreamFileStub = nil
if fake.streamFileReturnsOnCall == nil {
fake.streamFileReturnsOnCall = make(map[int]struct {
@ -135,6 +145,12 @@ func (fake *FakeArtifactSource) StreamToCallCount() int {
return len(fake.streamToArgsForCall)
}
func (fake *FakeArtifactSource) StreamToCalls(stub func(lager.Logger, worker.ArtifactDestination) error) {
fake.streamToMutex.Lock()
defer fake.streamToMutex.Unlock()
fake.StreamToStub = stub
}
func (fake *FakeArtifactSource) StreamToArgsForCall(i int) (lager.Logger, worker.ArtifactDestination) {
fake.streamToMutex.RLock()
defer fake.streamToMutex.RUnlock()
@ -143,6 +159,8 @@ func (fake *FakeArtifactSource) StreamToArgsForCall(i int) (lager.Logger, worker
}
func (fake *FakeArtifactSource) StreamToReturns(result1 error) {
fake.streamToMutex.Lock()
defer fake.streamToMutex.Unlock()
fake.StreamToStub = nil
fake.streamToReturns = struct {
result1 error
@ -150,6 +168,8 @@ func (fake *FakeArtifactSource) StreamToReturns(result1 error) {
}
func (fake *FakeArtifactSource) StreamToReturnsOnCall(i int, result1 error) {
fake.streamToMutex.Lock()
defer fake.streamToMutex.Unlock()
fake.StreamToStub = nil
if fake.streamToReturnsOnCall == nil {
fake.streamToReturnsOnCall = make(map[int]struct {
@ -186,6 +206,12 @@ func (fake *FakeArtifactSource) VolumeOnCallCount() int {
return len(fake.volumeOnArgsForCall)
}
func (fake *FakeArtifactSource) VolumeOnCalls(stub func(lager.Logger, worker.Worker) (worker.Volume, bool, error)) {
fake.volumeOnMutex.Lock()
defer fake.volumeOnMutex.Unlock()
fake.VolumeOnStub = stub
}
func (fake *FakeArtifactSource) VolumeOnArgsForCall(i int) (lager.Logger, worker.Worker) {
fake.volumeOnMutex.RLock()
defer fake.volumeOnMutex.RUnlock()
@ -194,6 +220,8 @@ func (fake *FakeArtifactSource) VolumeOnArgsForCall(i int) (lager.Logger, worker
}
func (fake *FakeArtifactSource) VolumeOnReturns(result1 worker.Volume, result2 bool, result3 error) {
fake.volumeOnMutex.Lock()
defer fake.volumeOnMutex.Unlock()
fake.VolumeOnStub = nil
fake.volumeOnReturns = struct {
result1 worker.Volume
@ -203,6 +231,8 @@ func (fake *FakeArtifactSource) VolumeOnReturns(result1 worker.Volume, result2 b
}
func (fake *FakeArtifactSource) VolumeOnReturnsOnCall(i int, result1 worker.Volume, result2 bool, result3 error) {
fake.volumeOnMutex.Lock()
defer fake.volumeOnMutex.Unlock()
fake.VolumeOnStub = nil
if fake.volumeOnReturnsOnCall == nil {
fake.volumeOnReturnsOnCall = make(map[int]struct {

View File

@ -2,10 +2,10 @@
package workerfakes
import (
sync "sync"
"sync"
garden "code.cloudfoundry.org/garden"
worker "github.com/concourse/concourse/atc/worker"
"code.cloudfoundry.org/garden"
"github.com/concourse/concourse/atc/worker"
)
type FakeBindMountSource struct {
@ -52,6 +52,12 @@ func (fake *FakeBindMountSource) VolumeOnCallCount() int {
return len(fake.volumeOnArgsForCall)
}
func (fake *FakeBindMountSource) VolumeOnCalls(stub func(worker.Worker) (garden.BindMount, bool, error)) {
fake.volumeOnMutex.Lock()
defer fake.volumeOnMutex.Unlock()
fake.VolumeOnStub = stub
}
func (fake *FakeBindMountSource) VolumeOnArgsForCall(i int) worker.Worker {
fake.volumeOnMutex.RLock()
defer fake.volumeOnMutex.RUnlock()
@ -60,6 +66,8 @@ func (fake *FakeBindMountSource) VolumeOnArgsForCall(i int) worker.Worker {
}
func (fake *FakeBindMountSource) VolumeOnReturns(result1 garden.BindMount, result2 bool, result3 error) {
fake.volumeOnMutex.Lock()
defer fake.volumeOnMutex.Unlock()
fake.VolumeOnStub = nil
fake.volumeOnReturns = struct {
result1 garden.BindMount
@ -69,6 +77,8 @@ func (fake *FakeBindMountSource) VolumeOnReturns(result1 garden.BindMount, resul
}
func (fake *FakeBindMountSource) VolumeOnReturnsOnCall(i int, result1 garden.BindMount, result2 bool, result3 error) {
fake.volumeOnMutex.Lock()
defer fake.volumeOnMutex.Unlock()
fake.VolumeOnStub = nil
if fake.volumeOnReturnsOnCall == nil {
fake.volumeOnReturnsOnCall = make(map[int]struct {

View File

@ -2,12 +2,12 @@
package workerfakes
import (
io "io"
sync "sync"
time "time"
"io"
"sync"
"time"
garden "code.cloudfoundry.org/garden"
worker "github.com/concourse/concourse/atc/worker"
"code.cloudfoundry.org/garden"
"github.com/concourse/concourse/atc/worker"
)
type FakeContainer struct {
@ -322,6 +322,12 @@ func (fake *FakeContainer) AttachCallCount() int {
return len(fake.attachArgsForCall)
}
func (fake *FakeContainer) AttachCalls(stub func(string, garden.ProcessIO) (garden.Process, error)) {
fake.attachMutex.Lock()
defer fake.attachMutex.Unlock()
fake.AttachStub = stub
}
func (fake *FakeContainer) AttachArgsForCall(i int) (string, garden.ProcessIO) {
fake.attachMutex.RLock()
defer fake.attachMutex.RUnlock()
@ -330,6 +336,8 @@ func (fake *FakeContainer) AttachArgsForCall(i int) (string, garden.ProcessIO) {
}
func (fake *FakeContainer) AttachReturns(result1 garden.Process, result2 error) {
fake.attachMutex.Lock()
defer fake.attachMutex.Unlock()
fake.AttachStub = nil
fake.attachReturns = struct {
result1 garden.Process
@ -338,6 +346,8 @@ func (fake *FakeContainer) AttachReturns(result1 garden.Process, result2 error)
}
func (fake *FakeContainer) AttachReturnsOnCall(i int, result1 garden.Process, result2 error) {
fake.attachMutex.Lock()
defer fake.attachMutex.Unlock()
fake.AttachStub = nil
if fake.attachReturnsOnCall == nil {
fake.attachReturnsOnCall = make(map[int]struct {
@ -380,6 +390,12 @@ func (fake *FakeContainer) BulkNetOutCallCount() int {
return len(fake.bulkNetOutArgsForCall)
}
func (fake *FakeContainer) BulkNetOutCalls(stub func([]garden.NetOutRule) error) {
fake.bulkNetOutMutex.Lock()
defer fake.bulkNetOutMutex.Unlock()
fake.BulkNetOutStub = stub
}
func (fake *FakeContainer) BulkNetOutArgsForCall(i int) []garden.NetOutRule {
fake.bulkNetOutMutex.RLock()
defer fake.bulkNetOutMutex.RUnlock()
@ -388,6 +404,8 @@ func (fake *FakeContainer) BulkNetOutArgsForCall(i int) []garden.NetOutRule {
}
func (fake *FakeContainer) BulkNetOutReturns(result1 error) {
fake.bulkNetOutMutex.Lock()
defer fake.bulkNetOutMutex.Unlock()
fake.BulkNetOutStub = nil
fake.bulkNetOutReturns = struct {
result1 error
@ -395,6 +413,8 @@ func (fake *FakeContainer) BulkNetOutReturns(result1 error) {
}
func (fake *FakeContainer) BulkNetOutReturnsOnCall(i int, result1 error) {
fake.bulkNetOutMutex.Lock()
defer fake.bulkNetOutMutex.Unlock()
fake.BulkNetOutStub = nil
if fake.bulkNetOutReturnsOnCall == nil {
fake.bulkNetOutReturnsOnCall = make(map[int]struct {
@ -429,7 +449,15 @@ func (fake *FakeContainer) CurrentBandwidthLimitsCallCount() int {
return len(fake.currentBandwidthLimitsArgsForCall)
}
func (fake *FakeContainer) CurrentBandwidthLimitsCalls(stub func() (garden.BandwidthLimits, error)) {
fake.currentBandwidthLimitsMutex.Lock()
defer fake.currentBandwidthLimitsMutex.Unlock()
fake.CurrentBandwidthLimitsStub = stub
}
func (fake *FakeContainer) CurrentBandwidthLimitsReturns(result1 garden.BandwidthLimits, result2 error) {
fake.currentBandwidthLimitsMutex.Lock()
defer fake.currentBandwidthLimitsMutex.Unlock()
fake.CurrentBandwidthLimitsStub = nil
fake.currentBandwidthLimitsReturns = struct {
result1 garden.BandwidthLimits
@ -438,6 +466,8 @@ func (fake *FakeContainer) CurrentBandwidthLimitsReturns(result1 garden.Bandwidt
}
func (fake *FakeContainer) CurrentBandwidthLimitsReturnsOnCall(i int, result1 garden.BandwidthLimits, result2 error) {
fake.currentBandwidthLimitsMutex.Lock()
defer fake.currentBandwidthLimitsMutex.Unlock()
fake.CurrentBandwidthLimitsStub = nil
if fake.currentBandwidthLimitsReturnsOnCall == nil {
fake.currentBandwidthLimitsReturnsOnCall = make(map[int]struct {
@ -474,7 +504,15 @@ func (fake *FakeContainer) CurrentCPULimitsCallCount() int {
return len(fake.currentCPULimitsArgsForCall)
}
func (fake *FakeContainer) CurrentCPULimitsCalls(stub func() (garden.CPULimits, error)) {
fake.currentCPULimitsMutex.Lock()
defer fake.currentCPULimitsMutex.Unlock()
fake.CurrentCPULimitsStub = stub
}
func (fake *FakeContainer) CurrentCPULimitsReturns(result1 garden.CPULimits, result2 error) {
fake.currentCPULimitsMutex.Lock()
defer fake.currentCPULimitsMutex.Unlock()
fake.CurrentCPULimitsStub = nil
fake.currentCPULimitsReturns = struct {
result1 garden.CPULimits
@ -483,6 +521,8 @@ func (fake *FakeContainer) CurrentCPULimitsReturns(result1 garden.CPULimits, res
}
func (fake *FakeContainer) CurrentCPULimitsReturnsOnCall(i int, result1 garden.CPULimits, result2 error) {
fake.currentCPULimitsMutex.Lock()
defer fake.currentCPULimitsMutex.Unlock()
fake.CurrentCPULimitsStub = nil
if fake.currentCPULimitsReturnsOnCall == nil {
fake.currentCPULimitsReturnsOnCall = make(map[int]struct {
@ -519,7 +559,15 @@ func (fake *FakeContainer) CurrentDiskLimitsCallCount() int {
return len(fake.currentDiskLimitsArgsForCall)
}
func (fake *FakeContainer) CurrentDiskLimitsCalls(stub func() (garden.DiskLimits, error)) {
fake.currentDiskLimitsMutex.Lock()
defer fake.currentDiskLimitsMutex.Unlock()
fake.CurrentDiskLimitsStub = stub
}
func (fake *FakeContainer) CurrentDiskLimitsReturns(result1 garden.DiskLimits, result2 error) {
fake.currentDiskLimitsMutex.Lock()
defer fake.currentDiskLimitsMutex.Unlock()
fake.CurrentDiskLimitsStub = nil
fake.currentDiskLimitsReturns = struct {
result1 garden.DiskLimits
@ -528,6 +576,8 @@ func (fake *FakeContainer) CurrentDiskLimitsReturns(result1 garden.DiskLimits, r
}
func (fake *FakeContainer) CurrentDiskLimitsReturnsOnCall(i int, result1 garden.DiskLimits, result2 error) {
fake.currentDiskLimitsMutex.Lock()
defer fake.currentDiskLimitsMutex.Unlock()
fake.CurrentDiskLimitsStub = nil
if fake.currentDiskLimitsReturnsOnCall == nil {
fake.currentDiskLimitsReturnsOnCall = make(map[int]struct {
@ -564,7 +614,15 @@ func (fake *FakeContainer) CurrentMemoryLimitsCallCount() int {
return len(fake.currentMemoryLimitsArgsForCall)
}
func (fake *FakeContainer) CurrentMemoryLimitsCalls(stub func() (garden.MemoryLimits, error)) {
fake.currentMemoryLimitsMutex.Lock()
defer fake.currentMemoryLimitsMutex.Unlock()
fake.CurrentMemoryLimitsStub = stub
}
func (fake *FakeContainer) CurrentMemoryLimitsReturns(result1 garden.MemoryLimits, result2 error) {
fake.currentMemoryLimitsMutex.Lock()
defer fake.currentMemoryLimitsMutex.Unlock()
fake.CurrentMemoryLimitsStub = nil
fake.currentMemoryLimitsReturns = struct {
result1 garden.MemoryLimits
@ -573,6 +631,8 @@ func (fake *FakeContainer) CurrentMemoryLimitsReturns(result1 garden.MemoryLimit
}
func (fake *FakeContainer) CurrentMemoryLimitsReturnsOnCall(i int, result1 garden.MemoryLimits, result2 error) {
fake.currentMemoryLimitsMutex.Lock()
defer fake.currentMemoryLimitsMutex.Unlock()
fake.CurrentMemoryLimitsStub = nil
if fake.currentMemoryLimitsReturnsOnCall == nil {
fake.currentMemoryLimitsReturnsOnCall = make(map[int]struct {
@ -609,7 +669,15 @@ func (fake *FakeContainer) DestroyCallCount() int {
return len(fake.destroyArgsForCall)
}
func (fake *FakeContainer) DestroyCalls(stub func() error) {
fake.destroyMutex.Lock()
defer fake.destroyMutex.Unlock()
fake.DestroyStub = stub
}
func (fake *FakeContainer) DestroyReturns(result1 error) {
fake.destroyMutex.Lock()
defer fake.destroyMutex.Unlock()
fake.DestroyStub = nil
fake.destroyReturns = struct {
result1 error
@ -617,6 +685,8 @@ func (fake *FakeContainer) DestroyReturns(result1 error) {
}
func (fake *FakeContainer) DestroyReturnsOnCall(i int, result1 error) {
fake.destroyMutex.Lock()
defer fake.destroyMutex.Unlock()
fake.DestroyStub = nil
if fake.destroyReturnsOnCall == nil {
fake.destroyReturnsOnCall = make(map[int]struct {
@ -651,7 +721,15 @@ func (fake *FakeContainer) HandleCallCount() int {
return len(fake.handleArgsForCall)
}
func (fake *FakeContainer) HandleCalls(stub func() string) {
fake.handleMutex.Lock()
defer fake.handleMutex.Unlock()
fake.HandleStub = stub
}
func (fake *FakeContainer) HandleReturns(result1 string) {
fake.handleMutex.Lock()
defer fake.handleMutex.Unlock()
fake.HandleStub = nil
fake.handleReturns = struct {
result1 string
@ -659,6 +737,8 @@ func (fake *FakeContainer) HandleReturns(result1 string) {
}
func (fake *FakeContainer) HandleReturnsOnCall(i int, result1 string) {
fake.handleMutex.Lock()
defer fake.handleMutex.Unlock()
fake.HandleStub = nil
if fake.handleReturnsOnCall == nil {
fake.handleReturnsOnCall = make(map[int]struct {
@ -693,7 +773,15 @@ func (fake *FakeContainer) InfoCallCount() int {
return len(fake.infoArgsForCall)
}
func (fake *FakeContainer) InfoCalls(stub func() (garden.ContainerInfo, error)) {
fake.infoMutex.Lock()
defer fake.infoMutex.Unlock()
fake.InfoStub = stub
}
func (fake *FakeContainer) InfoReturns(result1 garden.ContainerInfo, result2 error) {
fake.infoMutex.Lock()
defer fake.infoMutex.Unlock()
fake.InfoStub = nil
fake.infoReturns = struct {
result1 garden.ContainerInfo
@ -702,6 +790,8 @@ func (fake *FakeContainer) InfoReturns(result1 garden.ContainerInfo, result2 err
}
func (fake *FakeContainer) InfoReturnsOnCall(i int, result1 garden.ContainerInfo, result2 error) {
fake.infoMutex.Lock()
defer fake.infoMutex.Unlock()
fake.InfoStub = nil
if fake.infoReturnsOnCall == nil {
fake.infoReturnsOnCall = make(map[int]struct {
@ -738,7 +828,15 @@ func (fake *FakeContainer) MarkAsHijackedCallCount() int {
return len(fake.markAsHijackedArgsForCall)
}
func (fake *FakeContainer) MarkAsHijackedCalls(stub func() error) {
fake.markAsHijackedMutex.Lock()
defer fake.markAsHijackedMutex.Unlock()
fake.MarkAsHijackedStub = stub
}
func (fake *FakeContainer) MarkAsHijackedReturns(result1 error) {
fake.markAsHijackedMutex.Lock()
defer fake.markAsHijackedMutex.Unlock()
fake.MarkAsHijackedStub = nil
fake.markAsHijackedReturns = struct {
result1 error
@ -746,6 +844,8 @@ func (fake *FakeContainer) MarkAsHijackedReturns(result1 error) {
}
func (fake *FakeContainer) MarkAsHijackedReturnsOnCall(i int, result1 error) {
fake.markAsHijackedMutex.Lock()
defer fake.markAsHijackedMutex.Unlock()
fake.MarkAsHijackedStub = nil
if fake.markAsHijackedReturnsOnCall == nil {
fake.markAsHijackedReturnsOnCall = make(map[int]struct {
@ -780,7 +880,15 @@ func (fake *FakeContainer) MetricsCallCount() int {
return len(fake.metricsArgsForCall)
}
func (fake *FakeContainer) MetricsCalls(stub func() (garden.Metrics, error)) {
fake.metricsMutex.Lock()
defer fake.metricsMutex.Unlock()
fake.MetricsStub = stub
}
func (fake *FakeContainer) MetricsReturns(result1 garden.Metrics, result2 error) {
fake.metricsMutex.Lock()
defer fake.metricsMutex.Unlock()
fake.MetricsStub = nil
fake.metricsReturns = struct {
result1 garden.Metrics
@ -789,6 +897,8 @@ func (fake *FakeContainer) MetricsReturns(result1 garden.Metrics, result2 error)
}
func (fake *FakeContainer) MetricsReturnsOnCall(i int, result1 garden.Metrics, result2 error) {
fake.metricsMutex.Lock()
defer fake.metricsMutex.Unlock()
fake.MetricsStub = nil
if fake.metricsReturnsOnCall == nil {
fake.metricsReturnsOnCall = make(map[int]struct {
@ -827,6 +937,12 @@ func (fake *FakeContainer) NetInCallCount() int {
return len(fake.netInArgsForCall)
}
func (fake *FakeContainer) NetInCalls(stub func(uint32, uint32) (uint32, uint32, error)) {
fake.netInMutex.Lock()
defer fake.netInMutex.Unlock()
fake.NetInStub = stub
}
func (fake *FakeContainer) NetInArgsForCall(i int) (uint32, uint32) {
fake.netInMutex.RLock()
defer fake.netInMutex.RUnlock()
@ -835,6 +951,8 @@ func (fake *FakeContainer) NetInArgsForCall(i int) (uint32, uint32) {
}
func (fake *FakeContainer) NetInReturns(result1 uint32, result2 uint32, result3 error) {
fake.netInMutex.Lock()
defer fake.netInMutex.Unlock()
fake.NetInStub = nil
fake.netInReturns = struct {
result1 uint32
@ -844,6 +962,8 @@ func (fake *FakeContainer) NetInReturns(result1 uint32, result2 uint32, result3
}
func (fake *FakeContainer) NetInReturnsOnCall(i int, result1 uint32, result2 uint32, result3 error) {
fake.netInMutex.Lock()
defer fake.netInMutex.Unlock()
fake.NetInStub = nil
if fake.netInReturnsOnCall == nil {
fake.netInReturnsOnCall = make(map[int]struct {
@ -883,6 +1003,12 @@ func (fake *FakeContainer) NetOutCallCount() int {
return len(fake.netOutArgsForCall)
}
func (fake *FakeContainer) NetOutCalls(stub func(garden.NetOutRule) error) {
fake.netOutMutex.Lock()
defer fake.netOutMutex.Unlock()
fake.NetOutStub = stub
}
func (fake *FakeContainer) NetOutArgsForCall(i int) garden.NetOutRule {
fake.netOutMutex.RLock()
defer fake.netOutMutex.RUnlock()
@ -891,6 +1017,8 @@ func (fake *FakeContainer) NetOutArgsForCall(i int) garden.NetOutRule {
}
func (fake *FakeContainer) NetOutReturns(result1 error) {
fake.netOutMutex.Lock()
defer fake.netOutMutex.Unlock()
fake.NetOutStub = nil
fake.netOutReturns = struct {
result1 error
@ -898,6 +1026,8 @@ func (fake *FakeContainer) NetOutReturns(result1 error) {
}
func (fake *FakeContainer) NetOutReturnsOnCall(i int, result1 error) {
fake.netOutMutex.Lock()
defer fake.netOutMutex.Unlock()
fake.NetOutStub = nil
if fake.netOutReturnsOnCall == nil {
fake.netOutReturnsOnCall = make(map[int]struct {
@ -932,7 +1062,15 @@ func (fake *FakeContainer) PropertiesCallCount() int {
return len(fake.propertiesArgsForCall)
}
func (fake *FakeContainer) PropertiesCalls(stub func() (garden.Properties, error)) {
fake.propertiesMutex.Lock()
defer fake.propertiesMutex.Unlock()
fake.PropertiesStub = stub
}
func (fake *FakeContainer) PropertiesReturns(result1 garden.Properties, result2 error) {
fake.propertiesMutex.Lock()
defer fake.propertiesMutex.Unlock()
fake.PropertiesStub = nil
fake.propertiesReturns = struct {
result1 garden.Properties
@ -941,6 +1079,8 @@ func (fake *FakeContainer) PropertiesReturns(result1 garden.Properties, result2
}
func (fake *FakeContainer) PropertiesReturnsOnCall(i int, result1 garden.Properties, result2 error) {
fake.propertiesMutex.Lock()
defer fake.propertiesMutex.Unlock()
fake.PropertiesStub = nil
if fake.propertiesReturnsOnCall == nil {
fake.propertiesReturnsOnCall = make(map[int]struct {
@ -978,6 +1118,12 @@ func (fake *FakeContainer) PropertyCallCount() int {
return len(fake.propertyArgsForCall)
}
func (fake *FakeContainer) PropertyCalls(stub func(string) (string, error)) {
fake.propertyMutex.Lock()
defer fake.propertyMutex.Unlock()
fake.PropertyStub = stub
}
func (fake *FakeContainer) PropertyArgsForCall(i int) string {
fake.propertyMutex.RLock()
defer fake.propertyMutex.RUnlock()
@ -986,6 +1132,8 @@ func (fake *FakeContainer) PropertyArgsForCall(i int) string {
}
func (fake *FakeContainer) PropertyReturns(result1 string, result2 error) {
fake.propertyMutex.Lock()
defer fake.propertyMutex.Unlock()
fake.PropertyStub = nil
fake.propertyReturns = struct {
result1 string
@ -994,6 +1142,8 @@ func (fake *FakeContainer) PropertyReturns(result1 string, result2 error) {
}
func (fake *FakeContainer) PropertyReturnsOnCall(i int, result1 string, result2 error) {
fake.propertyMutex.Lock()
defer fake.propertyMutex.Unlock()
fake.PropertyStub = nil
if fake.propertyReturnsOnCall == nil {
fake.propertyReturnsOnCall = make(map[int]struct {
@ -1031,6 +1181,12 @@ func (fake *FakeContainer) RemovePropertyCallCount() int {
return len(fake.removePropertyArgsForCall)
}
func (fake *FakeContainer) RemovePropertyCalls(stub func(string) error) {
fake.removePropertyMutex.Lock()
defer fake.removePropertyMutex.Unlock()
fake.RemovePropertyStub = stub
}
func (fake *FakeContainer) RemovePropertyArgsForCall(i int) string {
fake.removePropertyMutex.RLock()
defer fake.removePropertyMutex.RUnlock()
@ -1039,6 +1195,8 @@ func (fake *FakeContainer) RemovePropertyArgsForCall(i int) string {
}
func (fake *FakeContainer) RemovePropertyReturns(result1 error) {
fake.removePropertyMutex.Lock()
defer fake.removePropertyMutex.Unlock()
fake.RemovePropertyStub = nil
fake.removePropertyReturns = struct {
result1 error
@ -1046,6 +1204,8 @@ func (fake *FakeContainer) RemovePropertyReturns(result1 error) {
}
func (fake *FakeContainer) RemovePropertyReturnsOnCall(i int, result1 error) {
fake.removePropertyMutex.Lock()
defer fake.removePropertyMutex.Unlock()
fake.RemovePropertyStub = nil
if fake.removePropertyReturnsOnCall == nil {
fake.removePropertyReturnsOnCall = make(map[int]struct {
@ -1082,6 +1242,12 @@ func (fake *FakeContainer) RunCallCount() int {
return len(fake.runArgsForCall)
}
func (fake *FakeContainer) RunCalls(stub func(garden.ProcessSpec, garden.ProcessIO) (garden.Process, error)) {
fake.runMutex.Lock()
defer fake.runMutex.Unlock()
fake.RunStub = stub
}
func (fake *FakeContainer) RunArgsForCall(i int) (garden.ProcessSpec, garden.ProcessIO) {
fake.runMutex.RLock()
defer fake.runMutex.RUnlock()
@ -1090,6 +1256,8 @@ func (fake *FakeContainer) RunArgsForCall(i int) (garden.ProcessSpec, garden.Pro
}
func (fake *FakeContainer) RunReturns(result1 garden.Process, result2 error) {
fake.runMutex.Lock()
defer fake.runMutex.Unlock()
fake.RunStub = nil
fake.runReturns = struct {
result1 garden.Process
@ -1098,6 +1266,8 @@ func (fake *FakeContainer) RunReturns(result1 garden.Process, result2 error) {
}
func (fake *FakeContainer) RunReturnsOnCall(i int, result1 garden.Process, result2 error) {
fake.runMutex.Lock()
defer fake.runMutex.Unlock()
fake.RunStub = nil
if fake.runReturnsOnCall == nil {
fake.runReturnsOnCall = make(map[int]struct {
@ -1135,6 +1305,12 @@ func (fake *FakeContainer) SetGraceTimeCallCount() int {
return len(fake.setGraceTimeArgsForCall)
}
func (fake *FakeContainer) SetGraceTimeCalls(stub func(time.Duration) error) {
fake.setGraceTimeMutex.Lock()
defer fake.setGraceTimeMutex.Unlock()
fake.SetGraceTimeStub = stub
}
func (fake *FakeContainer) SetGraceTimeArgsForCall(i int) time.Duration {
fake.setGraceTimeMutex.RLock()
defer fake.setGraceTimeMutex.RUnlock()
@ -1143,6 +1319,8 @@ func (fake *FakeContainer) SetGraceTimeArgsForCall(i int) time.Duration {
}
func (fake *FakeContainer) SetGraceTimeReturns(result1 error) {
fake.setGraceTimeMutex.Lock()
defer fake.setGraceTimeMutex.Unlock()
fake.SetGraceTimeStub = nil
fake.setGraceTimeReturns = struct {
result1 error
@ -1150,6 +1328,8 @@ func (fake *FakeContainer) SetGraceTimeReturns(result1 error) {
}
func (fake *FakeContainer) SetGraceTimeReturnsOnCall(i int, result1 error) {
fake.setGraceTimeMutex.Lock()
defer fake.setGraceTimeMutex.Unlock()
fake.SetGraceTimeStub = nil
if fake.setGraceTimeReturnsOnCall == nil {
fake.setGraceTimeReturnsOnCall = make(map[int]struct {
@ -1186,6 +1366,12 @@ func (fake *FakeContainer) SetPropertyCallCount() int {
return len(fake.setPropertyArgsForCall)
}
func (fake *FakeContainer) SetPropertyCalls(stub func(string, string) error) {
fake.setPropertyMutex.Lock()
defer fake.setPropertyMutex.Unlock()
fake.SetPropertyStub = stub
}
func (fake *FakeContainer) SetPropertyArgsForCall(i int) (string, string) {
fake.setPropertyMutex.RLock()
defer fake.setPropertyMutex.RUnlock()
@ -1194,6 +1380,8 @@ func (fake *FakeContainer) SetPropertyArgsForCall(i int) (string, string) {
}
func (fake *FakeContainer) SetPropertyReturns(result1 error) {
fake.setPropertyMutex.Lock()
defer fake.setPropertyMutex.Unlock()
fake.SetPropertyStub = nil
fake.setPropertyReturns = struct {
result1 error
@ -1201,6 +1389,8 @@ func (fake *FakeContainer) SetPropertyReturns(result1 error) {
}
func (fake *FakeContainer) SetPropertyReturnsOnCall(i int, result1 error) {
fake.setPropertyMutex.Lock()
defer fake.setPropertyMutex.Unlock()
fake.SetPropertyStub = nil
if fake.setPropertyReturnsOnCall == nil {
fake.setPropertyReturnsOnCall = make(map[int]struct {
@ -1236,6 +1426,12 @@ func (fake *FakeContainer) StopCallCount() int {
return len(fake.stopArgsForCall)
}
func (fake *FakeContainer) StopCalls(stub func(bool) error) {
fake.stopMutex.Lock()
defer fake.stopMutex.Unlock()
fake.StopStub = stub
}
func (fake *FakeContainer) StopArgsForCall(i int) bool {
fake.stopMutex.RLock()
defer fake.stopMutex.RUnlock()
@ -1244,6 +1440,8 @@ func (fake *FakeContainer) StopArgsForCall(i int) bool {
}
func (fake *FakeContainer) StopReturns(result1 error) {
fake.stopMutex.Lock()
defer fake.stopMutex.Unlock()
fake.StopStub = nil
fake.stopReturns = struct {
result1 error
@ -1251,6 +1449,8 @@ func (fake *FakeContainer) StopReturns(result1 error) {
}
func (fake *FakeContainer) StopReturnsOnCall(i int, result1 error) {
fake.stopMutex.Lock()
defer fake.stopMutex.Unlock()
fake.StopStub = nil
if fake.stopReturnsOnCall == nil {
fake.stopReturnsOnCall = make(map[int]struct {
@ -1286,6 +1486,12 @@ func (fake *FakeContainer) StreamInCallCount() int {
return len(fake.streamInArgsForCall)
}
func (fake *FakeContainer) StreamInCalls(stub func(garden.StreamInSpec) error) {
fake.streamInMutex.Lock()
defer fake.streamInMutex.Unlock()
fake.StreamInStub = stub
}
func (fake *FakeContainer) StreamInArgsForCall(i int) garden.StreamInSpec {
fake.streamInMutex.RLock()
defer fake.streamInMutex.RUnlock()
@ -1294,6 +1500,8 @@ func (fake *FakeContainer) StreamInArgsForCall(i int) garden.StreamInSpec {
}
func (fake *FakeContainer) StreamInReturns(result1 error) {
fake.streamInMutex.Lock()
defer fake.streamInMutex.Unlock()
fake.StreamInStub = nil
fake.streamInReturns = struct {
result1 error
@ -1301,6 +1509,8 @@ func (fake *FakeContainer) StreamInReturns(result1 error) {
}
func (fake *FakeContainer) StreamInReturnsOnCall(i int, result1 error) {
fake.streamInMutex.Lock()
defer fake.streamInMutex.Unlock()
fake.StreamInStub = nil
if fake.streamInReturnsOnCall == nil {
fake.streamInReturnsOnCall = make(map[int]struct {
@ -1336,6 +1546,12 @@ func (fake *FakeContainer) StreamOutCallCount() int {
return len(fake.streamOutArgsForCall)
}
func (fake *FakeContainer) StreamOutCalls(stub func(garden.StreamOutSpec) (io.ReadCloser, error)) {
fake.streamOutMutex.Lock()
defer fake.streamOutMutex.Unlock()
fake.StreamOutStub = stub
}
func (fake *FakeContainer) StreamOutArgsForCall(i int) garden.StreamOutSpec {
fake.streamOutMutex.RLock()
defer fake.streamOutMutex.RUnlock()
@ -1344,6 +1560,8 @@ func (fake *FakeContainer) StreamOutArgsForCall(i int) garden.StreamOutSpec {
}
func (fake *FakeContainer) StreamOutReturns(result1 io.ReadCloser, result2 error) {
fake.streamOutMutex.Lock()
defer fake.streamOutMutex.Unlock()
fake.StreamOutStub = nil
fake.streamOutReturns = struct {
result1 io.ReadCloser
@ -1352,6 +1570,8 @@ func (fake *FakeContainer) StreamOutReturns(result1 io.ReadCloser, result2 error
}
func (fake *FakeContainer) StreamOutReturnsOnCall(i int, result1 io.ReadCloser, result2 error) {
fake.streamOutMutex.Lock()
defer fake.streamOutMutex.Unlock()
fake.StreamOutStub = nil
if fake.streamOutReturnsOnCall == nil {
fake.streamOutReturnsOnCall = make(map[int]struct {
@ -1388,7 +1608,15 @@ func (fake *FakeContainer) VolumeMountsCallCount() int {
return len(fake.volumeMountsArgsForCall)
}
func (fake *FakeContainer) VolumeMountsCalls(stub func() []worker.VolumeMount) {
fake.volumeMountsMutex.Lock()
defer fake.volumeMountsMutex.Unlock()
fake.VolumeMountsStub = stub
}
func (fake *FakeContainer) VolumeMountsReturns(result1 []worker.VolumeMount) {
fake.volumeMountsMutex.Lock()
defer fake.volumeMountsMutex.Unlock()
fake.VolumeMountsStub = nil
fake.volumeMountsReturns = struct {
result1 []worker.VolumeMount
@ -1396,6 +1624,8 @@ func (fake *FakeContainer) VolumeMountsReturns(result1 []worker.VolumeMount) {
}
func (fake *FakeContainer) VolumeMountsReturnsOnCall(i int, result1 []worker.VolumeMount) {
fake.volumeMountsMutex.Lock()
defer fake.volumeMountsMutex.Unlock()
fake.VolumeMountsStub = nil
if fake.volumeMountsReturnsOnCall == nil {
fake.volumeMountsReturnsOnCall = make(map[int]struct {
@ -1430,7 +1660,15 @@ func (fake *FakeContainer) WorkerNameCallCount() int {
return len(fake.workerNameArgsForCall)
}
func (fake *FakeContainer) WorkerNameCalls(stub func() string) {
fake.workerNameMutex.Lock()
defer fake.workerNameMutex.Unlock()
fake.WorkerNameStub = stub
}
func (fake *FakeContainer) WorkerNameReturns(result1 string) {
fake.workerNameMutex.Lock()
defer fake.workerNameMutex.Unlock()
fake.WorkerNameStub = nil
fake.workerNameReturns = struct {
result1 string
@ -1438,6 +1676,8 @@ func (fake *FakeContainer) WorkerNameReturns(result1 string) {
}
func (fake *FakeContainer) WorkerNameReturnsOnCall(i int, result1 string) {
fake.workerNameMutex.Lock()
defer fake.workerNameMutex.Unlock()
fake.WorkerNameStub = nil
if fake.workerNameReturnsOnCall == nil {
fake.workerNameReturnsOnCall = make(map[int]struct {

View File

@ -2,10 +2,10 @@
package workerfakes
import (
sync "sync"
"sync"
lager "code.cloudfoundry.org/lager"
worker "github.com/concourse/concourse/atc/worker"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc/worker"
)
type FakeContainerPlacementStrategy struct {
@ -24,6 +24,16 @@ type FakeContainerPlacementStrategy struct {
result1 worker.Worker
result2 error
}
ModifiesActiveTasksStub func() bool
modifiesActiveTasksMutex sync.RWMutex
modifiesActiveTasksArgsForCall []struct {
}
modifiesActiveTasksReturns struct {
result1 bool
}
modifiesActiveTasksReturnsOnCall map[int]struct {
result1 bool
}
invocations map[string][][]interface{}
invocationsMutex sync.RWMutex
}
@ -59,6 +69,12 @@ func (fake *FakeContainerPlacementStrategy) ChooseCallCount() int {
return len(fake.chooseArgsForCall)
}
func (fake *FakeContainerPlacementStrategy) ChooseCalls(stub func(lager.Logger, []worker.Worker, worker.ContainerSpec) (worker.Worker, error)) {
fake.chooseMutex.Lock()
defer fake.chooseMutex.Unlock()
fake.ChooseStub = stub
}
func (fake *FakeContainerPlacementStrategy) ChooseArgsForCall(i int) (lager.Logger, []worker.Worker, worker.ContainerSpec) {
fake.chooseMutex.RLock()
defer fake.chooseMutex.RUnlock()
@ -67,6 +83,8 @@ func (fake *FakeContainerPlacementStrategy) ChooseArgsForCall(i int) (lager.Logg
}
func (fake *FakeContainerPlacementStrategy) ChooseReturns(result1 worker.Worker, result2 error) {
fake.chooseMutex.Lock()
defer fake.chooseMutex.Unlock()
fake.ChooseStub = nil
fake.chooseReturns = struct {
result1 worker.Worker
@ -75,6 +93,8 @@ func (fake *FakeContainerPlacementStrategy) ChooseReturns(result1 worker.Worker,
}
func (fake *FakeContainerPlacementStrategy) ChooseReturnsOnCall(i int, result1 worker.Worker, result2 error) {
fake.chooseMutex.Lock()
defer fake.chooseMutex.Unlock()
fake.ChooseStub = nil
if fake.chooseReturnsOnCall == nil {
fake.chooseReturnsOnCall = make(map[int]struct {
@ -88,11 +108,65 @@ func (fake *FakeContainerPlacementStrategy) ChooseReturnsOnCall(i int, result1 w
}{result1, result2}
}
func (fake *FakeContainerPlacementStrategy) ModifiesActiveTasks() bool {
fake.modifiesActiveTasksMutex.Lock()
ret, specificReturn := fake.modifiesActiveTasksReturnsOnCall[len(fake.modifiesActiveTasksArgsForCall)]
fake.modifiesActiveTasksArgsForCall = append(fake.modifiesActiveTasksArgsForCall, struct {
}{})
fake.recordInvocation("ModifiesActiveTasks", []interface{}{})
fake.modifiesActiveTasksMutex.Unlock()
if fake.ModifiesActiveTasksStub != nil {
return fake.ModifiesActiveTasksStub()
}
if specificReturn {
return ret.result1
}
fakeReturns := fake.modifiesActiveTasksReturns
return fakeReturns.result1
}
func (fake *FakeContainerPlacementStrategy) ModifiesActiveTasksCallCount() int {
fake.modifiesActiveTasksMutex.RLock()
defer fake.modifiesActiveTasksMutex.RUnlock()
return len(fake.modifiesActiveTasksArgsForCall)
}
func (fake *FakeContainerPlacementStrategy) ModifiesActiveTasksCalls(stub func() bool) {
fake.modifiesActiveTasksMutex.Lock()
defer fake.modifiesActiveTasksMutex.Unlock()
fake.ModifiesActiveTasksStub = stub
}
func (fake *FakeContainerPlacementStrategy) ModifiesActiveTasksReturns(result1 bool) {
fake.modifiesActiveTasksMutex.Lock()
defer fake.modifiesActiveTasksMutex.Unlock()
fake.ModifiesActiveTasksStub = nil
fake.modifiesActiveTasksReturns = struct {
result1 bool
}{result1}
}
func (fake *FakeContainerPlacementStrategy) ModifiesActiveTasksReturnsOnCall(i int, result1 bool) {
fake.modifiesActiveTasksMutex.Lock()
defer fake.modifiesActiveTasksMutex.Unlock()
fake.ModifiesActiveTasksStub = nil
if fake.modifiesActiveTasksReturnsOnCall == nil {
fake.modifiesActiveTasksReturnsOnCall = make(map[int]struct {
result1 bool
})
}
fake.modifiesActiveTasksReturnsOnCall[i] = struct {
result1 bool
}{result1}
}
func (fake *FakeContainerPlacementStrategy) Invocations() map[string][][]interface{} {
fake.invocationsMutex.RLock()
defer fake.invocationsMutex.RUnlock()
fake.chooseMutex.RLock()
defer fake.chooseMutex.RUnlock()
fake.modifiesActiveTasksMutex.RLock()
defer fake.modifiesActiveTasksMutex.RUnlock()
copiedInvocations := map[string][][]interface{}{}
for key, value := range fake.invocations {
copiedInvocations[key] = value

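The generated ModifiesActiveTasks accessors above follow the usual counterfeiter Returns/ReturnsOnCall/Calls pattern, so tests can simulate a strategy that opts into active-task accounting without a real database-backed worker. A small, hypothetical usage sketch (plain testing-package style; the test name and expectations are illustrative):

package worker_test

import (
	"testing"

	"github.com/concourse/concourse/atc/worker/workerfakes"
)

// Illustrative only: drives the new fake methods the way a pool test might.
func TestFakeStrategyReportsActiveTaskTracking(t *testing.T) {
	fakeStrategy := new(workerfakes.FakeContainerPlacementStrategy)

	// Pretend the strategy under test participates in active-task accounting.
	fakeStrategy.ModifiesActiveTasksReturns(true)

	if !fakeStrategy.ModifiesActiveTasks() {
		t.Fatal("expected the fake to report that it modifies active tasks")
	}
	if fakeStrategy.ModifiesActiveTasksCallCount() != 1 {
		t.Fatalf("expected 1 recorded call, got %d", fakeStrategy.ModifiesActiveTasksCallCount())
	}
}

A pool test would typically hand this fake in where a real strategy is expected, with ModifiesActiveTasksReturns(true) steering execution down the active-task bookkeeping path.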
View File

@ -2,12 +2,12 @@
package workerfakes
import (
context "context"
sync "sync"
"context"
"sync"
lager "code.cloudfoundry.org/lager"
db "github.com/concourse/concourse/atc/db"
worker "github.com/concourse/concourse/atc/worker"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/worker"
)
type FakeImage struct {
@ -56,6 +56,12 @@ func (fake *FakeImage) FetchForContainerCallCount() int {
return len(fake.fetchForContainerArgsForCall)
}
func (fake *FakeImage) FetchForContainerCalls(stub func(context.Context, lager.Logger, db.CreatingContainer) (worker.FetchedImage, error)) {
fake.fetchForContainerMutex.Lock()
defer fake.fetchForContainerMutex.Unlock()
fake.FetchForContainerStub = stub
}
func (fake *FakeImage) FetchForContainerArgsForCall(i int) (context.Context, lager.Logger, db.CreatingContainer) {
fake.fetchForContainerMutex.RLock()
defer fake.fetchForContainerMutex.RUnlock()
@ -64,6 +70,8 @@ func (fake *FakeImage) FetchForContainerArgsForCall(i int) (context.Context, lag
}
func (fake *FakeImage) FetchForContainerReturns(result1 worker.FetchedImage, result2 error) {
fake.fetchForContainerMutex.Lock()
defer fake.fetchForContainerMutex.Unlock()
fake.FetchForContainerStub = nil
fake.fetchForContainerReturns = struct {
result1 worker.FetchedImage
@ -72,6 +80,8 @@ func (fake *FakeImage) FetchForContainerReturns(result1 worker.FetchedImage, res
}
func (fake *FakeImage) FetchForContainerReturnsOnCall(i int, result1 worker.FetchedImage, result2 error) {
fake.fetchForContainerMutex.Lock()
defer fake.fetchForContainerMutex.Unlock()
fake.FetchForContainerStub = nil
if fake.fetchForContainerReturnsOnCall == nil {
fake.fetchForContainerReturnsOnCall = make(map[int]struct {

View File

@ -2,11 +2,11 @@
package workerfakes
import (
sync "sync"
"sync"
lager "code.cloudfoundry.org/lager"
atc "github.com/concourse/concourse/atc"
worker "github.com/concourse/concourse/atc/worker"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/worker"
)
type FakeImageFactory struct {
@ -63,6 +63,12 @@ func (fake *FakeImageFactory) GetImageCallCount() int {
return len(fake.getImageArgsForCall)
}
func (fake *FakeImageFactory) GetImageCalls(stub func(lager.Logger, worker.Worker, worker.VolumeClient, worker.ImageSpec, int, worker.ImageFetchingDelegate, atc.VersionedResourceTypes) (worker.Image, error)) {
fake.getImageMutex.Lock()
defer fake.getImageMutex.Unlock()
fake.GetImageStub = stub
}
func (fake *FakeImageFactory) GetImageArgsForCall(i int) (lager.Logger, worker.Worker, worker.VolumeClient, worker.ImageSpec, int, worker.ImageFetchingDelegate, atc.VersionedResourceTypes) {
fake.getImageMutex.RLock()
defer fake.getImageMutex.RUnlock()
@ -71,6 +77,8 @@ func (fake *FakeImageFactory) GetImageArgsForCall(i int) (lager.Logger, worker.W
}
func (fake *FakeImageFactory) GetImageReturns(result1 worker.Image, result2 error) {
fake.getImageMutex.Lock()
defer fake.getImageMutex.Unlock()
fake.GetImageStub = nil
fake.getImageReturns = struct {
result1 worker.Image
@ -79,6 +87,8 @@ func (fake *FakeImageFactory) GetImageReturns(result1 worker.Image, result2 erro
}
func (fake *FakeImageFactory) GetImageReturnsOnCall(i int, result1 worker.Image, result2 error) {
fake.getImageMutex.Lock()
defer fake.getImageMutex.Unlock()
fake.GetImageStub = nil
if fake.getImageReturnsOnCall == nil {
fake.getImageReturnsOnCall = make(map[int]struct {

View File

@ -2,11 +2,11 @@
package workerfakes
import (
io "io"
sync "sync"
"io"
"sync"
db "github.com/concourse/concourse/atc/db"
worker "github.com/concourse/concourse/atc/worker"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/worker"
)
type FakeImageFetchingDelegate struct {
@ -69,6 +69,12 @@ func (fake *FakeImageFetchingDelegate) ImageVersionDeterminedCallCount() int {
return len(fake.imageVersionDeterminedArgsForCall)
}
func (fake *FakeImageFetchingDelegate) ImageVersionDeterminedCalls(stub func(db.UsedResourceCache) error) {
fake.imageVersionDeterminedMutex.Lock()
defer fake.imageVersionDeterminedMutex.Unlock()
fake.ImageVersionDeterminedStub = stub
}
func (fake *FakeImageFetchingDelegate) ImageVersionDeterminedArgsForCall(i int) db.UsedResourceCache {
fake.imageVersionDeterminedMutex.RLock()
defer fake.imageVersionDeterminedMutex.RUnlock()
@ -77,6 +83,8 @@ func (fake *FakeImageFetchingDelegate) ImageVersionDeterminedArgsForCall(i int)
}
func (fake *FakeImageFetchingDelegate) ImageVersionDeterminedReturns(result1 error) {
fake.imageVersionDeterminedMutex.Lock()
defer fake.imageVersionDeterminedMutex.Unlock()
fake.ImageVersionDeterminedStub = nil
fake.imageVersionDeterminedReturns = struct {
result1 error
@ -84,6 +92,8 @@ func (fake *FakeImageFetchingDelegate) ImageVersionDeterminedReturns(result1 err
}
func (fake *FakeImageFetchingDelegate) ImageVersionDeterminedReturnsOnCall(i int, result1 error) {
fake.imageVersionDeterminedMutex.Lock()
defer fake.imageVersionDeterminedMutex.Unlock()
fake.ImageVersionDeterminedStub = nil
if fake.imageVersionDeterminedReturnsOnCall == nil {
fake.imageVersionDeterminedReturnsOnCall = make(map[int]struct {
@ -118,7 +128,15 @@ func (fake *FakeImageFetchingDelegate) StderrCallCount() int {
return len(fake.stderrArgsForCall)
}
func (fake *FakeImageFetchingDelegate) StderrCalls(stub func() io.Writer) {
fake.stderrMutex.Lock()
defer fake.stderrMutex.Unlock()
fake.StderrStub = stub
}
func (fake *FakeImageFetchingDelegate) StderrReturns(result1 io.Writer) {
fake.stderrMutex.Lock()
defer fake.stderrMutex.Unlock()
fake.StderrStub = nil
fake.stderrReturns = struct {
result1 io.Writer
@ -126,6 +144,8 @@ func (fake *FakeImageFetchingDelegate) StderrReturns(result1 io.Writer) {
}
func (fake *FakeImageFetchingDelegate) StderrReturnsOnCall(i int, result1 io.Writer) {
fake.stderrMutex.Lock()
defer fake.stderrMutex.Unlock()
fake.StderrStub = nil
if fake.stderrReturnsOnCall == nil {
fake.stderrReturnsOnCall = make(map[int]struct {
@ -160,7 +180,15 @@ func (fake *FakeImageFetchingDelegate) StdoutCallCount() int {
return len(fake.stdoutArgsForCall)
}
func (fake *FakeImageFetchingDelegate) StdoutCalls(stub func() io.Writer) {
fake.stdoutMutex.Lock()
defer fake.stdoutMutex.Unlock()
fake.StdoutStub = stub
}
func (fake *FakeImageFetchingDelegate) StdoutReturns(result1 io.Writer) {
fake.stdoutMutex.Lock()
defer fake.stdoutMutex.Unlock()
fake.StdoutStub = nil
fake.stdoutReturns = struct {
result1 io.Writer
@ -168,6 +196,8 @@ func (fake *FakeImageFetchingDelegate) StdoutReturns(result1 io.Writer) {
}
func (fake *FakeImageFetchingDelegate) StdoutReturnsOnCall(i int, result1 io.Writer) {
fake.stdoutMutex.Lock()
defer fake.stdoutMutex.Unlock()
fake.StdoutStub = nil
if fake.stdoutReturnsOnCall == nil {
fake.stdoutReturnsOnCall = make(map[int]struct {

View File

@ -2,9 +2,9 @@
package workerfakes
import (
sync "sync"
"sync"
worker "github.com/concourse/concourse/atc/worker"
"github.com/concourse/concourse/atc/worker"
)
type FakeInputSource struct {
@ -55,7 +55,15 @@ func (fake *FakeInputSource) DestinationPathCallCount() int {
return len(fake.destinationPathArgsForCall)
}
func (fake *FakeInputSource) DestinationPathCalls(stub func() string) {
fake.destinationPathMutex.Lock()
defer fake.destinationPathMutex.Unlock()
fake.DestinationPathStub = stub
}
func (fake *FakeInputSource) DestinationPathReturns(result1 string) {
fake.destinationPathMutex.Lock()
defer fake.destinationPathMutex.Unlock()
fake.DestinationPathStub = nil
fake.destinationPathReturns = struct {
result1 string
@ -63,6 +71,8 @@ func (fake *FakeInputSource) DestinationPathReturns(result1 string) {
}
func (fake *FakeInputSource) DestinationPathReturnsOnCall(i int, result1 string) {
fake.destinationPathMutex.Lock()
defer fake.destinationPathMutex.Unlock()
fake.DestinationPathStub = nil
if fake.destinationPathReturnsOnCall == nil {
fake.destinationPathReturnsOnCall = make(map[int]struct {
@ -97,7 +107,15 @@ func (fake *FakeInputSource) SourceCallCount() int {
return len(fake.sourceArgsForCall)
}
func (fake *FakeInputSource) SourceCalls(stub func() worker.ArtifactSource) {
fake.sourceMutex.Lock()
defer fake.sourceMutex.Unlock()
fake.SourceStub = stub
}
func (fake *FakeInputSource) SourceReturns(result1 worker.ArtifactSource) {
fake.sourceMutex.Lock()
defer fake.sourceMutex.Unlock()
fake.SourceStub = nil
fake.sourceReturns = struct {
result1 worker.ArtifactSource
@ -105,6 +123,8 @@ func (fake *FakeInputSource) SourceReturns(result1 worker.ArtifactSource) {
}
func (fake *FakeInputSource) SourceReturnsOnCall(i int, result1 worker.ArtifactSource) {
fake.sourceMutex.Lock()
defer fake.sourceMutex.Unlock()
fake.SourceStub = nil
if fake.sourceReturnsOnCall == nil {
fake.sourceReturnsOnCall = make(map[int]struct {

View File

@ -2,12 +2,12 @@
package workerfakes
import (
context "context"
sync "sync"
"context"
"sync"
lager "code.cloudfoundry.org/lager"
db "github.com/concourse/concourse/atc/db"
worker "github.com/concourse/concourse/atc/worker"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/worker"
)
type FakePool struct {
@ -72,6 +72,12 @@ func (fake *FakePool) FindOrChooseWorkerCallCount() int {
return len(fake.findOrChooseWorkerArgsForCall)
}
func (fake *FakePool) FindOrChooseWorkerCalls(stub func(lager.Logger, worker.WorkerSpec) (worker.Worker, error)) {
fake.findOrChooseWorkerMutex.Lock()
defer fake.findOrChooseWorkerMutex.Unlock()
fake.FindOrChooseWorkerStub = stub
}
func (fake *FakePool) FindOrChooseWorkerArgsForCall(i int) (lager.Logger, worker.WorkerSpec) {
fake.findOrChooseWorkerMutex.RLock()
defer fake.findOrChooseWorkerMutex.RUnlock()
@ -80,6 +86,8 @@ func (fake *FakePool) FindOrChooseWorkerArgsForCall(i int) (lager.Logger, worker
}
func (fake *FakePool) FindOrChooseWorkerReturns(result1 worker.Worker, result2 error) {
fake.findOrChooseWorkerMutex.Lock()
defer fake.findOrChooseWorkerMutex.Unlock()
fake.FindOrChooseWorkerStub = nil
fake.findOrChooseWorkerReturns = struct {
result1 worker.Worker
@ -88,6 +96,8 @@ func (fake *FakePool) FindOrChooseWorkerReturns(result1 worker.Worker, result2 e
}
func (fake *FakePool) FindOrChooseWorkerReturnsOnCall(i int, result1 worker.Worker, result2 error) {
fake.findOrChooseWorkerMutex.Lock()
defer fake.findOrChooseWorkerMutex.Unlock()
fake.FindOrChooseWorkerStub = nil
if fake.findOrChooseWorkerReturnsOnCall == nil {
fake.findOrChooseWorkerReturnsOnCall = make(map[int]struct {
@ -130,6 +140,12 @@ func (fake *FakePool) FindOrChooseWorkerForContainerCallCount() int {
return len(fake.findOrChooseWorkerForContainerArgsForCall)
}
func (fake *FakePool) FindOrChooseWorkerForContainerCalls(stub func(context.Context, lager.Logger, db.ContainerOwner, worker.ContainerSpec, worker.WorkerSpec, worker.ContainerPlacementStrategy) (worker.Worker, error)) {
fake.findOrChooseWorkerForContainerMutex.Lock()
defer fake.findOrChooseWorkerForContainerMutex.Unlock()
fake.FindOrChooseWorkerForContainerStub = stub
}
func (fake *FakePool) FindOrChooseWorkerForContainerArgsForCall(i int) (context.Context, lager.Logger, db.ContainerOwner, worker.ContainerSpec, worker.WorkerSpec, worker.ContainerPlacementStrategy) {
fake.findOrChooseWorkerForContainerMutex.RLock()
defer fake.findOrChooseWorkerForContainerMutex.RUnlock()
@ -138,6 +154,8 @@ func (fake *FakePool) FindOrChooseWorkerForContainerArgsForCall(i int) (context.
}
func (fake *FakePool) FindOrChooseWorkerForContainerReturns(result1 worker.Worker, result2 error) {
fake.findOrChooseWorkerForContainerMutex.Lock()
defer fake.findOrChooseWorkerForContainerMutex.Unlock()
fake.FindOrChooseWorkerForContainerStub = nil
fake.findOrChooseWorkerForContainerReturns = struct {
result1 worker.Worker
@ -146,6 +164,8 @@ func (fake *FakePool) FindOrChooseWorkerForContainerReturns(result1 worker.Worke
}
func (fake *FakePool) FindOrChooseWorkerForContainerReturnsOnCall(i int, result1 worker.Worker, result2 error) {
fake.findOrChooseWorkerForContainerMutex.Lock()
defer fake.findOrChooseWorkerForContainerMutex.Unlock()
fake.FindOrChooseWorkerForContainerStub = nil
if fake.findOrChooseWorkerForContainerReturnsOnCall == nil {
fake.findOrChooseWorkerForContainerReturnsOnCall = make(map[int]struct {

View File

@ -2,13 +2,13 @@
package workerfakes
import (
io "io"
sync "sync"
"io"
"sync"
lager "code.cloudfoundry.org/lager"
baggageclaim "github.com/concourse/baggageclaim"
db "github.com/concourse/concourse/atc/db"
worker "github.com/concourse/concourse/atc/worker"
"code.cloudfoundry.org/lager"
"github.com/concourse/baggageclaim"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/worker"
)
type FakeVolume struct {
@ -203,7 +203,15 @@ func (fake *FakeVolume) COWStrategyCallCount() int {
return len(fake.cOWStrategyArgsForCall)
}
func (fake *FakeVolume) COWStrategyCalls(stub func() baggageclaim.COWStrategy) {
fake.cOWStrategyMutex.Lock()
defer fake.cOWStrategyMutex.Unlock()
fake.COWStrategyStub = stub
}
func (fake *FakeVolume) COWStrategyReturns(result1 baggageclaim.COWStrategy) {
fake.cOWStrategyMutex.Lock()
defer fake.cOWStrategyMutex.Unlock()
fake.COWStrategyStub = nil
fake.cOWStrategyReturns = struct {
result1 baggageclaim.COWStrategy
@ -211,6 +219,8 @@ func (fake *FakeVolume) COWStrategyReturns(result1 baggageclaim.COWStrategy) {
}
func (fake *FakeVolume) COWStrategyReturnsOnCall(i int, result1 baggageclaim.COWStrategy) {
fake.cOWStrategyMutex.Lock()
defer fake.cOWStrategyMutex.Unlock()
fake.COWStrategyStub = nil
if fake.cOWStrategyReturnsOnCall == nil {
fake.cOWStrategyReturnsOnCall = make(map[int]struct {
@ -247,6 +257,12 @@ func (fake *FakeVolume) CreateChildForContainerCallCount() int {
return len(fake.createChildForContainerArgsForCall)
}
func (fake *FakeVolume) CreateChildForContainerCalls(stub func(db.CreatingContainer, string) (db.CreatingVolume, error)) {
fake.createChildForContainerMutex.Lock()
defer fake.createChildForContainerMutex.Unlock()
fake.CreateChildForContainerStub = stub
}
func (fake *FakeVolume) CreateChildForContainerArgsForCall(i int) (db.CreatingContainer, string) {
fake.createChildForContainerMutex.RLock()
defer fake.createChildForContainerMutex.RUnlock()
@ -255,6 +271,8 @@ func (fake *FakeVolume) CreateChildForContainerArgsForCall(i int) (db.CreatingCo
}
func (fake *FakeVolume) CreateChildForContainerReturns(result1 db.CreatingVolume, result2 error) {
fake.createChildForContainerMutex.Lock()
defer fake.createChildForContainerMutex.Unlock()
fake.CreateChildForContainerStub = nil
fake.createChildForContainerReturns = struct {
result1 db.CreatingVolume
@ -263,6 +281,8 @@ func (fake *FakeVolume) CreateChildForContainerReturns(result1 db.CreatingVolume
}
func (fake *FakeVolume) CreateChildForContainerReturnsOnCall(i int, result1 db.CreatingVolume, result2 error) {
fake.createChildForContainerMutex.Lock()
defer fake.createChildForContainerMutex.Unlock()
fake.CreateChildForContainerStub = nil
if fake.createChildForContainerReturnsOnCall == nil {
fake.createChildForContainerReturnsOnCall = make(map[int]struct {
@ -299,7 +319,15 @@ func (fake *FakeVolume) DestroyCallCount() int {
return len(fake.destroyArgsForCall)
}
func (fake *FakeVolume) DestroyCalls(stub func() error) {
fake.destroyMutex.Lock()
defer fake.destroyMutex.Unlock()
fake.DestroyStub = stub
}
func (fake *FakeVolume) DestroyReturns(result1 error) {
fake.destroyMutex.Lock()
defer fake.destroyMutex.Unlock()
fake.DestroyStub = nil
fake.destroyReturns = struct {
result1 error
@ -307,6 +335,8 @@ func (fake *FakeVolume) DestroyReturns(result1 error) {
}
func (fake *FakeVolume) DestroyReturnsOnCall(i int, result1 error) {
fake.destroyMutex.Lock()
defer fake.destroyMutex.Unlock()
fake.DestroyStub = nil
if fake.destroyReturnsOnCall == nil {
fake.destroyReturnsOnCall = make(map[int]struct {
@ -341,7 +371,15 @@ func (fake *FakeVolume) HandleCallCount() int {
return len(fake.handleArgsForCall)
}
func (fake *FakeVolume) HandleCalls(stub func() string) {
fake.handleMutex.Lock()
defer fake.handleMutex.Unlock()
fake.HandleStub = stub
}
func (fake *FakeVolume) HandleReturns(result1 string) {
fake.handleMutex.Lock()
defer fake.handleMutex.Unlock()
fake.HandleStub = nil
fake.handleReturns = struct {
result1 string
@ -349,6 +387,8 @@ func (fake *FakeVolume) HandleReturns(result1 string) {
}
func (fake *FakeVolume) HandleReturnsOnCall(i int, result1 string) {
fake.handleMutex.Lock()
defer fake.handleMutex.Unlock()
fake.HandleStub = nil
if fake.handleReturnsOnCall == nil {
fake.handleReturnsOnCall = make(map[int]struct {
@ -385,6 +425,12 @@ func (fake *FakeVolume) InitializeArtifactCallCount() int {
return len(fake.initializeArtifactArgsForCall)
}
func (fake *FakeVolume) InitializeArtifactCalls(stub func(string, int) (db.WorkerArtifact, error)) {
fake.initializeArtifactMutex.Lock()
defer fake.initializeArtifactMutex.Unlock()
fake.InitializeArtifactStub = stub
}
func (fake *FakeVolume) InitializeArtifactArgsForCall(i int) (string, int) {
fake.initializeArtifactMutex.RLock()
defer fake.initializeArtifactMutex.RUnlock()
@ -393,6 +439,8 @@ func (fake *FakeVolume) InitializeArtifactArgsForCall(i int) (string, int) {
}
func (fake *FakeVolume) InitializeArtifactReturns(result1 db.WorkerArtifact, result2 error) {
fake.initializeArtifactMutex.Lock()
defer fake.initializeArtifactMutex.Unlock()
fake.InitializeArtifactStub = nil
fake.initializeArtifactReturns = struct {
result1 db.WorkerArtifact
@ -401,6 +449,8 @@ func (fake *FakeVolume) InitializeArtifactReturns(result1 db.WorkerArtifact, res
}
func (fake *FakeVolume) InitializeArtifactReturnsOnCall(i int, result1 db.WorkerArtifact, result2 error) {
fake.initializeArtifactMutex.Lock()
defer fake.initializeArtifactMutex.Unlock()
fake.InitializeArtifactStub = nil
if fake.initializeArtifactReturnsOnCall == nil {
fake.initializeArtifactReturnsOnCall = make(map[int]struct {
@ -438,6 +488,12 @@ func (fake *FakeVolume) InitializeResourceCacheCallCount() int {
return len(fake.initializeResourceCacheArgsForCall)
}
func (fake *FakeVolume) InitializeResourceCacheCalls(stub func(db.UsedResourceCache) error) {
fake.initializeResourceCacheMutex.Lock()
defer fake.initializeResourceCacheMutex.Unlock()
fake.InitializeResourceCacheStub = stub
}
func (fake *FakeVolume) InitializeResourceCacheArgsForCall(i int) db.UsedResourceCache {
fake.initializeResourceCacheMutex.RLock()
defer fake.initializeResourceCacheMutex.RUnlock()
@ -446,6 +502,8 @@ func (fake *FakeVolume) InitializeResourceCacheArgsForCall(i int) db.UsedResourc
}
func (fake *FakeVolume) InitializeResourceCacheReturns(result1 error) {
fake.initializeResourceCacheMutex.Lock()
defer fake.initializeResourceCacheMutex.Unlock()
fake.InitializeResourceCacheStub = nil
fake.initializeResourceCacheReturns = struct {
result1 error
@ -453,6 +511,8 @@ func (fake *FakeVolume) InitializeResourceCacheReturns(result1 error) {
}
func (fake *FakeVolume) InitializeResourceCacheReturnsOnCall(i int, result1 error) {
fake.initializeResourceCacheMutex.Lock()
defer fake.initializeResourceCacheMutex.Unlock()
fake.InitializeResourceCacheStub = nil
if fake.initializeResourceCacheReturnsOnCall == nil {
fake.initializeResourceCacheReturnsOnCall = make(map[int]struct {
@ -492,6 +552,12 @@ func (fake *FakeVolume) InitializeTaskCacheCallCount() int {
return len(fake.initializeTaskCacheArgsForCall)
}
func (fake *FakeVolume) InitializeTaskCacheCalls(stub func(lager.Logger, int, string, string, bool) error) {
fake.initializeTaskCacheMutex.Lock()
defer fake.initializeTaskCacheMutex.Unlock()
fake.InitializeTaskCacheStub = stub
}
func (fake *FakeVolume) InitializeTaskCacheArgsForCall(i int) (lager.Logger, int, string, string, bool) {
fake.initializeTaskCacheMutex.RLock()
defer fake.initializeTaskCacheMutex.RUnlock()
@ -500,6 +566,8 @@ func (fake *FakeVolume) InitializeTaskCacheArgsForCall(i int) (lager.Logger, int
}
func (fake *FakeVolume) InitializeTaskCacheReturns(result1 error) {
fake.initializeTaskCacheMutex.Lock()
defer fake.initializeTaskCacheMutex.Unlock()
fake.InitializeTaskCacheStub = nil
fake.initializeTaskCacheReturns = struct {
result1 error
@ -507,6 +575,8 @@ func (fake *FakeVolume) InitializeTaskCacheReturns(result1 error) {
}
func (fake *FakeVolume) InitializeTaskCacheReturnsOnCall(i int, result1 error) {
fake.initializeTaskCacheMutex.Lock()
defer fake.initializeTaskCacheMutex.Unlock()
fake.InitializeTaskCacheStub = nil
if fake.initializeTaskCacheReturnsOnCall == nil {
fake.initializeTaskCacheReturnsOnCall = make(map[int]struct {
@ -541,7 +611,15 @@ func (fake *FakeVolume) PathCallCount() int {
return len(fake.pathArgsForCall)
}
func (fake *FakeVolume) PathCalls(stub func() string) {
fake.pathMutex.Lock()
defer fake.pathMutex.Unlock()
fake.PathStub = stub
}
func (fake *FakeVolume) PathReturns(result1 string) {
fake.pathMutex.Lock()
defer fake.pathMutex.Unlock()
fake.PathStub = nil
fake.pathReturns = struct {
result1 string
@ -549,6 +627,8 @@ func (fake *FakeVolume) PathReturns(result1 string) {
}
func (fake *FakeVolume) PathReturnsOnCall(i int, result1 string) {
fake.pathMutex.Lock()
defer fake.pathMutex.Unlock()
fake.PathStub = nil
if fake.pathReturnsOnCall == nil {
fake.pathReturnsOnCall = make(map[int]struct {
@ -583,7 +663,15 @@ func (fake *FakeVolume) PropertiesCallCount() int {
return len(fake.propertiesArgsForCall)
}
func (fake *FakeVolume) PropertiesCalls(stub func() (baggageclaim.VolumeProperties, error)) {
fake.propertiesMutex.Lock()
defer fake.propertiesMutex.Unlock()
fake.PropertiesStub = stub
}
func (fake *FakeVolume) PropertiesReturns(result1 baggageclaim.VolumeProperties, result2 error) {
fake.propertiesMutex.Lock()
defer fake.propertiesMutex.Unlock()
fake.PropertiesStub = nil
fake.propertiesReturns = struct {
result1 baggageclaim.VolumeProperties
@ -592,6 +680,8 @@ func (fake *FakeVolume) PropertiesReturns(result1 baggageclaim.VolumeProperties,
}
func (fake *FakeVolume) PropertiesReturnsOnCall(i int, result1 baggageclaim.VolumeProperties, result2 error) {
fake.propertiesMutex.Lock()
defer fake.propertiesMutex.Unlock()
fake.PropertiesStub = nil
if fake.propertiesReturnsOnCall == nil {
fake.propertiesReturnsOnCall = make(map[int]struct {
@ -629,6 +719,12 @@ func (fake *FakeVolume) SetPrivilegedCallCount() int {
return len(fake.setPrivilegedArgsForCall)
}
func (fake *FakeVolume) SetPrivilegedCalls(stub func(bool) error) {
fake.setPrivilegedMutex.Lock()
defer fake.setPrivilegedMutex.Unlock()
fake.SetPrivilegedStub = stub
}
func (fake *FakeVolume) SetPrivilegedArgsForCall(i int) bool {
fake.setPrivilegedMutex.RLock()
defer fake.setPrivilegedMutex.RUnlock()
@ -637,6 +733,8 @@ func (fake *FakeVolume) SetPrivilegedArgsForCall(i int) bool {
}
func (fake *FakeVolume) SetPrivilegedReturns(result1 error) {
fake.setPrivilegedMutex.Lock()
defer fake.setPrivilegedMutex.Unlock()
fake.SetPrivilegedStub = nil
fake.setPrivilegedReturns = struct {
result1 error
@ -644,6 +742,8 @@ func (fake *FakeVolume) SetPrivilegedReturns(result1 error) {
}
func (fake *FakeVolume) SetPrivilegedReturnsOnCall(i int, result1 error) {
fake.setPrivilegedMutex.Lock()
defer fake.setPrivilegedMutex.Unlock()
fake.SetPrivilegedStub = nil
if fake.setPrivilegedReturnsOnCall == nil {
fake.setPrivilegedReturnsOnCall = make(map[int]struct {
@ -680,6 +780,12 @@ func (fake *FakeVolume) SetPropertyCallCount() int {
return len(fake.setPropertyArgsForCall)
}
func (fake *FakeVolume) SetPropertyCalls(stub func(string, string) error) {
fake.setPropertyMutex.Lock()
defer fake.setPropertyMutex.Unlock()
fake.SetPropertyStub = stub
}
func (fake *FakeVolume) SetPropertyArgsForCall(i int) (string, string) {
fake.setPropertyMutex.RLock()
defer fake.setPropertyMutex.RUnlock()
@ -688,6 +794,8 @@ func (fake *FakeVolume) SetPropertyArgsForCall(i int) (string, string) {
}
func (fake *FakeVolume) SetPropertyReturns(result1 error) {
fake.setPropertyMutex.Lock()
defer fake.setPropertyMutex.Unlock()
fake.SetPropertyStub = nil
fake.setPropertyReturns = struct {
result1 error
@ -695,6 +803,8 @@ func (fake *FakeVolume) SetPropertyReturns(result1 error) {
}
func (fake *FakeVolume) SetPropertyReturnsOnCall(i int, result1 error) {
fake.setPropertyMutex.Lock()
defer fake.setPropertyMutex.Unlock()
fake.SetPropertyStub = nil
if fake.setPropertyReturnsOnCall == nil {
fake.setPropertyReturnsOnCall = make(map[int]struct {
@ -731,6 +841,12 @@ func (fake *FakeVolume) StreamInCallCount() int {
return len(fake.streamInArgsForCall)
}
func (fake *FakeVolume) StreamInCalls(stub func(string, io.Reader) error) {
fake.streamInMutex.Lock()
defer fake.streamInMutex.Unlock()
fake.StreamInStub = stub
}
func (fake *FakeVolume) StreamInArgsForCall(i int) (string, io.Reader) {
fake.streamInMutex.RLock()
defer fake.streamInMutex.RUnlock()
@ -739,6 +855,8 @@ func (fake *FakeVolume) StreamInArgsForCall(i int) (string, io.Reader) {
}
func (fake *FakeVolume) StreamInReturns(result1 error) {
fake.streamInMutex.Lock()
defer fake.streamInMutex.Unlock()
fake.StreamInStub = nil
fake.streamInReturns = struct {
result1 error
@ -746,6 +864,8 @@ func (fake *FakeVolume) StreamInReturns(result1 error) {
}
func (fake *FakeVolume) StreamInReturnsOnCall(i int, result1 error) {
fake.streamInMutex.Lock()
defer fake.streamInMutex.Unlock()
fake.StreamInStub = nil
if fake.streamInReturnsOnCall == nil {
fake.streamInReturnsOnCall = make(map[int]struct {
@ -781,6 +901,12 @@ func (fake *FakeVolume) StreamOutCallCount() int {
return len(fake.streamOutArgsForCall)
}
func (fake *FakeVolume) StreamOutCalls(stub func(string) (io.ReadCloser, error)) {
fake.streamOutMutex.Lock()
defer fake.streamOutMutex.Unlock()
fake.StreamOutStub = stub
}
func (fake *FakeVolume) StreamOutArgsForCall(i int) string {
fake.streamOutMutex.RLock()
defer fake.streamOutMutex.RUnlock()
@ -789,6 +915,8 @@ func (fake *FakeVolume) StreamOutArgsForCall(i int) string {
}
func (fake *FakeVolume) StreamOutReturns(result1 io.ReadCloser, result2 error) {
fake.streamOutMutex.Lock()
defer fake.streamOutMutex.Unlock()
fake.StreamOutStub = nil
fake.streamOutReturns = struct {
result1 io.ReadCloser
@ -797,6 +925,8 @@ func (fake *FakeVolume) StreamOutReturns(result1 io.ReadCloser, result2 error) {
}
func (fake *FakeVolume) StreamOutReturnsOnCall(i int, result1 io.ReadCloser, result2 error) {
fake.streamOutMutex.Lock()
defer fake.streamOutMutex.Unlock()
fake.StreamOutStub = nil
if fake.streamOutReturnsOnCall == nil {
fake.streamOutReturnsOnCall = make(map[int]struct {
@ -833,7 +963,15 @@ func (fake *FakeVolume) WorkerNameCallCount() int {
return len(fake.workerNameArgsForCall)
}
func (fake *FakeVolume) WorkerNameCalls(stub func() string) {
fake.workerNameMutex.Lock()
defer fake.workerNameMutex.Unlock()
fake.WorkerNameStub = stub
}
func (fake *FakeVolume) WorkerNameReturns(result1 string) {
fake.workerNameMutex.Lock()
defer fake.workerNameMutex.Unlock()
fake.WorkerNameStub = nil
fake.workerNameReturns = struct {
result1 string
@ -841,6 +979,8 @@ func (fake *FakeVolume) WorkerNameReturns(result1 string) {
}
func (fake *FakeVolume) WorkerNameReturnsOnCall(i int, result1 string) {
fake.workerNameMutex.Lock()
defer fake.workerNameMutex.Unlock()
fake.WorkerNameStub = nil
if fake.workerNameReturnsOnCall == nil {
fake.workerNameReturnsOnCall = make(map[int]struct {


@ -2,11 +2,11 @@
package workerfakes
import (
sync "sync"
"sync"
lager "code.cloudfoundry.org/lager"
db "github.com/concourse/concourse/atc/db"
worker "github.com/concourse/concourse/atc/worker"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/worker"
)
type FakeVolumeClient struct {
@ -194,6 +194,12 @@ func (fake *FakeVolumeClient) CreateVolumeCallCount() int {
return len(fake.createVolumeArgsForCall)
}
func (fake *FakeVolumeClient) CreateVolumeCalls(stub func(lager.Logger, worker.VolumeSpec, int, string, db.VolumeType) (worker.Volume, error)) {
fake.createVolumeMutex.Lock()
defer fake.createVolumeMutex.Unlock()
fake.CreateVolumeStub = stub
}
func (fake *FakeVolumeClient) CreateVolumeArgsForCall(i int) (lager.Logger, worker.VolumeSpec, int, string, db.VolumeType) {
fake.createVolumeMutex.RLock()
defer fake.createVolumeMutex.RUnlock()
@ -202,6 +208,8 @@ func (fake *FakeVolumeClient) CreateVolumeArgsForCall(i int) (lager.Logger, work
}
func (fake *FakeVolumeClient) CreateVolumeReturns(result1 worker.Volume, result2 error) {
fake.createVolumeMutex.Lock()
defer fake.createVolumeMutex.Unlock()
fake.CreateVolumeStub = nil
fake.createVolumeReturns = struct {
result1 worker.Volume
@ -210,6 +218,8 @@ func (fake *FakeVolumeClient) CreateVolumeReturns(result1 worker.Volume, result2
}
func (fake *FakeVolumeClient) CreateVolumeReturnsOnCall(i int, result1 worker.Volume, result2 error) {
fake.createVolumeMutex.Lock()
defer fake.createVolumeMutex.Unlock()
fake.CreateVolumeStub = nil
if fake.createVolumeReturnsOnCall == nil {
fake.createVolumeReturnsOnCall = make(map[int]struct {
@ -252,6 +262,12 @@ func (fake *FakeVolumeClient) CreateVolumeForTaskCacheCallCount() int {
return len(fake.createVolumeForTaskCacheArgsForCall)
}
func (fake *FakeVolumeClient) CreateVolumeForTaskCacheCalls(stub func(lager.Logger, worker.VolumeSpec, int, int, string, string) (worker.Volume, error)) {
fake.createVolumeForTaskCacheMutex.Lock()
defer fake.createVolumeForTaskCacheMutex.Unlock()
fake.CreateVolumeForTaskCacheStub = stub
}
func (fake *FakeVolumeClient) CreateVolumeForTaskCacheArgsForCall(i int) (lager.Logger, worker.VolumeSpec, int, int, string, string) {
fake.createVolumeForTaskCacheMutex.RLock()
defer fake.createVolumeForTaskCacheMutex.RUnlock()
@ -260,6 +276,8 @@ func (fake *FakeVolumeClient) CreateVolumeForTaskCacheArgsForCall(i int) (lager.
}
func (fake *FakeVolumeClient) CreateVolumeForTaskCacheReturns(result1 worker.Volume, result2 error) {
fake.createVolumeForTaskCacheMutex.Lock()
defer fake.createVolumeForTaskCacheMutex.Unlock()
fake.CreateVolumeForTaskCacheStub = nil
fake.createVolumeForTaskCacheReturns = struct {
result1 worker.Volume
@ -268,6 +286,8 @@ func (fake *FakeVolumeClient) CreateVolumeForTaskCacheReturns(result1 worker.Vol
}
func (fake *FakeVolumeClient) CreateVolumeForTaskCacheReturnsOnCall(i int, result1 worker.Volume, result2 error) {
fake.createVolumeForTaskCacheMutex.Lock()
defer fake.createVolumeForTaskCacheMutex.Unlock()
fake.CreateVolumeForTaskCacheStub = nil
if fake.createVolumeForTaskCacheReturnsOnCall == nil {
fake.createVolumeForTaskCacheReturnsOnCall = make(map[int]struct {
@ -310,6 +330,12 @@ func (fake *FakeVolumeClient) FindOrCreateCOWVolumeForContainerCallCount() int {
return len(fake.findOrCreateCOWVolumeForContainerArgsForCall)
}
func (fake *FakeVolumeClient) FindOrCreateCOWVolumeForContainerCalls(stub func(lager.Logger, worker.VolumeSpec, db.CreatingContainer, worker.Volume, int, string) (worker.Volume, error)) {
fake.findOrCreateCOWVolumeForContainerMutex.Lock()
defer fake.findOrCreateCOWVolumeForContainerMutex.Unlock()
fake.FindOrCreateCOWVolumeForContainerStub = stub
}
func (fake *FakeVolumeClient) FindOrCreateCOWVolumeForContainerArgsForCall(i int) (lager.Logger, worker.VolumeSpec, db.CreatingContainer, worker.Volume, int, string) {
fake.findOrCreateCOWVolumeForContainerMutex.RLock()
defer fake.findOrCreateCOWVolumeForContainerMutex.RUnlock()
@ -318,6 +344,8 @@ func (fake *FakeVolumeClient) FindOrCreateCOWVolumeForContainerArgsForCall(i int
}
func (fake *FakeVolumeClient) FindOrCreateCOWVolumeForContainerReturns(result1 worker.Volume, result2 error) {
fake.findOrCreateCOWVolumeForContainerMutex.Lock()
defer fake.findOrCreateCOWVolumeForContainerMutex.Unlock()
fake.FindOrCreateCOWVolumeForContainerStub = nil
fake.findOrCreateCOWVolumeForContainerReturns = struct {
result1 worker.Volume
@ -326,6 +354,8 @@ func (fake *FakeVolumeClient) FindOrCreateCOWVolumeForContainerReturns(result1 w
}
func (fake *FakeVolumeClient) FindOrCreateCOWVolumeForContainerReturnsOnCall(i int, result1 worker.Volume, result2 error) {
fake.findOrCreateCOWVolumeForContainerMutex.Lock()
defer fake.findOrCreateCOWVolumeForContainerMutex.Unlock()
fake.FindOrCreateCOWVolumeForContainerStub = nil
if fake.findOrCreateCOWVolumeForContainerReturnsOnCall == nil {
fake.findOrCreateCOWVolumeForContainerReturnsOnCall = make(map[int]struct {
@ -366,6 +396,12 @@ func (fake *FakeVolumeClient) FindOrCreateVolumeForBaseResourceTypeCallCount() i
return len(fake.findOrCreateVolumeForBaseResourceTypeArgsForCall)
}
func (fake *FakeVolumeClient) FindOrCreateVolumeForBaseResourceTypeCalls(stub func(lager.Logger, worker.VolumeSpec, int, string) (worker.Volume, error)) {
fake.findOrCreateVolumeForBaseResourceTypeMutex.Lock()
defer fake.findOrCreateVolumeForBaseResourceTypeMutex.Unlock()
fake.FindOrCreateVolumeForBaseResourceTypeStub = stub
}
func (fake *FakeVolumeClient) FindOrCreateVolumeForBaseResourceTypeArgsForCall(i int) (lager.Logger, worker.VolumeSpec, int, string) {
fake.findOrCreateVolumeForBaseResourceTypeMutex.RLock()
defer fake.findOrCreateVolumeForBaseResourceTypeMutex.RUnlock()
@ -374,6 +410,8 @@ func (fake *FakeVolumeClient) FindOrCreateVolumeForBaseResourceTypeArgsForCall(i
}
func (fake *FakeVolumeClient) FindOrCreateVolumeForBaseResourceTypeReturns(result1 worker.Volume, result2 error) {
fake.findOrCreateVolumeForBaseResourceTypeMutex.Lock()
defer fake.findOrCreateVolumeForBaseResourceTypeMutex.Unlock()
fake.FindOrCreateVolumeForBaseResourceTypeStub = nil
fake.findOrCreateVolumeForBaseResourceTypeReturns = struct {
result1 worker.Volume
@ -382,6 +420,8 @@ func (fake *FakeVolumeClient) FindOrCreateVolumeForBaseResourceTypeReturns(resul
}
func (fake *FakeVolumeClient) FindOrCreateVolumeForBaseResourceTypeReturnsOnCall(i int, result1 worker.Volume, result2 error) {
fake.findOrCreateVolumeForBaseResourceTypeMutex.Lock()
defer fake.findOrCreateVolumeForBaseResourceTypeMutex.Unlock()
fake.FindOrCreateVolumeForBaseResourceTypeStub = nil
if fake.findOrCreateVolumeForBaseResourceTypeReturnsOnCall == nil {
fake.findOrCreateVolumeForBaseResourceTypeReturnsOnCall = make(map[int]struct {
@ -423,6 +463,12 @@ func (fake *FakeVolumeClient) FindOrCreateVolumeForContainerCallCount() int {
return len(fake.findOrCreateVolumeForContainerArgsForCall)
}
func (fake *FakeVolumeClient) FindOrCreateVolumeForContainerCalls(stub func(lager.Logger, worker.VolumeSpec, db.CreatingContainer, int, string) (worker.Volume, error)) {
fake.findOrCreateVolumeForContainerMutex.Lock()
defer fake.findOrCreateVolumeForContainerMutex.Unlock()
fake.FindOrCreateVolumeForContainerStub = stub
}
func (fake *FakeVolumeClient) FindOrCreateVolumeForContainerArgsForCall(i int) (lager.Logger, worker.VolumeSpec, db.CreatingContainer, int, string) {
fake.findOrCreateVolumeForContainerMutex.RLock()
defer fake.findOrCreateVolumeForContainerMutex.RUnlock()
@ -431,6 +477,8 @@ func (fake *FakeVolumeClient) FindOrCreateVolumeForContainerArgsForCall(i int) (
}
func (fake *FakeVolumeClient) FindOrCreateVolumeForContainerReturns(result1 worker.Volume, result2 error) {
fake.findOrCreateVolumeForContainerMutex.Lock()
defer fake.findOrCreateVolumeForContainerMutex.Unlock()
fake.FindOrCreateVolumeForContainerStub = nil
fake.findOrCreateVolumeForContainerReturns = struct {
result1 worker.Volume
@ -439,6 +487,8 @@ func (fake *FakeVolumeClient) FindOrCreateVolumeForContainerReturns(result1 work
}
func (fake *FakeVolumeClient) FindOrCreateVolumeForContainerReturnsOnCall(i int, result1 worker.Volume, result2 error) {
fake.findOrCreateVolumeForContainerMutex.Lock()
defer fake.findOrCreateVolumeForContainerMutex.Unlock()
fake.FindOrCreateVolumeForContainerStub = nil
if fake.findOrCreateVolumeForContainerReturnsOnCall == nil {
fake.findOrCreateVolumeForContainerReturnsOnCall = make(map[int]struct {
@ -476,6 +526,12 @@ func (fake *FakeVolumeClient) FindOrCreateVolumeForResourceCertsCallCount() int
return len(fake.findOrCreateVolumeForResourceCertsArgsForCall)
}
func (fake *FakeVolumeClient) FindOrCreateVolumeForResourceCertsCalls(stub func(lager.Logger) (worker.Volume, bool, error)) {
fake.findOrCreateVolumeForResourceCertsMutex.Lock()
defer fake.findOrCreateVolumeForResourceCertsMutex.Unlock()
fake.FindOrCreateVolumeForResourceCertsStub = stub
}
func (fake *FakeVolumeClient) FindOrCreateVolumeForResourceCertsArgsForCall(i int) lager.Logger {
fake.findOrCreateVolumeForResourceCertsMutex.RLock()
defer fake.findOrCreateVolumeForResourceCertsMutex.RUnlock()
@ -484,6 +540,8 @@ func (fake *FakeVolumeClient) FindOrCreateVolumeForResourceCertsArgsForCall(i in
}
func (fake *FakeVolumeClient) FindOrCreateVolumeForResourceCertsReturns(result1 worker.Volume, result2 bool, result3 error) {
fake.findOrCreateVolumeForResourceCertsMutex.Lock()
defer fake.findOrCreateVolumeForResourceCertsMutex.Unlock()
fake.FindOrCreateVolumeForResourceCertsStub = nil
fake.findOrCreateVolumeForResourceCertsReturns = struct {
result1 worker.Volume
@ -493,6 +551,8 @@ func (fake *FakeVolumeClient) FindOrCreateVolumeForResourceCertsReturns(result1
}
func (fake *FakeVolumeClient) FindOrCreateVolumeForResourceCertsReturnsOnCall(i int, result1 worker.Volume, result2 bool, result3 error) {
fake.findOrCreateVolumeForResourceCertsMutex.Lock()
defer fake.findOrCreateVolumeForResourceCertsMutex.Unlock()
fake.FindOrCreateVolumeForResourceCertsStub = nil
if fake.findOrCreateVolumeForResourceCertsReturnsOnCall == nil {
fake.findOrCreateVolumeForResourceCertsReturnsOnCall = make(map[int]struct {
@ -533,6 +593,12 @@ func (fake *FakeVolumeClient) FindVolumeForResourceCacheCallCount() int {
return len(fake.findVolumeForResourceCacheArgsForCall)
}
func (fake *FakeVolumeClient) FindVolumeForResourceCacheCalls(stub func(lager.Logger, db.UsedResourceCache) (worker.Volume, bool, error)) {
fake.findVolumeForResourceCacheMutex.Lock()
defer fake.findVolumeForResourceCacheMutex.Unlock()
fake.FindVolumeForResourceCacheStub = stub
}
func (fake *FakeVolumeClient) FindVolumeForResourceCacheArgsForCall(i int) (lager.Logger, db.UsedResourceCache) {
fake.findVolumeForResourceCacheMutex.RLock()
defer fake.findVolumeForResourceCacheMutex.RUnlock()
@ -541,6 +607,8 @@ func (fake *FakeVolumeClient) FindVolumeForResourceCacheArgsForCall(i int) (lage
}
func (fake *FakeVolumeClient) FindVolumeForResourceCacheReturns(result1 worker.Volume, result2 bool, result3 error) {
fake.findVolumeForResourceCacheMutex.Lock()
defer fake.findVolumeForResourceCacheMutex.Unlock()
fake.FindVolumeForResourceCacheStub = nil
fake.findVolumeForResourceCacheReturns = struct {
result1 worker.Volume
@ -550,6 +618,8 @@ func (fake *FakeVolumeClient) FindVolumeForResourceCacheReturns(result1 worker.V
}
func (fake *FakeVolumeClient) FindVolumeForResourceCacheReturnsOnCall(i int, result1 worker.Volume, result2 bool, result3 error) {
fake.findVolumeForResourceCacheMutex.Lock()
defer fake.findVolumeForResourceCacheMutex.Unlock()
fake.FindVolumeForResourceCacheStub = nil
if fake.findVolumeForResourceCacheReturnsOnCall == nil {
fake.findVolumeForResourceCacheReturnsOnCall = make(map[int]struct {
@ -593,6 +663,12 @@ func (fake *FakeVolumeClient) FindVolumeForTaskCacheCallCount() int {
return len(fake.findVolumeForTaskCacheArgsForCall)
}
func (fake *FakeVolumeClient) FindVolumeForTaskCacheCalls(stub func(lager.Logger, int, int, string, string) (worker.Volume, bool, error)) {
fake.findVolumeForTaskCacheMutex.Lock()
defer fake.findVolumeForTaskCacheMutex.Unlock()
fake.FindVolumeForTaskCacheStub = stub
}
func (fake *FakeVolumeClient) FindVolumeForTaskCacheArgsForCall(i int) (lager.Logger, int, int, string, string) {
fake.findVolumeForTaskCacheMutex.RLock()
defer fake.findVolumeForTaskCacheMutex.RUnlock()
@ -601,6 +677,8 @@ func (fake *FakeVolumeClient) FindVolumeForTaskCacheArgsForCall(i int) (lager.Lo
}
func (fake *FakeVolumeClient) FindVolumeForTaskCacheReturns(result1 worker.Volume, result2 bool, result3 error) {
fake.findVolumeForTaskCacheMutex.Lock()
defer fake.findVolumeForTaskCacheMutex.Unlock()
fake.FindVolumeForTaskCacheStub = nil
fake.findVolumeForTaskCacheReturns = struct {
result1 worker.Volume
@ -610,6 +688,8 @@ func (fake *FakeVolumeClient) FindVolumeForTaskCacheReturns(result1 worker.Volum
}
func (fake *FakeVolumeClient) FindVolumeForTaskCacheReturnsOnCall(i int, result1 worker.Volume, result2 bool, result3 error) {
fake.findVolumeForTaskCacheMutex.Lock()
defer fake.findVolumeForTaskCacheMutex.Unlock()
fake.FindVolumeForTaskCacheStub = nil
if fake.findVolumeForTaskCacheReturnsOnCall == nil {
fake.findVolumeForTaskCacheReturnsOnCall = make(map[int]struct {
@ -650,6 +730,12 @@ func (fake *FakeVolumeClient) LookupVolumeCallCount() int {
return len(fake.lookupVolumeArgsForCall)
}
func (fake *FakeVolumeClient) LookupVolumeCalls(stub func(lager.Logger, string) (worker.Volume, bool, error)) {
fake.lookupVolumeMutex.Lock()
defer fake.lookupVolumeMutex.Unlock()
fake.LookupVolumeStub = stub
}
func (fake *FakeVolumeClient) LookupVolumeArgsForCall(i int) (lager.Logger, string) {
fake.lookupVolumeMutex.RLock()
defer fake.lookupVolumeMutex.RUnlock()
@ -658,6 +744,8 @@ func (fake *FakeVolumeClient) LookupVolumeArgsForCall(i int) (lager.Logger, stri
}
func (fake *FakeVolumeClient) LookupVolumeReturns(result1 worker.Volume, result2 bool, result3 error) {
fake.lookupVolumeMutex.Lock()
defer fake.lookupVolumeMutex.Unlock()
fake.LookupVolumeStub = nil
fake.lookupVolumeReturns = struct {
result1 worker.Volume
@ -667,6 +755,8 @@ func (fake *FakeVolumeClient) LookupVolumeReturns(result1 worker.Volume, result2
}
func (fake *FakeVolumeClient) LookupVolumeReturnsOnCall(i int, result1 worker.Volume, result2 bool, result3 error) {
fake.lookupVolumeMutex.Lock()
defer fake.lookupVolumeMutex.Unlock()
fake.LookupVolumeStub = nil
if fake.lookupVolumeReturnsOnCall == nil {
fake.lookupVolumeReturnsOnCall = make(map[int]struct {


@ -2,19 +2,31 @@
package workerfakes
import (
context "context"
sync "sync"
time "time"
"context"
"sync"
"time"
garden "code.cloudfoundry.org/garden"
lager "code.cloudfoundry.org/lager"
atc "github.com/concourse/concourse/atc"
db "github.com/concourse/concourse/atc/db"
worker "github.com/concourse/concourse/atc/worker"
version "github.com/cppforlife/go-semi-semantic/version"
"code.cloudfoundry.org/garden"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/worker"
"github.com/cppforlife/go-semi-semantic/version"
)
type FakeWorker struct {
ActiveTasksStub func() (int, error)
activeTasksMutex sync.RWMutex
activeTasksArgsForCall []struct {
}
activeTasksReturns struct {
result1 int
result2 error
}
activeTasksReturnsOnCall map[int]struct {
result1 int
result2 error
}
BuildContainersStub func() int
buildContainersMutex sync.RWMutex
buildContainersArgsForCall []struct {
@ -56,6 +68,16 @@ type FakeWorker struct {
result1 worker.Volume
result2 error
}
DecreaseActiveTasksStub func() error
decreaseActiveTasksMutex sync.RWMutex
decreaseActiveTasksArgsForCall []struct {
}
decreaseActiveTasksReturns struct {
result1 error
}
decreaseActiveTasksReturnsOnCall map[int]struct {
result1 error
}
DescriptionStub func() string
descriptionMutex sync.RWMutex
descriptionArgsForCall []struct {
@ -171,6 +193,16 @@ type FakeWorker struct {
gardenClientReturnsOnCall map[int]struct {
result1 garden.Client
}
IncreaseActiveTasksStub func() error
increaseActiveTasksMutex sync.RWMutex
increaseActiveTasksArgsForCall []struct {
}
increaseActiveTasksReturns struct {
result1 error
}
increaseActiveTasksReturnsOnCall map[int]struct {
result1 error
}
IsOwnedByTeamStub func() bool
isOwnedByTeamMutex sync.RWMutex
isOwnedByTeamArgsForCall []struct {
@ -265,6 +297,61 @@ type FakeWorker struct {
invocationsMutex sync.RWMutex
}
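// ActiveTasks, DecreaseActiveTasks and IncreaseActiveTasks below are the new
// fake counterparts of the per-worker task counter that the experimental
// limit-active-tasks placement strategy presumably reads and updates; a
// hedged usage sketch follows the IncreaseActiveTasks helpers further down.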
func (fake *FakeWorker) ActiveTasks() (int, error) {
fake.activeTasksMutex.Lock()
ret, specificReturn := fake.activeTasksReturnsOnCall[len(fake.activeTasksArgsForCall)]
fake.activeTasksArgsForCall = append(fake.activeTasksArgsForCall, struct {
}{})
fake.recordInvocation("ActiveTasks", []interface{}{})
fake.activeTasksMutex.Unlock()
if fake.ActiveTasksStub != nil {
return fake.ActiveTasksStub()
}
if specificReturn {
return ret.result1, ret.result2
}
fakeReturns := fake.activeTasksReturns
return fakeReturns.result1, fakeReturns.result2
}
func (fake *FakeWorker) ActiveTasksCallCount() int {
fake.activeTasksMutex.RLock()
defer fake.activeTasksMutex.RUnlock()
return len(fake.activeTasksArgsForCall)
}
func (fake *FakeWorker) ActiveTasksCalls(stub func() (int, error)) {
fake.activeTasksMutex.Lock()
defer fake.activeTasksMutex.Unlock()
fake.ActiveTasksStub = stub
}
func (fake *FakeWorker) ActiveTasksReturns(result1 int, result2 error) {
fake.activeTasksMutex.Lock()
defer fake.activeTasksMutex.Unlock()
fake.ActiveTasksStub = nil
fake.activeTasksReturns = struct {
result1 int
result2 error
}{result1, result2}
}
func (fake *FakeWorker) ActiveTasksReturnsOnCall(i int, result1 int, result2 error) {
fake.activeTasksMutex.Lock()
defer fake.activeTasksMutex.Unlock()
fake.ActiveTasksStub = nil
if fake.activeTasksReturnsOnCall == nil {
fake.activeTasksReturnsOnCall = make(map[int]struct {
result1 int
result2 error
})
}
fake.activeTasksReturnsOnCall[i] = struct {
result1 int
result2 error
}{result1, result2}
}
func (fake *FakeWorker) BuildContainers() int {
fake.buildContainersMutex.Lock()
ret, specificReturn := fake.buildContainersReturnsOnCall[len(fake.buildContainersArgsForCall)]
@ -288,7 +375,15 @@ func (fake *FakeWorker) BuildContainersCallCount() int {
return len(fake.buildContainersArgsForCall)
}
func (fake *FakeWorker) BuildContainersCalls(stub func() int) {
fake.buildContainersMutex.Lock()
defer fake.buildContainersMutex.Unlock()
fake.BuildContainersStub = stub
}
func (fake *FakeWorker) BuildContainersReturns(result1 int) {
fake.buildContainersMutex.Lock()
defer fake.buildContainersMutex.Unlock()
fake.BuildContainersStub = nil
fake.buildContainersReturns = struct {
result1 int
@ -296,6 +391,8 @@ func (fake *FakeWorker) BuildContainersReturns(result1 int) {
}
func (fake *FakeWorker) BuildContainersReturnsOnCall(i int, result1 int) {
fake.buildContainersMutex.Lock()
defer fake.buildContainersMutex.Unlock()
fake.BuildContainersStub = nil
if fake.buildContainersReturnsOnCall == nil {
fake.buildContainersReturnsOnCall = make(map[int]struct {
@ -331,6 +428,12 @@ func (fake *FakeWorker) CertsVolumeCallCount() int {
return len(fake.certsVolumeArgsForCall)
}
func (fake *FakeWorker) CertsVolumeCalls(stub func(lager.Logger) (worker.Volume, bool, error)) {
fake.certsVolumeMutex.Lock()
defer fake.certsVolumeMutex.Unlock()
fake.CertsVolumeStub = stub
}
func (fake *FakeWorker) CertsVolumeArgsForCall(i int) lager.Logger {
fake.certsVolumeMutex.RLock()
defer fake.certsVolumeMutex.RUnlock()
@ -339,6 +442,8 @@ func (fake *FakeWorker) CertsVolumeArgsForCall(i int) lager.Logger {
}
func (fake *FakeWorker) CertsVolumeReturns(result1 worker.Volume, result2 bool, result3 error) {
fake.certsVolumeMutex.Lock()
defer fake.certsVolumeMutex.Unlock()
fake.CertsVolumeStub = nil
fake.certsVolumeReturns = struct {
result1 worker.Volume
@ -348,6 +453,8 @@ func (fake *FakeWorker) CertsVolumeReturns(result1 worker.Volume, result2 bool,
}
func (fake *FakeWorker) CertsVolumeReturnsOnCall(i int, result1 worker.Volume, result2 bool, result3 error) {
fake.certsVolumeMutex.Lock()
defer fake.certsVolumeMutex.Unlock()
fake.CertsVolumeStub = nil
if fake.certsVolumeReturnsOnCall == nil {
fake.certsVolumeReturnsOnCall = make(map[int]struct {
@ -390,6 +497,12 @@ func (fake *FakeWorker) CreateVolumeCallCount() int {
return len(fake.createVolumeArgsForCall)
}
func (fake *FakeWorker) CreateVolumeCalls(stub func(lager.Logger, worker.VolumeSpec, int, db.VolumeType) (worker.Volume, error)) {
fake.createVolumeMutex.Lock()
defer fake.createVolumeMutex.Unlock()
fake.CreateVolumeStub = stub
}
func (fake *FakeWorker) CreateVolumeArgsForCall(i int) (lager.Logger, worker.VolumeSpec, int, db.VolumeType) {
fake.createVolumeMutex.RLock()
defer fake.createVolumeMutex.RUnlock()
@ -398,6 +511,8 @@ func (fake *FakeWorker) CreateVolumeArgsForCall(i int) (lager.Logger, worker.Vol
}
func (fake *FakeWorker) CreateVolumeReturns(result1 worker.Volume, result2 error) {
fake.createVolumeMutex.Lock()
defer fake.createVolumeMutex.Unlock()
fake.CreateVolumeStub = nil
fake.createVolumeReturns = struct {
result1 worker.Volume
@ -406,6 +521,8 @@ func (fake *FakeWorker) CreateVolumeReturns(result1 worker.Volume, result2 error
}
func (fake *FakeWorker) CreateVolumeReturnsOnCall(i int, result1 worker.Volume, result2 error) {
fake.createVolumeMutex.Lock()
defer fake.createVolumeMutex.Unlock()
fake.CreateVolumeStub = nil
if fake.createVolumeReturnsOnCall == nil {
fake.createVolumeReturnsOnCall = make(map[int]struct {
@ -419,6 +536,58 @@ func (fake *FakeWorker) CreateVolumeReturnsOnCall(i int, result1 worker.Volume,
}{result1, result2}
}
func (fake *FakeWorker) DecreaseActiveTasks() error {
fake.decreaseActiveTasksMutex.Lock()
ret, specificReturn := fake.decreaseActiveTasksReturnsOnCall[len(fake.decreaseActiveTasksArgsForCall)]
fake.decreaseActiveTasksArgsForCall = append(fake.decreaseActiveTasksArgsForCall, struct {
}{})
fake.recordInvocation("DecreaseActiveTasks", []interface{}{})
fake.decreaseActiveTasksMutex.Unlock()
if fake.DecreaseActiveTasksStub != nil {
return fake.DecreaseActiveTasksStub()
}
if specificReturn {
return ret.result1
}
fakeReturns := fake.decreaseActiveTasksReturns
return fakeReturns.result1
}
func (fake *FakeWorker) DecreaseActiveTasksCallCount() int {
fake.decreaseActiveTasksMutex.RLock()
defer fake.decreaseActiveTasksMutex.RUnlock()
return len(fake.decreaseActiveTasksArgsForCall)
}
func (fake *FakeWorker) DecreaseActiveTasksCalls(stub func() error) {
fake.decreaseActiveTasksMutex.Lock()
defer fake.decreaseActiveTasksMutex.Unlock()
fake.DecreaseActiveTasksStub = stub
}
func (fake *FakeWorker) DecreaseActiveTasksReturns(result1 error) {
fake.decreaseActiveTasksMutex.Lock()
defer fake.decreaseActiveTasksMutex.Unlock()
fake.DecreaseActiveTasksStub = nil
fake.decreaseActiveTasksReturns = struct {
result1 error
}{result1}
}
func (fake *FakeWorker) DecreaseActiveTasksReturnsOnCall(i int, result1 error) {
fake.decreaseActiveTasksMutex.Lock()
defer fake.decreaseActiveTasksMutex.Unlock()
fake.DecreaseActiveTasksStub = nil
if fake.decreaseActiveTasksReturnsOnCall == nil {
fake.decreaseActiveTasksReturnsOnCall = make(map[int]struct {
result1 error
})
}
fake.decreaseActiveTasksReturnsOnCall[i] = struct {
result1 error
}{result1}
}
func (fake *FakeWorker) Description() string {
fake.descriptionMutex.Lock()
ret, specificReturn := fake.descriptionReturnsOnCall[len(fake.descriptionArgsForCall)]
@ -442,7 +611,15 @@ func (fake *FakeWorker) DescriptionCallCount() int {
return len(fake.descriptionArgsForCall)
}
func (fake *FakeWorker) DescriptionCalls(stub func() string) {
fake.descriptionMutex.Lock()
defer fake.descriptionMutex.Unlock()
fake.DescriptionStub = stub
}
func (fake *FakeWorker) DescriptionReturns(result1 string) {
fake.descriptionMutex.Lock()
defer fake.descriptionMutex.Unlock()
fake.DescriptionStub = nil
fake.descriptionReturns = struct {
result1 string
@ -450,6 +627,8 @@ func (fake *FakeWorker) DescriptionReturns(result1 string) {
}
func (fake *FakeWorker) DescriptionReturnsOnCall(i int, result1 string) {
fake.descriptionMutex.Lock()
defer fake.descriptionMutex.Unlock()
fake.DescriptionStub = nil
if fake.descriptionReturnsOnCall == nil {
fake.descriptionReturnsOnCall = make(map[int]struct {
@ -488,6 +667,12 @@ func (fake *FakeWorker) EnsureDBContainerExistsCallCount() int {
return len(fake.ensureDBContainerExistsArgsForCall)
}
func (fake *FakeWorker) EnsureDBContainerExistsCalls(stub func(context.Context, lager.Logger, db.ContainerOwner, db.ContainerMetadata) error) {
fake.ensureDBContainerExistsMutex.Lock()
defer fake.ensureDBContainerExistsMutex.Unlock()
fake.EnsureDBContainerExistsStub = stub
}
func (fake *FakeWorker) EnsureDBContainerExistsArgsForCall(i int) (context.Context, lager.Logger, db.ContainerOwner, db.ContainerMetadata) {
fake.ensureDBContainerExistsMutex.RLock()
defer fake.ensureDBContainerExistsMutex.RUnlock()
@ -496,6 +681,8 @@ func (fake *FakeWorker) EnsureDBContainerExistsArgsForCall(i int) (context.Conte
}
func (fake *FakeWorker) EnsureDBContainerExistsReturns(result1 error) {
fake.ensureDBContainerExistsMutex.Lock()
defer fake.ensureDBContainerExistsMutex.Unlock()
fake.EnsureDBContainerExistsStub = nil
fake.ensureDBContainerExistsReturns = struct {
result1 error
@ -503,6 +690,8 @@ func (fake *FakeWorker) EnsureDBContainerExistsReturns(result1 error) {
}
func (fake *FakeWorker) EnsureDBContainerExistsReturnsOnCall(i int, result1 error) {
fake.ensureDBContainerExistsMutex.Lock()
defer fake.ensureDBContainerExistsMutex.Unlock()
fake.EnsureDBContainerExistsStub = nil
if fake.ensureDBContainerExistsReturnsOnCall == nil {
fake.ensureDBContainerExistsReturnsOnCall = make(map[int]struct {
@ -537,7 +726,15 @@ func (fake *FakeWorker) EphemeralCallCount() int {
return len(fake.ephemeralArgsForCall)
}
func (fake *FakeWorker) EphemeralCalls(stub func() bool) {
fake.ephemeralMutex.Lock()
defer fake.ephemeralMutex.Unlock()
fake.EphemeralStub = stub
}
func (fake *FakeWorker) EphemeralReturns(result1 bool) {
fake.ephemeralMutex.Lock()
defer fake.ephemeralMutex.Unlock()
fake.EphemeralStub = nil
fake.ephemeralReturns = struct {
result1 bool
@ -545,6 +742,8 @@ func (fake *FakeWorker) EphemeralReturns(result1 bool) {
}
func (fake *FakeWorker) EphemeralReturnsOnCall(i int, result1 bool) {
fake.ephemeralMutex.Lock()
defer fake.ephemeralMutex.Unlock()
fake.EphemeralStub = nil
if fake.ephemeralReturnsOnCall == nil {
fake.ephemeralReturnsOnCall = make(map[int]struct {
@ -582,6 +781,12 @@ func (fake *FakeWorker) FindContainerByHandleCallCount() int {
return len(fake.findContainerByHandleArgsForCall)
}
func (fake *FakeWorker) FindContainerByHandleCalls(stub func(lager.Logger, int, string) (worker.Container, bool, error)) {
fake.findContainerByHandleMutex.Lock()
defer fake.findContainerByHandleMutex.Unlock()
fake.FindContainerByHandleStub = stub
}
func (fake *FakeWorker) FindContainerByHandleArgsForCall(i int) (lager.Logger, int, string) {
fake.findContainerByHandleMutex.RLock()
defer fake.findContainerByHandleMutex.RUnlock()
@ -590,6 +795,8 @@ func (fake *FakeWorker) FindContainerByHandleArgsForCall(i int) (lager.Logger, i
}
func (fake *FakeWorker) FindContainerByHandleReturns(result1 worker.Container, result2 bool, result3 error) {
fake.findContainerByHandleMutex.Lock()
defer fake.findContainerByHandleMutex.Unlock()
fake.FindContainerByHandleStub = nil
fake.findContainerByHandleReturns = struct {
result1 worker.Container
@ -599,6 +806,8 @@ func (fake *FakeWorker) FindContainerByHandleReturns(result1 worker.Container, r
}
func (fake *FakeWorker) FindContainerByHandleReturnsOnCall(i int, result1 worker.Container, result2 bool, result3 error) {
fake.findContainerByHandleMutex.Lock()
defer fake.findContainerByHandleMutex.Unlock()
fake.FindContainerByHandleStub = nil
if fake.findContainerByHandleReturnsOnCall == nil {
fake.findContainerByHandleReturnsOnCall = make(map[int]struct {
@ -644,6 +853,12 @@ func (fake *FakeWorker) FindOrCreateContainerCallCount() int {
return len(fake.findOrCreateContainerArgsForCall)
}
func (fake *FakeWorker) FindOrCreateContainerCalls(stub func(context.Context, lager.Logger, worker.ImageFetchingDelegate, db.ContainerOwner, db.ContainerMetadata, worker.ContainerSpec, atc.VersionedResourceTypes) (worker.Container, error)) {
fake.findOrCreateContainerMutex.Lock()
defer fake.findOrCreateContainerMutex.Unlock()
fake.FindOrCreateContainerStub = stub
}
func (fake *FakeWorker) FindOrCreateContainerArgsForCall(i int) (context.Context, lager.Logger, worker.ImageFetchingDelegate, db.ContainerOwner, db.ContainerMetadata, worker.ContainerSpec, atc.VersionedResourceTypes) {
fake.findOrCreateContainerMutex.RLock()
defer fake.findOrCreateContainerMutex.RUnlock()
@ -652,6 +867,8 @@ func (fake *FakeWorker) FindOrCreateContainerArgsForCall(i int) (context.Context
}
func (fake *FakeWorker) FindOrCreateContainerReturns(result1 worker.Container, result2 error) {
fake.findOrCreateContainerMutex.Lock()
defer fake.findOrCreateContainerMutex.Unlock()
fake.FindOrCreateContainerStub = nil
fake.findOrCreateContainerReturns = struct {
result1 worker.Container
@ -660,6 +877,8 @@ func (fake *FakeWorker) FindOrCreateContainerReturns(result1 worker.Container, r
}
func (fake *FakeWorker) FindOrCreateContainerReturnsOnCall(i int, result1 worker.Container, result2 error) {
fake.findOrCreateContainerMutex.Lock()
defer fake.findOrCreateContainerMutex.Unlock()
fake.FindOrCreateContainerStub = nil
if fake.findOrCreateContainerReturnsOnCall == nil {
fake.findOrCreateContainerReturnsOnCall = make(map[int]struct {
@ -698,6 +917,12 @@ func (fake *FakeWorker) FindVolumeForResourceCacheCallCount() int {
return len(fake.findVolumeForResourceCacheArgsForCall)
}
func (fake *FakeWorker) FindVolumeForResourceCacheCalls(stub func(lager.Logger, db.UsedResourceCache) (worker.Volume, bool, error)) {
fake.findVolumeForResourceCacheMutex.Lock()
defer fake.findVolumeForResourceCacheMutex.Unlock()
fake.FindVolumeForResourceCacheStub = stub
}
func (fake *FakeWorker) FindVolumeForResourceCacheArgsForCall(i int) (lager.Logger, db.UsedResourceCache) {
fake.findVolumeForResourceCacheMutex.RLock()
defer fake.findVolumeForResourceCacheMutex.RUnlock()
@ -706,6 +931,8 @@ func (fake *FakeWorker) FindVolumeForResourceCacheArgsForCall(i int) (lager.Logg
}
func (fake *FakeWorker) FindVolumeForResourceCacheReturns(result1 worker.Volume, result2 bool, result3 error) {
fake.findVolumeForResourceCacheMutex.Lock()
defer fake.findVolumeForResourceCacheMutex.Unlock()
fake.FindVolumeForResourceCacheStub = nil
fake.findVolumeForResourceCacheReturns = struct {
result1 worker.Volume
@ -715,6 +942,8 @@ func (fake *FakeWorker) FindVolumeForResourceCacheReturns(result1 worker.Volume,
}
func (fake *FakeWorker) FindVolumeForResourceCacheReturnsOnCall(i int, result1 worker.Volume, result2 bool, result3 error) {
fake.findVolumeForResourceCacheMutex.Lock()
defer fake.findVolumeForResourceCacheMutex.Unlock()
fake.FindVolumeForResourceCacheStub = nil
if fake.findVolumeForResourceCacheReturnsOnCall == nil {
fake.findVolumeForResourceCacheReturnsOnCall = make(map[int]struct {
@ -758,6 +987,12 @@ func (fake *FakeWorker) FindVolumeForTaskCacheCallCount() int {
return len(fake.findVolumeForTaskCacheArgsForCall)
}
func (fake *FakeWorker) FindVolumeForTaskCacheCalls(stub func(lager.Logger, int, int, string, string) (worker.Volume, bool, error)) {
fake.findVolumeForTaskCacheMutex.Lock()
defer fake.findVolumeForTaskCacheMutex.Unlock()
fake.FindVolumeForTaskCacheStub = stub
}
func (fake *FakeWorker) FindVolumeForTaskCacheArgsForCall(i int) (lager.Logger, int, int, string, string) {
fake.findVolumeForTaskCacheMutex.RLock()
defer fake.findVolumeForTaskCacheMutex.RUnlock()
@ -766,6 +1001,8 @@ func (fake *FakeWorker) FindVolumeForTaskCacheArgsForCall(i int) (lager.Logger,
}
func (fake *FakeWorker) FindVolumeForTaskCacheReturns(result1 worker.Volume, result2 bool, result3 error) {
fake.findVolumeForTaskCacheMutex.Lock()
defer fake.findVolumeForTaskCacheMutex.Unlock()
fake.FindVolumeForTaskCacheStub = nil
fake.findVolumeForTaskCacheReturns = struct {
result1 worker.Volume
@ -775,6 +1012,8 @@ func (fake *FakeWorker) FindVolumeForTaskCacheReturns(result1 worker.Volume, res
}
func (fake *FakeWorker) FindVolumeForTaskCacheReturnsOnCall(i int, result1 worker.Volume, result2 bool, result3 error) {
fake.findVolumeForTaskCacheMutex.Lock()
defer fake.findVolumeForTaskCacheMutex.Unlock()
fake.FindVolumeForTaskCacheStub = nil
if fake.findVolumeForTaskCacheReturnsOnCall == nil {
fake.findVolumeForTaskCacheReturnsOnCall = make(map[int]struct {
@ -813,7 +1052,15 @@ func (fake *FakeWorker) GardenClientCallCount() int {
return len(fake.gardenClientArgsForCall)
}
func (fake *FakeWorker) GardenClientCalls(stub func() garden.Client) {
fake.gardenClientMutex.Lock()
defer fake.gardenClientMutex.Unlock()
fake.GardenClientStub = stub
}
func (fake *FakeWorker) GardenClientReturns(result1 garden.Client) {
fake.gardenClientMutex.Lock()
defer fake.gardenClientMutex.Unlock()
fake.GardenClientStub = nil
fake.gardenClientReturns = struct {
result1 garden.Client
@ -821,6 +1068,8 @@ func (fake *FakeWorker) GardenClientReturns(result1 garden.Client) {
}
func (fake *FakeWorker) GardenClientReturnsOnCall(i int, result1 garden.Client) {
fake.gardenClientMutex.Lock()
defer fake.gardenClientMutex.Unlock()
fake.GardenClientStub = nil
if fake.gardenClientReturnsOnCall == nil {
fake.gardenClientReturnsOnCall = make(map[int]struct {
@ -832,6 +1081,58 @@ func (fake *FakeWorker) GardenClientReturnsOnCall(i int, result1 garden.Client)
}{result1}
}
func (fake *FakeWorker) IncreaseActiveTasks() error {
fake.increaseActiveTasksMutex.Lock()
ret, specificReturn := fake.increaseActiveTasksReturnsOnCall[len(fake.increaseActiveTasksArgsForCall)]
fake.increaseActiveTasksArgsForCall = append(fake.increaseActiveTasksArgsForCall, struct {
}{})
fake.recordInvocation("IncreaseActiveTasks", []interface{}{})
fake.increaseActiveTasksMutex.Unlock()
if fake.IncreaseActiveTasksStub != nil {
return fake.IncreaseActiveTasksStub()
}
if specificReturn {
return ret.result1
}
fakeReturns := fake.increaseActiveTasksReturns
return fakeReturns.result1
}
func (fake *FakeWorker) IncreaseActiveTasksCallCount() int {
fake.increaseActiveTasksMutex.RLock()
defer fake.increaseActiveTasksMutex.RUnlock()
return len(fake.increaseActiveTasksArgsForCall)
}
func (fake *FakeWorker) IncreaseActiveTasksCalls(stub func() error) {
fake.increaseActiveTasksMutex.Lock()
defer fake.increaseActiveTasksMutex.Unlock()
fake.IncreaseActiveTasksStub = stub
}
func (fake *FakeWorker) IncreaseActiveTasksReturns(result1 error) {
fake.increaseActiveTasksMutex.Lock()
defer fake.increaseActiveTasksMutex.Unlock()
fake.IncreaseActiveTasksStub = nil
fake.increaseActiveTasksReturns = struct {
result1 error
}{result1}
}
func (fake *FakeWorker) IncreaseActiveTasksReturnsOnCall(i int, result1 error) {
fake.increaseActiveTasksMutex.Lock()
defer fake.increaseActiveTasksMutex.Unlock()
fake.IncreaseActiveTasksStub = nil
if fake.increaseActiveTasksReturnsOnCall == nil {
fake.increaseActiveTasksReturnsOnCall = make(map[int]struct {
result1 error
})
}
fake.increaseActiveTasksReturnsOnCall[i] = struct {
result1 error
}{result1}
}
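// A minimal sketch (not part of this changeset) of how a test might drive the
// regenerated FakeWorker to simulate a busy worker when exercising the new
// limit-active-tasks behaviour. It assumes the standard counterfeiter layout,
// i.e. that this package lives at
// github.com/concourse/concourse/atc/worker/workerfakes; the test name and
// the simulated counts are hypothetical.

package worker_test

import (
	"testing"

	"github.com/concourse/concourse/atc/worker/workerfakes"
)

func TestFakeWorkerActiveTaskCounters(t *testing.T) {
	fakeWorker := new(workerfakes.FakeWorker)

	// Pretend the worker is already running three build tasks and accepts the
	// bookkeeping calls made when tasks start and finish.
	fakeWorker.ActiveTasksReturns(3, nil)
	fakeWorker.IncreaseActiveTasksReturns(nil)
	fakeWorker.DecreaseActiveTasksReturns(nil)

	if count, err := fakeWorker.ActiveTasks(); err != nil || count != 3 {
		t.Fatalf("expected 3 active tasks, got %d (err: %v)", count, err)
	}
	if err := fakeWorker.IncreaseActiveTasks(); err != nil {
		t.Fatalf("unexpected error increasing active tasks: %v", err)
	}
	if fakeWorker.IncreaseActiveTasksCallCount() != 1 {
		t.Fatal("expected exactly one IncreaseActiveTasks call to be recorded")
	}
}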
func (fake *FakeWorker) IsOwnedByTeam() bool {
fake.isOwnedByTeamMutex.Lock()
ret, specificReturn := fake.isOwnedByTeamReturnsOnCall[len(fake.isOwnedByTeamArgsForCall)]
@ -855,7 +1156,15 @@ func (fake *FakeWorker) IsOwnedByTeamCallCount() int {
return len(fake.isOwnedByTeamArgsForCall)
}
func (fake *FakeWorker) IsOwnedByTeamCalls(stub func() bool) {
fake.isOwnedByTeamMutex.Lock()
defer fake.isOwnedByTeamMutex.Unlock()
fake.IsOwnedByTeamStub = stub
}
func (fake *FakeWorker) IsOwnedByTeamReturns(result1 bool) {
fake.isOwnedByTeamMutex.Lock()
defer fake.isOwnedByTeamMutex.Unlock()
fake.IsOwnedByTeamStub = nil
fake.isOwnedByTeamReturns = struct {
result1 bool
@ -863,6 +1172,8 @@ func (fake *FakeWorker) IsOwnedByTeamReturns(result1 bool) {
}
func (fake *FakeWorker) IsOwnedByTeamReturnsOnCall(i int, result1 bool) {
fake.isOwnedByTeamMutex.Lock()
defer fake.isOwnedByTeamMutex.Unlock()
fake.IsOwnedByTeamStub = nil
if fake.isOwnedByTeamReturnsOnCall == nil {
fake.isOwnedByTeamReturnsOnCall = make(map[int]struct {
@ -899,6 +1210,12 @@ func (fake *FakeWorker) IsVersionCompatibleCallCount() int {
return len(fake.isVersionCompatibleArgsForCall)
}
func (fake *FakeWorker) IsVersionCompatibleCalls(stub func(lager.Logger, version.Version) bool) {
fake.isVersionCompatibleMutex.Lock()
defer fake.isVersionCompatibleMutex.Unlock()
fake.IsVersionCompatibleStub = stub
}
func (fake *FakeWorker) IsVersionCompatibleArgsForCall(i int) (lager.Logger, version.Version) {
fake.isVersionCompatibleMutex.RLock()
defer fake.isVersionCompatibleMutex.RUnlock()
@ -907,6 +1224,8 @@ func (fake *FakeWorker) IsVersionCompatibleArgsForCall(i int) (lager.Logger, ver
}
func (fake *FakeWorker) IsVersionCompatibleReturns(result1 bool) {
fake.isVersionCompatibleMutex.Lock()
defer fake.isVersionCompatibleMutex.Unlock()
fake.IsVersionCompatibleStub = nil
fake.isVersionCompatibleReturns = struct {
result1 bool
@ -914,6 +1233,8 @@ func (fake *FakeWorker) IsVersionCompatibleReturns(result1 bool) {
}
func (fake *FakeWorker) IsVersionCompatibleReturnsOnCall(i int, result1 bool) {
fake.isVersionCompatibleMutex.Lock()
defer fake.isVersionCompatibleMutex.Unlock()
fake.IsVersionCompatibleStub = nil
if fake.isVersionCompatibleReturnsOnCall == nil {
fake.isVersionCompatibleReturnsOnCall = make(map[int]struct {
@ -950,6 +1271,12 @@ func (fake *FakeWorker) LookupVolumeCallCount() int {
return len(fake.lookupVolumeArgsForCall)
}
func (fake *FakeWorker) LookupVolumeCalls(stub func(lager.Logger, string) (worker.Volume, bool, error)) {
fake.lookupVolumeMutex.Lock()
defer fake.lookupVolumeMutex.Unlock()
fake.LookupVolumeStub = stub
}
func (fake *FakeWorker) LookupVolumeArgsForCall(i int) (lager.Logger, string) {
fake.lookupVolumeMutex.RLock()
defer fake.lookupVolumeMutex.RUnlock()
@ -958,6 +1285,8 @@ func (fake *FakeWorker) LookupVolumeArgsForCall(i int) (lager.Logger, string) {
}
func (fake *FakeWorker) LookupVolumeReturns(result1 worker.Volume, result2 bool, result3 error) {
fake.lookupVolumeMutex.Lock()
defer fake.lookupVolumeMutex.Unlock()
fake.LookupVolumeStub = nil
fake.lookupVolumeReturns = struct {
result1 worker.Volume
@ -967,6 +1296,8 @@ func (fake *FakeWorker) LookupVolumeReturns(result1 worker.Volume, result2 bool,
}
func (fake *FakeWorker) LookupVolumeReturnsOnCall(i int, result1 worker.Volume, result2 bool, result3 error) {
fake.lookupVolumeMutex.Lock()
defer fake.lookupVolumeMutex.Unlock()
fake.LookupVolumeStub = nil
if fake.lookupVolumeReturnsOnCall == nil {
fake.lookupVolumeReturnsOnCall = make(map[int]struct {
@ -1005,7 +1336,15 @@ func (fake *FakeWorker) NameCallCount() int {
return len(fake.nameArgsForCall)
}
func (fake *FakeWorker) NameCalls(stub func() string) {
fake.nameMutex.Lock()
defer fake.nameMutex.Unlock()
fake.NameStub = stub
}
func (fake *FakeWorker) NameReturns(result1 string) {
fake.nameMutex.Lock()
defer fake.nameMutex.Unlock()
fake.NameStub = nil
fake.nameReturns = struct {
result1 string
@ -1013,6 +1352,8 @@ func (fake *FakeWorker) NameReturns(result1 string) {
}
func (fake *FakeWorker) NameReturnsOnCall(i int, result1 string) {
fake.nameMutex.Lock()
defer fake.nameMutex.Unlock()
fake.NameStub = nil
if fake.nameReturnsOnCall == nil {
fake.nameReturnsOnCall = make(map[int]struct {
@ -1047,7 +1388,15 @@ func (fake *FakeWorker) ResourceTypesCallCount() int {
return len(fake.resourceTypesArgsForCall)
}
func (fake *FakeWorker) ResourceTypesCalls(stub func() []atc.WorkerResourceType) {
fake.resourceTypesMutex.Lock()
defer fake.resourceTypesMutex.Unlock()
fake.ResourceTypesStub = stub
}
func (fake *FakeWorker) ResourceTypesReturns(result1 []atc.WorkerResourceType) {
fake.resourceTypesMutex.Lock()
defer fake.resourceTypesMutex.Unlock()
fake.ResourceTypesStub = nil
fake.resourceTypesReturns = struct {
result1 []atc.WorkerResourceType
@ -1055,6 +1404,8 @@ func (fake *FakeWorker) ResourceTypesReturns(result1 []atc.WorkerResourceType) {
}
func (fake *FakeWorker) ResourceTypesReturnsOnCall(i int, result1 []atc.WorkerResourceType) {
fake.resourceTypesMutex.Lock()
defer fake.resourceTypesMutex.Unlock()
fake.ResourceTypesStub = nil
if fake.resourceTypesReturnsOnCall == nil {
fake.resourceTypesReturnsOnCall = make(map[int]struct {
@ -1091,6 +1442,12 @@ func (fake *FakeWorker) SatisfiesCallCount() int {
return len(fake.satisfiesArgsForCall)
}
func (fake *FakeWorker) SatisfiesCalls(stub func(lager.Logger, worker.WorkerSpec) bool) {
fake.satisfiesMutex.Lock()
defer fake.satisfiesMutex.Unlock()
fake.SatisfiesStub = stub
}
func (fake *FakeWorker) SatisfiesArgsForCall(i int) (lager.Logger, worker.WorkerSpec) {
fake.satisfiesMutex.RLock()
defer fake.satisfiesMutex.RUnlock()
@ -1099,6 +1456,8 @@ func (fake *FakeWorker) SatisfiesArgsForCall(i int) (lager.Logger, worker.Worker
}
func (fake *FakeWorker) SatisfiesReturns(result1 bool) {
fake.satisfiesMutex.Lock()
defer fake.satisfiesMutex.Unlock()
fake.SatisfiesStub = nil
fake.satisfiesReturns = struct {
result1 bool
@ -1106,6 +1465,8 @@ func (fake *FakeWorker) SatisfiesReturns(result1 bool) {
}
func (fake *FakeWorker) SatisfiesReturnsOnCall(i int, result1 bool) {
fake.satisfiesMutex.Lock()
defer fake.satisfiesMutex.Unlock()
fake.SatisfiesStub = nil
if fake.satisfiesReturnsOnCall == nil {
fake.satisfiesReturnsOnCall = make(map[int]struct {
@ -1140,7 +1501,15 @@ func (fake *FakeWorker) TagsCallCount() int {
return len(fake.tagsArgsForCall)
}
func (fake *FakeWorker) TagsCalls(stub func() atc.Tags) {
fake.tagsMutex.Lock()
defer fake.tagsMutex.Unlock()
fake.TagsStub = stub
}
func (fake *FakeWorker) TagsReturns(result1 atc.Tags) {
fake.tagsMutex.Lock()
defer fake.tagsMutex.Unlock()
fake.TagsStub = nil
fake.tagsReturns = struct {
result1 atc.Tags
@ -1148,6 +1517,8 @@ func (fake *FakeWorker) TagsReturns(result1 atc.Tags) {
}
func (fake *FakeWorker) TagsReturnsOnCall(i int, result1 atc.Tags) {
fake.tagsMutex.Lock()
defer fake.tagsMutex.Unlock()
fake.TagsStub = nil
if fake.tagsReturnsOnCall == nil {
fake.tagsReturnsOnCall = make(map[int]struct {
@ -1182,7 +1553,15 @@ func (fake *FakeWorker) UptimeCallCount() int {
return len(fake.uptimeArgsForCall)
}
func (fake *FakeWorker) UptimeCalls(stub func() time.Duration) {
fake.uptimeMutex.Lock()
defer fake.uptimeMutex.Unlock()
fake.UptimeStub = stub
}
func (fake *FakeWorker) UptimeReturns(result1 time.Duration) {
fake.uptimeMutex.Lock()
defer fake.uptimeMutex.Unlock()
fake.UptimeStub = nil
fake.uptimeReturns = struct {
result1 time.Duration
@ -1190,6 +1569,8 @@ func (fake *FakeWorker) UptimeReturns(result1 time.Duration) {
}
func (fake *FakeWorker) UptimeReturnsOnCall(i int, result1 time.Duration) {
fake.uptimeMutex.Lock()
defer fake.uptimeMutex.Unlock()
fake.UptimeStub = nil
if fake.uptimeReturnsOnCall == nil {
fake.uptimeReturnsOnCall = make(map[int]struct {
@ -1204,12 +1585,16 @@ func (fake *FakeWorker) UptimeReturnsOnCall(i int, result1 time.Duration) {
func (fake *FakeWorker) Invocations() map[string][][]interface{} {
fake.invocationsMutex.RLock()
defer fake.invocationsMutex.RUnlock()
fake.activeTasksMutex.RLock()
defer fake.activeTasksMutex.RUnlock()
fake.buildContainersMutex.RLock()
defer fake.buildContainersMutex.RUnlock()
fake.certsVolumeMutex.RLock()
defer fake.certsVolumeMutex.RUnlock()
fake.createVolumeMutex.RLock()
defer fake.createVolumeMutex.RUnlock()
fake.decreaseActiveTasksMutex.RLock()
defer fake.decreaseActiveTasksMutex.RUnlock()
fake.descriptionMutex.RLock()
defer fake.descriptionMutex.RUnlock()
fake.ensureDBContainerExistsMutex.RLock()
@ -1226,6 +1611,8 @@ func (fake *FakeWorker) Invocations() map[string][][]interface{} {
defer fake.findVolumeForTaskCacheMutex.RUnlock()
fake.gardenClientMutex.RLock()
defer fake.gardenClientMutex.RUnlock()
fake.increaseActiveTasksMutex.RLock()
defer fake.increaseActiveTasksMutex.RUnlock()
fake.isOwnedByTeamMutex.RLock()
defer fake.isOwnedByTeamMutex.RUnlock()
fake.isVersionCompatibleMutex.RLock()


@ -2,12 +2,12 @@
package workerfakes
import (
sync "sync"
"sync"
clock "code.cloudfoundry.org/clock"
lager "code.cloudfoundry.org/lager"
db "github.com/concourse/concourse/atc/db"
worker "github.com/concourse/concourse/atc/worker"
"code.cloudfoundry.org/clock"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/worker"
)
type FakeWorkerProvider struct {
@ -116,6 +116,12 @@ func (fake *FakeWorkerProvider) FindWorkerForContainerCallCount() int {
return len(fake.findWorkerForContainerArgsForCall)
}
func (fake *FakeWorkerProvider) FindWorkerForContainerCalls(stub func(lager.Logger, int, string) (worker.Worker, bool, error)) {
fake.findWorkerForContainerMutex.Lock()
defer fake.findWorkerForContainerMutex.Unlock()
fake.FindWorkerForContainerStub = stub
}
func (fake *FakeWorkerProvider) FindWorkerForContainerArgsForCall(i int) (lager.Logger, int, string) {
fake.findWorkerForContainerMutex.RLock()
defer fake.findWorkerForContainerMutex.RUnlock()
@ -124,6 +130,8 @@ func (fake *FakeWorkerProvider) FindWorkerForContainerArgsForCall(i int) (lager.
}
func (fake *FakeWorkerProvider) FindWorkerForContainerReturns(result1 worker.Worker, result2 bool, result3 error) {
fake.findWorkerForContainerMutex.Lock()
defer fake.findWorkerForContainerMutex.Unlock()
fake.FindWorkerForContainerStub = nil
fake.findWorkerForContainerReturns = struct {
result1 worker.Worker
@ -133,6 +141,8 @@ func (fake *FakeWorkerProvider) FindWorkerForContainerReturns(result1 worker.Wor
}
func (fake *FakeWorkerProvider) FindWorkerForContainerReturnsOnCall(i int, result1 worker.Worker, result2 bool, result3 error) {
fake.findWorkerForContainerMutex.Lock()
defer fake.findWorkerForContainerMutex.Unlock()
fake.FindWorkerForContainerStub = nil
if fake.findWorkerForContainerReturnsOnCall == nil {
fake.findWorkerForContainerReturnsOnCall = make(map[int]struct {
@ -174,6 +184,12 @@ func (fake *FakeWorkerProvider) FindWorkerForVolumeCallCount() int {
return len(fake.findWorkerForVolumeArgsForCall)
}
func (fake *FakeWorkerProvider) FindWorkerForVolumeCalls(stub func(lager.Logger, int, string) (worker.Worker, bool, error)) {
fake.findWorkerForVolumeMutex.Lock()
defer fake.findWorkerForVolumeMutex.Unlock()
fake.FindWorkerForVolumeStub = stub
}
func (fake *FakeWorkerProvider) FindWorkerForVolumeArgsForCall(i int) (lager.Logger, int, string) {
fake.findWorkerForVolumeMutex.RLock()
defer fake.findWorkerForVolumeMutex.RUnlock()
@ -182,6 +198,8 @@ func (fake *FakeWorkerProvider) FindWorkerForVolumeArgsForCall(i int) (lager.Log
}
func (fake *FakeWorkerProvider) FindWorkerForVolumeReturns(result1 worker.Worker, result2 bool, result3 error) {
fake.findWorkerForVolumeMutex.Lock()
defer fake.findWorkerForVolumeMutex.Unlock()
fake.FindWorkerForVolumeStub = nil
fake.findWorkerForVolumeReturns = struct {
result1 worker.Worker
@ -191,6 +209,8 @@ func (fake *FakeWorkerProvider) FindWorkerForVolumeReturns(result1 worker.Worker
}
func (fake *FakeWorkerProvider) FindWorkerForVolumeReturnsOnCall(i int, result1 worker.Worker, result2 bool, result3 error) {
fake.findWorkerForVolumeMutex.Lock()
defer fake.findWorkerForVolumeMutex.Unlock()
fake.FindWorkerForVolumeStub = nil
if fake.findWorkerForVolumeReturnsOnCall == nil {
fake.findWorkerForVolumeReturnsOnCall = make(map[int]struct {
@ -231,6 +251,12 @@ func (fake *FakeWorkerProvider) FindWorkersForContainerByOwnerCallCount() int {
return len(fake.findWorkersForContainerByOwnerArgsForCall)
}
func (fake *FakeWorkerProvider) FindWorkersForContainerByOwnerCalls(stub func(lager.Logger, db.ContainerOwner) ([]worker.Worker, error)) {
fake.findWorkersForContainerByOwnerMutex.Lock()
defer fake.findWorkersForContainerByOwnerMutex.Unlock()
fake.FindWorkersForContainerByOwnerStub = stub
}
func (fake *FakeWorkerProvider) FindWorkersForContainerByOwnerArgsForCall(i int) (lager.Logger, db.ContainerOwner) {
fake.findWorkersForContainerByOwnerMutex.RLock()
defer fake.findWorkersForContainerByOwnerMutex.RUnlock()
@ -239,6 +265,8 @@ func (fake *FakeWorkerProvider) FindWorkersForContainerByOwnerArgsForCall(i int)
}
func (fake *FakeWorkerProvider) FindWorkersForContainerByOwnerReturns(result1 []worker.Worker, result2 error) {
fake.findWorkersForContainerByOwnerMutex.Lock()
defer fake.findWorkersForContainerByOwnerMutex.Unlock()
fake.FindWorkersForContainerByOwnerStub = nil
fake.findWorkersForContainerByOwnerReturns = struct {
result1 []worker.Worker
@ -247,6 +275,8 @@ func (fake *FakeWorkerProvider) FindWorkersForContainerByOwnerReturns(result1 []
}
func (fake *FakeWorkerProvider) FindWorkersForContainerByOwnerReturnsOnCall(i int, result1 []worker.Worker, result2 error) {
fake.findWorkersForContainerByOwnerMutex.Lock()
defer fake.findWorkersForContainerByOwnerMutex.Unlock()
fake.FindWorkersForContainerByOwnerStub = nil
if fake.findWorkersForContainerByOwnerReturnsOnCall == nil {
fake.findWorkersForContainerByOwnerReturnsOnCall = make(map[int]struct {
@ -287,6 +317,12 @@ func (fake *FakeWorkerProvider) NewGardenWorkerCallCount() int {
return len(fake.newGardenWorkerArgsForCall)
}
func (fake *FakeWorkerProvider) NewGardenWorkerCalls(stub func(lager.Logger, clock.Clock, db.Worker, int) worker.Worker) {
fake.newGardenWorkerMutex.Lock()
defer fake.newGardenWorkerMutex.Unlock()
fake.NewGardenWorkerStub = stub
}
func (fake *FakeWorkerProvider) NewGardenWorkerArgsForCall(i int) (lager.Logger, clock.Clock, db.Worker, int) {
fake.newGardenWorkerMutex.RLock()
defer fake.newGardenWorkerMutex.RUnlock()
@ -295,6 +331,8 @@ func (fake *FakeWorkerProvider) NewGardenWorkerArgsForCall(i int) (lager.Logger,
}
func (fake *FakeWorkerProvider) NewGardenWorkerReturns(result1 worker.Worker) {
fake.newGardenWorkerMutex.Lock()
defer fake.newGardenWorkerMutex.Unlock()
fake.NewGardenWorkerStub = nil
fake.newGardenWorkerReturns = struct {
result1 worker.Worker
@ -302,6 +340,8 @@ func (fake *FakeWorkerProvider) NewGardenWorkerReturns(result1 worker.Worker) {
}
func (fake *FakeWorkerProvider) NewGardenWorkerReturnsOnCall(i int, result1 worker.Worker) {
fake.newGardenWorkerMutex.Lock()
defer fake.newGardenWorkerMutex.Unlock()
fake.NewGardenWorkerStub = nil
if fake.newGardenWorkerReturnsOnCall == nil {
fake.newGardenWorkerReturnsOnCall = make(map[int]struct {
@ -337,6 +377,12 @@ func (fake *FakeWorkerProvider) RunningWorkersCallCount() int {
return len(fake.runningWorkersArgsForCall)
}
func (fake *FakeWorkerProvider) RunningWorkersCalls(stub func(lager.Logger) ([]worker.Worker, error)) {
fake.runningWorkersMutex.Lock()
defer fake.runningWorkersMutex.Unlock()
fake.RunningWorkersStub = stub
}
func (fake *FakeWorkerProvider) RunningWorkersArgsForCall(i int) lager.Logger {
fake.runningWorkersMutex.RLock()
defer fake.runningWorkersMutex.RUnlock()
@ -345,6 +391,8 @@ func (fake *FakeWorkerProvider) RunningWorkersArgsForCall(i int) lager.Logger {
}
func (fake *FakeWorkerProvider) RunningWorkersReturns(result1 []worker.Worker, result2 error) {
fake.runningWorkersMutex.Lock()
defer fake.runningWorkersMutex.Unlock()
fake.RunningWorkersStub = nil
fake.runningWorkersReturns = struct {
result1 []worker.Worker
@ -353,6 +401,8 @@ func (fake *FakeWorkerProvider) RunningWorkersReturns(result1 []worker.Worker, r
}
func (fake *FakeWorkerProvider) RunningWorkersReturnsOnCall(i int, result1 []worker.Worker, result2 error) {
fake.runningWorkersMutex.Lock()
defer fake.runningWorkersMutex.Unlock()
fake.RunningWorkersStub = nil
if fake.runningWorkersReturnsOnCall == nil {
fake.runningWorkersReturnsOnCall = make(map[int]struct {


@ -2,11 +2,11 @@
package concoursefakes
import (
io "io"
sync "sync"
"io"
"sync"
atc "github.com/concourse/concourse/atc"
concourse "github.com/concourse/concourse/go-concourse/concourse"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/go-concourse/concourse"
)
type FakeTeam struct {