diff --git a/go.mod b/go.mod index ad1a7d378..e38c0c935 100644 --- a/go.mod +++ b/go.mod @@ -84,6 +84,7 @@ require ( github.com/gobuffalo/packr v1.13.7 github.com/gocql/gocql v0.0.0-20180920092337-799fb0373110 // indirect github.com/gogo/googleapis v1.3.0 // indirect + github.com/gogo/protobuf v1.3.0 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect github.com/google/go-cmp v0.3.1 // indirect @@ -129,7 +130,7 @@ require ( github.com/hashicorp/vault-plugin-secrets-gcpkms v0.0.0-20181212182553-6cd991800a6d // indirect github.com/hashicorp/vault-plugin-secrets-kv v0.0.0-20180825215324-5a464a61f7de // indirect github.com/hashicorp/yamux v0.0.0-20180917205041-7221087c3d28 // indirect - github.com/imdario/mergo v0.3.6 // indirect + github.com/imdario/mergo v0.3.6 github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf github.com/influxdata/influxdb1-client v0.0.0-20190118215656-f8cdb5d5f175 github.com/jeffchao/backoff v0.0.0-20140404060208-9d7fd7aa17f2 // indirect @@ -161,7 +162,7 @@ require ( github.com/opencontainers/go-digest v1.0.0-rc1 // indirect github.com/opencontainers/image-spec v1.0.1 // indirect github.com/opencontainers/runc v0.1.1 // indirect - github.com/opencontainers/runtime-spec v1.0.1 // indirect + github.com/opencontainers/runtime-spec v1.0.1 github.com/ory-am/common v0.4.0 // indirect github.com/ory/dockertest v3.3.2+incompatible // indirect github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c // indirect diff --git a/worker/backend/backend.go b/worker/backend/backend.go index a0f631f8c..88db9accf 100644 --- a/worker/backend/backend.go +++ b/worker/backend/backend.go @@ -7,21 +7,27 @@ import ( "code.cloudfoundry.org/garden" "github.com/concourse/concourse/worker/backend/libcontainerd" + bespec "github.com/concourse/concourse/worker/backend/spec" + "github.com/containerd/containerd/cio" + "github.com/containerd/containerd/namespaces" + specs "github.com/opencontainers/runtime-spec/specs-go" ) var _ garden.Backend = (*Backend)(nil) type Backend struct { - client libcontainerd.Client + client libcontainerd.Client + namespace string } -func New(client libcontainerd.Client) Backend { +func New(client libcontainerd.Client, namespace string) Backend { return Backend{ - client: client, + namespace: namespace, + client: client, } } -// Start sets up the connectivity to `containerd`. +// Start initializes the client. // func (b *Backend) Start() (err error) { err = b.client.Init() @@ -33,7 +39,12 @@ func (b *Backend) Start() (err error) { return } -func (b *Backend) Stop() {} +// Stop closes the client's underlying connections and frees any resources +// associated with it. +// +func (b *Backend) Stop() { + _ = b.client.Stop() +} func (b *Backend) GraceTime(container garden.Container) (duration time.Duration) { return @@ -41,13 +52,6 @@ func (b *Backend) GraceTime(container garden.Container) (duration time.Duration) // Pings the garden server in order to check connectivity. // -// The server may, optionally, respond with specific errors indicating health -// issues. 
-// -// Errors: -// * garden.UnrecoverableError indicates that the garden server has entered an error state from which it cannot recover -// -// TODO - we might use the `version` service here as a proxy to "ping" func (b *Backend) Ping() (err error) { err = b.client.Version(context.Background()) return @@ -61,11 +65,32 @@ func (b *Backend) Capacity() (capacity garden.Capacity, err error) { return } // Create creates a new container. // -// Errors: -// * When the handle, if specified, is already taken. -// * When one of the bind_mount paths does not exist. -// * When resource allocations fail (subnet, user ID, etc). -func (b *Backend) Create(spec garden.ContainerSpec) (container garden.Container, err error) { +func (b *Backend) Create(gdnSpec garden.ContainerSpec) (container garden.Container, err error) { + var ( + oci *specs.Spec + ctx = namespaces.WithNamespace(context.Background(), b.namespace) + ) + + oci, err = bespec.OciSpec(gdnSpec) + if err != nil { + err = fmt.Errorf("failed to convert garden spec to oci spec: %w", err) + return + } + + cont, err := b.client.NewContainer(ctx, + gdnSpec.Handle, gdnSpec.Properties, oci, + ) + if err != nil { + err = fmt.Errorf("failed to create a container in containerd: %w", err) + return + } + + _, err = cont.NewTask(ctx, cio.NullIO) + if err != nil { + err = fmt.Errorf("failed to create a task in container: %w", err) + return + } + return } @@ -88,6 +113,24 @@ func (b *Backend) Destroy(handle string) (err error) { return } // Errors: // * None. func (b *Backend) Containers(properties garden.Properties) (containers []garden.Container, err error) { + var ctx = namespaces.WithNamespace(context.Background(), b.namespace) + + filters, err := propertiesToFilterList(properties) + if err != nil { + return + } + + res, err := b.client.Containers(ctx, filters...) + if err != nil { + return + } + + containers = make([]garden.Container, len(res)) + for idx := range res { + gContainer := NewContainer() + containers[idx] = &gContainer + } + return } @@ -106,3 +149,23 @@ func (b *Backend) BulkMetrics(handles []string) (metrics map[string]garden.Conta // Errors: // * Container not found. func (b *Backend) Lookup(handle string) (container garden.Container, err error) { return } + +// propertiesToFilterList converts a set of garden properties to a list of +// filters as expected by containerd. 
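+//
+// As a rough illustration (not part of the change itself), a property set
+// such as {"foo": "bar", "caz": "zaz"} is expected to become the label
+// filters []string{"foo=bar", "caz=zaz"} (in no particular order, since map
+// iteration order is not defined); an empty key or value is rejected with an
+// error.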
+// +func propertiesToFilterList(properties garden.Properties) (filters []string, err error) { + filters = make([]string, len(properties)) + + idx := 0 + for k, v := range properties { + if k == "" || v == "" { + err = fmt.Errorf("key or value must not be empty") + return + } + + filters[idx] = k + "=" + v + idx++ + } + + return +} diff --git a/worker/backend/backend_test.go b/worker/backend/backend_test.go index 4d63a52d0..461d2efbe 100644 --- a/worker/backend/backend_test.go +++ b/worker/backend/backend_test.go @@ -4,8 +4,11 @@ import ( "errors" "testing" + "code.cloudfoundry.org/garden" "github.com/concourse/concourse/worker/backend" "github.com/concourse/concourse/worker/backend/libcontainerd/libcontainerdfakes" + "github.com/containerd/containerd" + "github.com/containerd/containerd/namespaces" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" ) @@ -18,9 +21,11 @@ type BackendSuite struct { client *libcontainerdfakes.FakeClient } +const testNamespace = "test-namespace" + func (s *BackendSuite) SetupTest() { s.client = new(libcontainerdfakes.FakeClient) - s.backend = backend.New(s.client) + s.backend = backend.New(s.client, testNamespace) } func (s *BackendSuite) TestPing() { @@ -54,6 +59,131 @@ func (s *BackendSuite) TestPing() { } } +var ( + invalidGdnSpec = garden.ContainerSpec{} + minimumValidGdnSpec = garden.ContainerSpec{ + Handle: "handle", RootFSPath: "raw:///rootfs", + } +) + +func (s *BackendSuite) TestCreateWithInvalidSpec() { + _, err := s.backend.Create(invalidGdnSpec) + + s.Error(err) + s.Equal(0, s.client.NewContainerCallCount()) +} + +func (s *BackendSuite) TestCreateWithNewContainerFailure() { + s.client.NewContainerReturns(nil, errors.New("err")) + + _, err := s.backend.Create(minimumValidGdnSpec) + s.Error(err) + + s.Equal(1, s.client.NewContainerCallCount()) +} + +func (s *BackendSuite) TestCreateSetsNamespace() { + fakeContainer := new(libcontainerdfakes.FakeContainer) + s.client.NewContainerReturns(fakeContainer, nil) + + _, _ = s.backend.Create(minimumValidGdnSpec) + s.Equal(1, s.client.NewContainerCallCount()) + + ctx, _, _, _ := s.client.NewContainerArgsForCall(0) + namespace, ok := namespaces.Namespace(ctx) + s.True(ok) + s.Equal(testNamespace, namespace) +} + +func (s *BackendSuite) TestCreateContainerNewTaskFailure() { + fakeContainer := new(libcontainerdfakes.FakeContainer) + fakeContainer.NewTaskReturns(nil, errors.New("err")) + + s.client.NewContainerReturns(fakeContainer, nil) + + _, err := s.backend.Create(minimumValidGdnSpec) + s.Error(err) + + s.Equal(1, fakeContainer.NewTaskCallCount()) +} + +func (s *BackendSuite) TestContainersWithContainerdFailure() { + s.client.ContainersReturns(nil, errors.New("err")) + + _, err := s.backend.Containers(nil) + s.Error(err) + s.Equal(1, s.client.ContainersCallCount()) +} + +func (s *BackendSuite) TestContainersSetsNamespace() { + _, _ = s.backend.Containers(nil) + s.Equal(1, s.client.ContainersCallCount()) + + ctx, _ := s.client.ContainersArgsForCall(0) + namespace, ok := namespaces.Namespace(ctx) + s.True(ok) + s.Equal(testNamespace, namespace) +} + +func (s *BackendSuite) TestContainersWithInvalidPropertyFilters() { + for _, tc := range []struct { + desc string + filter map[string]string + }{ + { + desc: "empty key", + filter: map[string]string{ + "": "bar", + }, + }, + { + desc: "empty value", + filter: map[string]string{ + "foo": "", + }, + }, + } { + s.T().Run(tc.desc, func(t *testing.T) { + _, err := s.backend.Containers(tc.filter) + + s.Error(err) + s.Equal(0, 
s.client.ContainersCallCount()) + }) + } +} + +func (s *BackendSuite) TestContainersWithProperProperties() { + _, _ = s.backend.Containers(map[string]string{"foo": "bar", "caz": "zaz"}) + s.Equal(1, s.client.ContainersCallCount()) + + _, labelSet := s.client.ContainersArgsForCall(0) + s.ElementsMatch([]string{"foo=bar", "caz=zaz"}, labelSet) +} + +func (s *BackendSuite) TestContainersConversion() { + fakeContainer1 := new(libcontainerdfakes.FakeContainer) + fakeContainer2 := new(libcontainerdfakes.FakeContainer) + + s.client.ContainersReturns([]containerd.Container{ + fakeContainer1, fakeContainer2, + }, nil) + + containers, err := s.backend.Containers(nil) + s.NoError(err) + s.Equal(1, s.client.ContainersCallCount()) + s.Len(containers, 2) +} + +func (s *BackendSuite) TestStart() { + s.backend.Start() + s.Equal(1, s.client.InitCallCount()) +} + +func (s *BackendSuite) TestStop() { + s.backend.Stop() + s.Equal(1, s.client.StopCallCount()) +} + func TestSuite(t *testing.T) { suite.Run(t, &BackendSuite{ Assertions: require.New(t), diff --git a/worker/backend/container.go b/worker/backend/container.go index 291908d68..6ba0afa40 100644 --- a/worker/backend/container.go +++ b/worker/backend/container.go @@ -9,6 +9,10 @@ import ( type Container struct{} +func NewContainer() Container { + return Container{} +} + var _ garden.Container = (*Container)(nil) func (c *Container) Handle() (handle string) { return } diff --git a/worker/backend/integration/integration_test.go b/worker/backend/integration/integration_test.go new file mode 100644 index 000000000..360b753e9 --- /dev/null +++ b/worker/backend/integration/integration_test.go @@ -0,0 +1,77 @@ +package integration_test + +import ( + "path/filepath" + "strconv" + "testing" + "time" + + "code.cloudfoundry.org/garden" + "github.com/concourse/concourse/worker/backend" + "github.com/concourse/concourse/worker/backend/libcontainerd" + uuid "github.com/nu7hatch/gouuid" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type BackendSuite struct { + suite.Suite + *require.Assertions + + backend backend.Backend + client *libcontainerd.Client +} + +func (s *BackendSuite) SetupTest() { + namespace := "test" + strconv.FormatInt(time.Now().UnixNano(), 10) + + s.backend = backend.New( + libcontainerd.New("/run/containerd/containerd.sock"), + namespace, + ) + + s.NoError(s.backend.Start()) +} + +func (s *BackendSuite) TearDownTest() { + s.backend.Stop() +} + +func (s *BackendSuite) TestPing() { + s.NoError(s.backend.Ping()) +} + +func (s *BackendSuite) TestContainerCreation() { + handle := mustCreateHandle() + rootfs, err := filepath.Abs("testdata/rootfs") + s.NoError(err) + + _, err = s.backend.Create(garden.ContainerSpec{ + Handle: handle, + RootFSPath: "raw://" + rootfs, + Privileged: true, + }) + s.NoError(err) + + defer s.backend.Destroy(handle) + + containers, err := s.backend.Containers(nil) + s.NoError(err) + + s.Len(containers, 1) +} + +func TestSuite(t *testing.T) { + suite.Run(t, &BackendSuite{ + Assertions: require.New(t), + }) +} + +func mustCreateHandle() string { + u4, err := uuid.NewV4() + if err != nil { + panic("couldn't create new uuid") + } + + return u4.String() +} diff --git a/worker/backend/libcontainerd/client.go b/worker/backend/libcontainerd/client.go index 426327b7f..f080cf39c 100644 --- a/worker/backend/libcontainerd/client.go +++ b/worker/backend/libcontainerd/client.go @@ -5,24 +5,64 @@ import ( "fmt" "github.com/containerd/containerd" + "github.com/opencontainers/runtime-spec/specs-go" ) 
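+// Note: the counterfeiter directives below are what produce the FakeClient
+// and FakeContainer test doubles under libcontainerdfakes used by the backend
+// unit tests; regenerate them with `go generate ./...` after changing this
+// interface or the containerd.Container interface.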
//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 . Client +//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 github.com/containerd/containerd.Container +// Client represents the minimum interface used to communicate with containerd +// to manage containers. +// type Client interface { + + // Init provides the initialization of internal structures necessary by + // the client, e.g., instantiation of the gRPC client. + // Init() (err error) + + // Version queries containerd's version service in order to verify + // connectivity. + // Version(ctx context.Context) (err error) + + // Stop deallocates any initialization performed by `Init()` and + // subsequent calls to methods of this interface. + // + Stop() (err error) + + // NewContainer creates a container in containerd. + // + NewContainer( + ctx context.Context, + id string, + labels map[string]string, + oci *specs.Spec, + ) ( + container containerd.Container, err error, + ) + + // Containers lists containers available in containerd matching a given + // labelset. + // + Containers( + ctx context.Context, + labels ...string, + ) ( + containers []containerd.Container, err error, + ) } type client struct { - addr string + addr string + containerd *containerd.Client } +var _ Client = (*client)(nil) + func New(addr string) *client { - return &client{ - addr: addr, - } + return &client{addr: addr} } func (c *client) Init() (err error) { @@ -35,6 +75,34 @@ func (c *client) Init() (err error) { return } +func (c *client) Stop() (err error) { + if c.containerd == nil { + return + } + + err = c.containerd.Close() + return +} + +func (c *client) NewContainer( + ctx context.Context, id string, labels map[string]string, oci *specs.Spec, +) ( + containerd.Container, error, +) { + return c.containerd.NewContainer(ctx, id, + containerd.WithSpec(oci), + containerd.WithContainerLabels(labels), + ) +} + +func (c *client) Containers( + ctx context.Context, labels ...string, +) ( + []containerd.Container, error, +) { + return c.containerd.Containers(ctx, labels...) 
+} + func (c *client) Version(ctx context.Context) (err error) { _, err = c.containerd.Version(ctx) return diff --git a/worker/backend/libcontainerd/libcontainerdfakes/fake_client.go b/worker/backend/libcontainerd/libcontainerdfakes/fake_client.go index 59361c5c0..c01056914 100644 --- a/worker/backend/libcontainerd/libcontainerdfakes/fake_client.go +++ b/worker/backend/libcontainerd/libcontainerdfakes/fake_client.go @@ -6,9 +6,25 @@ import ( "sync" "github.com/concourse/concourse/worker/backend/libcontainerd" + "github.com/containerd/containerd" + specs "github.com/opencontainers/runtime-spec/specs-go" ) type FakeClient struct { + ContainersStub func(context.Context, ...string) ([]containerd.Container, error) + containersMutex sync.RWMutex + containersArgsForCall []struct { + arg1 context.Context + arg2 []string + } + containersReturns struct { + result1 []containerd.Container + result2 error + } + containersReturnsOnCall map[int]struct { + result1 []containerd.Container + result2 error + } InitStub func() error initMutex sync.RWMutex initArgsForCall []struct { @@ -19,6 +35,32 @@ type FakeClient struct { initReturnsOnCall map[int]struct { result1 error } + NewContainerStub func(context.Context, string, map[string]string, *specs.Spec) (containerd.Container, error) + newContainerMutex sync.RWMutex + newContainerArgsForCall []struct { + arg1 context.Context + arg2 string + arg3 map[string]string + arg4 *specs.Spec + } + newContainerReturns struct { + result1 containerd.Container + result2 error + } + newContainerReturnsOnCall map[int]struct { + result1 containerd.Container + result2 error + } + StopStub func() error + stopMutex sync.RWMutex + stopArgsForCall []struct { + } + stopReturns struct { + result1 error + } + stopReturnsOnCall map[int]struct { + result1 error + } VersionStub func(context.Context) error versionMutex sync.RWMutex versionArgsForCall []struct { @@ -34,6 +76,70 @@ type FakeClient struct { invocationsMutex sync.RWMutex } +func (fake *FakeClient) Containers(arg1 context.Context, arg2 ...string) ([]containerd.Container, error) { + fake.containersMutex.Lock() + ret, specificReturn := fake.containersReturnsOnCall[len(fake.containersArgsForCall)] + fake.containersArgsForCall = append(fake.containersArgsForCall, struct { + arg1 context.Context + arg2 []string + }{arg1, arg2}) + fake.recordInvocation("Containers", []interface{}{arg1, arg2}) + fake.containersMutex.Unlock() + if fake.ContainersStub != nil { + return fake.ContainersStub(arg1, arg2...) 
+ } + if specificReturn { + return ret.result1, ret.result2 + } + fakeReturns := fake.containersReturns + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeClient) ContainersCallCount() int { + fake.containersMutex.RLock() + defer fake.containersMutex.RUnlock() + return len(fake.containersArgsForCall) +} + +func (fake *FakeClient) ContainersCalls(stub func(context.Context, ...string) ([]containerd.Container, error)) { + fake.containersMutex.Lock() + defer fake.containersMutex.Unlock() + fake.ContainersStub = stub +} + +func (fake *FakeClient) ContainersArgsForCall(i int) (context.Context, []string) { + fake.containersMutex.RLock() + defer fake.containersMutex.RUnlock() + argsForCall := fake.containersArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeClient) ContainersReturns(result1 []containerd.Container, result2 error) { + fake.containersMutex.Lock() + defer fake.containersMutex.Unlock() + fake.ContainersStub = nil + fake.containersReturns = struct { + result1 []containerd.Container + result2 error + }{result1, result2} +} + +func (fake *FakeClient) ContainersReturnsOnCall(i int, result1 []containerd.Container, result2 error) { + fake.containersMutex.Lock() + defer fake.containersMutex.Unlock() + fake.ContainersStub = nil + if fake.containersReturnsOnCall == nil { + fake.containersReturnsOnCall = make(map[int]struct { + result1 []containerd.Container + result2 error + }) + } + fake.containersReturnsOnCall[i] = struct { + result1 []containerd.Container + result2 error + }{result1, result2} +} + func (fake *FakeClient) Init() error { fake.initMutex.Lock() ret, specificReturn := fake.initReturnsOnCall[len(fake.initArgsForCall)] @@ -86,6 +192,124 @@ func (fake *FakeClient) InitReturnsOnCall(i int, result1 error) { }{result1} } +func (fake *FakeClient) NewContainer(arg1 context.Context, arg2 string, arg3 map[string]string, arg4 *specs.Spec) (containerd.Container, error) { + fake.newContainerMutex.Lock() + ret, specificReturn := fake.newContainerReturnsOnCall[len(fake.newContainerArgsForCall)] + fake.newContainerArgsForCall = append(fake.newContainerArgsForCall, struct { + arg1 context.Context + arg2 string + arg3 map[string]string + arg4 *specs.Spec + }{arg1, arg2, arg3, arg4}) + fake.recordInvocation("NewContainer", []interface{}{arg1, arg2, arg3, arg4}) + fake.newContainerMutex.Unlock() + if fake.NewContainerStub != nil { + return fake.NewContainerStub(arg1, arg2, arg3, arg4) + } + if specificReturn { + return ret.result1, ret.result2 + } + fakeReturns := fake.newContainerReturns + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeClient) NewContainerCallCount() int { + fake.newContainerMutex.RLock() + defer fake.newContainerMutex.RUnlock() + return len(fake.newContainerArgsForCall) +} + +func (fake *FakeClient) NewContainerCalls(stub func(context.Context, string, map[string]string, *specs.Spec) (containerd.Container, error)) { + fake.newContainerMutex.Lock() + defer fake.newContainerMutex.Unlock() + fake.NewContainerStub = stub +} + +func (fake *FakeClient) NewContainerArgsForCall(i int) (context.Context, string, map[string]string, *specs.Spec) { + fake.newContainerMutex.RLock() + defer fake.newContainerMutex.RUnlock() + argsForCall := fake.newContainerArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 +} + +func (fake *FakeClient) NewContainerReturns(result1 containerd.Container, result2 error) { + fake.newContainerMutex.Lock() + defer fake.newContainerMutex.Unlock() + 
fake.NewContainerStub = nil + fake.newContainerReturns = struct { + result1 containerd.Container + result2 error + }{result1, result2} +} + +func (fake *FakeClient) NewContainerReturnsOnCall(i int, result1 containerd.Container, result2 error) { + fake.newContainerMutex.Lock() + defer fake.newContainerMutex.Unlock() + fake.NewContainerStub = nil + if fake.newContainerReturnsOnCall == nil { + fake.newContainerReturnsOnCall = make(map[int]struct { + result1 containerd.Container + result2 error + }) + } + fake.newContainerReturnsOnCall[i] = struct { + result1 containerd.Container + result2 error + }{result1, result2} +} + +func (fake *FakeClient) Stop() error { + fake.stopMutex.Lock() + ret, specificReturn := fake.stopReturnsOnCall[len(fake.stopArgsForCall)] + fake.stopArgsForCall = append(fake.stopArgsForCall, struct { + }{}) + fake.recordInvocation("Stop", []interface{}{}) + fake.stopMutex.Unlock() + if fake.StopStub != nil { + return fake.StopStub() + } + if specificReturn { + return ret.result1 + } + fakeReturns := fake.stopReturns + return fakeReturns.result1 +} + +func (fake *FakeClient) StopCallCount() int { + fake.stopMutex.RLock() + defer fake.stopMutex.RUnlock() + return len(fake.stopArgsForCall) +} + +func (fake *FakeClient) StopCalls(stub func() error) { + fake.stopMutex.Lock() + defer fake.stopMutex.Unlock() + fake.StopStub = stub +} + +func (fake *FakeClient) StopReturns(result1 error) { + fake.stopMutex.Lock() + defer fake.stopMutex.Unlock() + fake.StopStub = nil + fake.stopReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) StopReturnsOnCall(i int, result1 error) { + fake.stopMutex.Lock() + defer fake.stopMutex.Unlock() + fake.StopStub = nil + if fake.stopReturnsOnCall == nil { + fake.stopReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.stopReturnsOnCall[i] = struct { + result1 error + }{result1} +} + func (fake *FakeClient) Version(arg1 context.Context) error { fake.versionMutex.Lock() ret, specificReturn := fake.versionReturnsOnCall[len(fake.versionArgsForCall)] @@ -149,8 +373,14 @@ func (fake *FakeClient) VersionReturnsOnCall(i int, result1 error) { func (fake *FakeClient) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() + fake.containersMutex.RLock() + defer fake.containersMutex.RUnlock() fake.initMutex.RLock() defer fake.initMutex.RUnlock() + fake.newContainerMutex.RLock() + defer fake.newContainerMutex.RUnlock() + fake.stopMutex.RLock() + defer fake.stopMutex.RUnlock() fake.versionMutex.RLock() defer fake.versionMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} diff --git a/worker/backend/libcontainerd/libcontainerdfakes/fake_container.go b/worker/backend/libcontainerd/libcontainerdfakes/fake_container.go new file mode 100644 index 000000000..884106c48 --- /dev/null +++ b/worker/backend/libcontainerd/libcontainerdfakes/fake_container.go @@ -0,0 +1,972 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package libcontainerdfakes + +import ( + "context" + "sync" + + "github.com/containerd/containerd" + "github.com/containerd/containerd/cio" + "github.com/containerd/containerd/containers" + "github.com/gogo/protobuf/types" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +type FakeContainer struct { + CheckpointStub func(context.Context, string, ...containerd.CheckpointOpts) (containerd.Image, error) + checkpointMutex sync.RWMutex + checkpointArgsForCall []struct { + arg1 context.Context + arg2 string + arg3 []containerd.CheckpointOpts + } + checkpointReturns struct { + result1 containerd.Image + result2 error + } + checkpointReturnsOnCall map[int]struct { + result1 containerd.Image + result2 error + } + DeleteStub func(context.Context, ...containerd.DeleteOpts) error + deleteMutex sync.RWMutex + deleteArgsForCall []struct { + arg1 context.Context + arg2 []containerd.DeleteOpts + } + deleteReturns struct { + result1 error + } + deleteReturnsOnCall map[int]struct { + result1 error + } + ExtensionsStub func(context.Context) (map[string]types.Any, error) + extensionsMutex sync.RWMutex + extensionsArgsForCall []struct { + arg1 context.Context + } + extensionsReturns struct { + result1 map[string]types.Any + result2 error + } + extensionsReturnsOnCall map[int]struct { + result1 map[string]types.Any + result2 error + } + IDStub func() string + iDMutex sync.RWMutex + iDArgsForCall []struct { + } + iDReturns struct { + result1 string + } + iDReturnsOnCall map[int]struct { + result1 string + } + ImageStub func(context.Context) (containerd.Image, error) + imageMutex sync.RWMutex + imageArgsForCall []struct { + arg1 context.Context + } + imageReturns struct { + result1 containerd.Image + result2 error + } + imageReturnsOnCall map[int]struct { + result1 containerd.Image + result2 error + } + InfoStub func(context.Context, ...containerd.InfoOpts) (containers.Container, error) + infoMutex sync.RWMutex + infoArgsForCall []struct { + arg1 context.Context + arg2 []containerd.InfoOpts + } + infoReturns struct { + result1 containers.Container + result2 error + } + infoReturnsOnCall map[int]struct { + result1 containers.Container + result2 error + } + LabelsStub func(context.Context) (map[string]string, error) + labelsMutex sync.RWMutex + labelsArgsForCall []struct { + arg1 context.Context + } + labelsReturns struct { + result1 map[string]string + result2 error + } + labelsReturnsOnCall map[int]struct { + result1 map[string]string + result2 error + } + NewTaskStub func(context.Context, cio.Creator, ...containerd.NewTaskOpts) (containerd.Task, error) + newTaskMutex sync.RWMutex + newTaskArgsForCall []struct { + arg1 context.Context + arg2 cio.Creator + arg3 []containerd.NewTaskOpts + } + newTaskReturns struct { + result1 containerd.Task + result2 error + } + newTaskReturnsOnCall map[int]struct { + result1 containerd.Task + result2 error + } + SetLabelsStub func(context.Context, map[string]string) (map[string]string, error) + setLabelsMutex sync.RWMutex + setLabelsArgsForCall []struct { + arg1 context.Context + arg2 map[string]string + } + setLabelsReturns struct { + result1 map[string]string + result2 error + } + setLabelsReturnsOnCall map[int]struct { + result1 map[string]string + result2 error + } + SpecStub func(context.Context) (*specs.Spec, error) + specMutex sync.RWMutex + specArgsForCall []struct { + arg1 context.Context + } + specReturns struct { + result1 *specs.Spec + result2 error + } + specReturnsOnCall map[int]struct { + result1 *specs.Spec + result2 error + } + TaskStub 
func(context.Context, cio.Attach) (containerd.Task, error) + taskMutex sync.RWMutex + taskArgsForCall []struct { + arg1 context.Context + arg2 cio.Attach + } + taskReturns struct { + result1 containerd.Task + result2 error + } + taskReturnsOnCall map[int]struct { + result1 containerd.Task + result2 error + } + UpdateStub func(context.Context, ...containerd.UpdateContainerOpts) error + updateMutex sync.RWMutex + updateArgsForCall []struct { + arg1 context.Context + arg2 []containerd.UpdateContainerOpts + } + updateReturns struct { + result1 error + } + updateReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeContainer) Checkpoint(arg1 context.Context, arg2 string, arg3 ...containerd.CheckpointOpts) (containerd.Image, error) { + fake.checkpointMutex.Lock() + ret, specificReturn := fake.checkpointReturnsOnCall[len(fake.checkpointArgsForCall)] + fake.checkpointArgsForCall = append(fake.checkpointArgsForCall, struct { + arg1 context.Context + arg2 string + arg3 []containerd.CheckpointOpts + }{arg1, arg2, arg3}) + fake.recordInvocation("Checkpoint", []interface{}{arg1, arg2, arg3}) + fake.checkpointMutex.Unlock() + if fake.CheckpointStub != nil { + return fake.CheckpointStub(arg1, arg2, arg3...) + } + if specificReturn { + return ret.result1, ret.result2 + } + fakeReturns := fake.checkpointReturns + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeContainer) CheckpointCallCount() int { + fake.checkpointMutex.RLock() + defer fake.checkpointMutex.RUnlock() + return len(fake.checkpointArgsForCall) +} + +func (fake *FakeContainer) CheckpointCalls(stub func(context.Context, string, ...containerd.CheckpointOpts) (containerd.Image, error)) { + fake.checkpointMutex.Lock() + defer fake.checkpointMutex.Unlock() + fake.CheckpointStub = stub +} + +func (fake *FakeContainer) CheckpointArgsForCall(i int) (context.Context, string, []containerd.CheckpointOpts) { + fake.checkpointMutex.RLock() + defer fake.checkpointMutex.RUnlock() + argsForCall := fake.checkpointArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeContainer) CheckpointReturns(result1 containerd.Image, result2 error) { + fake.checkpointMutex.Lock() + defer fake.checkpointMutex.Unlock() + fake.CheckpointStub = nil + fake.checkpointReturns = struct { + result1 containerd.Image + result2 error + }{result1, result2} +} + +func (fake *FakeContainer) CheckpointReturnsOnCall(i int, result1 containerd.Image, result2 error) { + fake.checkpointMutex.Lock() + defer fake.checkpointMutex.Unlock() + fake.CheckpointStub = nil + if fake.checkpointReturnsOnCall == nil { + fake.checkpointReturnsOnCall = make(map[int]struct { + result1 containerd.Image + result2 error + }) + } + fake.checkpointReturnsOnCall[i] = struct { + result1 containerd.Image + result2 error + }{result1, result2} +} + +func (fake *FakeContainer) Delete(arg1 context.Context, arg2 ...containerd.DeleteOpts) error { + fake.deleteMutex.Lock() + ret, specificReturn := fake.deleteReturnsOnCall[len(fake.deleteArgsForCall)] + fake.deleteArgsForCall = append(fake.deleteArgsForCall, struct { + arg1 context.Context + arg2 []containerd.DeleteOpts + }{arg1, arg2}) + fake.recordInvocation("Delete", []interface{}{arg1, arg2}) + fake.deleteMutex.Unlock() + if fake.DeleteStub != nil { + return fake.DeleteStub(arg1, arg2...) 
+ } + if specificReturn { + return ret.result1 + } + fakeReturns := fake.deleteReturns + return fakeReturns.result1 +} + +func (fake *FakeContainer) DeleteCallCount() int { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + return len(fake.deleteArgsForCall) +} + +func (fake *FakeContainer) DeleteCalls(stub func(context.Context, ...containerd.DeleteOpts) error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = stub +} + +func (fake *FakeContainer) DeleteArgsForCall(i int) (context.Context, []containerd.DeleteOpts) { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + argsForCall := fake.deleteArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeContainer) DeleteReturns(result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + fake.deleteReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeContainer) DeleteReturnsOnCall(i int, result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + if fake.deleteReturnsOnCall == nil { + fake.deleteReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.deleteReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeContainer) Extensions(arg1 context.Context) (map[string]types.Any, error) { + fake.extensionsMutex.Lock() + ret, specificReturn := fake.extensionsReturnsOnCall[len(fake.extensionsArgsForCall)] + fake.extensionsArgsForCall = append(fake.extensionsArgsForCall, struct { + arg1 context.Context + }{arg1}) + fake.recordInvocation("Extensions", []interface{}{arg1}) + fake.extensionsMutex.Unlock() + if fake.ExtensionsStub != nil { + return fake.ExtensionsStub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + fakeReturns := fake.extensionsReturns + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeContainer) ExtensionsCallCount() int { + fake.extensionsMutex.RLock() + defer fake.extensionsMutex.RUnlock() + return len(fake.extensionsArgsForCall) +} + +func (fake *FakeContainer) ExtensionsCalls(stub func(context.Context) (map[string]types.Any, error)) { + fake.extensionsMutex.Lock() + defer fake.extensionsMutex.Unlock() + fake.ExtensionsStub = stub +} + +func (fake *FakeContainer) ExtensionsArgsForCall(i int) context.Context { + fake.extensionsMutex.RLock() + defer fake.extensionsMutex.RUnlock() + argsForCall := fake.extensionsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeContainer) ExtensionsReturns(result1 map[string]types.Any, result2 error) { + fake.extensionsMutex.Lock() + defer fake.extensionsMutex.Unlock() + fake.ExtensionsStub = nil + fake.extensionsReturns = struct { + result1 map[string]types.Any + result2 error + }{result1, result2} +} + +func (fake *FakeContainer) ExtensionsReturnsOnCall(i int, result1 map[string]types.Any, result2 error) { + fake.extensionsMutex.Lock() + defer fake.extensionsMutex.Unlock() + fake.ExtensionsStub = nil + if fake.extensionsReturnsOnCall == nil { + fake.extensionsReturnsOnCall = make(map[int]struct { + result1 map[string]types.Any + result2 error + }) + } + fake.extensionsReturnsOnCall[i] = struct { + result1 map[string]types.Any + result2 error + }{result1, result2} +} + +func (fake *FakeContainer) ID() string { + fake.iDMutex.Lock() + ret, specificReturn := fake.iDReturnsOnCall[len(fake.iDArgsForCall)] + fake.iDArgsForCall = append(fake.iDArgsForCall, struct { + }{}) + fake.recordInvocation("ID", []interface{}{}) + 
fake.iDMutex.Unlock() + if fake.IDStub != nil { + return fake.IDStub() + } + if specificReturn { + return ret.result1 + } + fakeReturns := fake.iDReturns + return fakeReturns.result1 +} + +func (fake *FakeContainer) IDCallCount() int { + fake.iDMutex.RLock() + defer fake.iDMutex.RUnlock() + return len(fake.iDArgsForCall) +} + +func (fake *FakeContainer) IDCalls(stub func() string) { + fake.iDMutex.Lock() + defer fake.iDMutex.Unlock() + fake.IDStub = stub +} + +func (fake *FakeContainer) IDReturns(result1 string) { + fake.iDMutex.Lock() + defer fake.iDMutex.Unlock() + fake.IDStub = nil + fake.iDReturns = struct { + result1 string + }{result1} +} + +func (fake *FakeContainer) IDReturnsOnCall(i int, result1 string) { + fake.iDMutex.Lock() + defer fake.iDMutex.Unlock() + fake.IDStub = nil + if fake.iDReturnsOnCall == nil { + fake.iDReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.iDReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *FakeContainer) Image(arg1 context.Context) (containerd.Image, error) { + fake.imageMutex.Lock() + ret, specificReturn := fake.imageReturnsOnCall[len(fake.imageArgsForCall)] + fake.imageArgsForCall = append(fake.imageArgsForCall, struct { + arg1 context.Context + }{arg1}) + fake.recordInvocation("Image", []interface{}{arg1}) + fake.imageMutex.Unlock() + if fake.ImageStub != nil { + return fake.ImageStub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + fakeReturns := fake.imageReturns + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeContainer) ImageCallCount() int { + fake.imageMutex.RLock() + defer fake.imageMutex.RUnlock() + return len(fake.imageArgsForCall) +} + +func (fake *FakeContainer) ImageCalls(stub func(context.Context) (containerd.Image, error)) { + fake.imageMutex.Lock() + defer fake.imageMutex.Unlock() + fake.ImageStub = stub +} + +func (fake *FakeContainer) ImageArgsForCall(i int) context.Context { + fake.imageMutex.RLock() + defer fake.imageMutex.RUnlock() + argsForCall := fake.imageArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeContainer) ImageReturns(result1 containerd.Image, result2 error) { + fake.imageMutex.Lock() + defer fake.imageMutex.Unlock() + fake.ImageStub = nil + fake.imageReturns = struct { + result1 containerd.Image + result2 error + }{result1, result2} +} + +func (fake *FakeContainer) ImageReturnsOnCall(i int, result1 containerd.Image, result2 error) { + fake.imageMutex.Lock() + defer fake.imageMutex.Unlock() + fake.ImageStub = nil + if fake.imageReturnsOnCall == nil { + fake.imageReturnsOnCall = make(map[int]struct { + result1 containerd.Image + result2 error + }) + } + fake.imageReturnsOnCall[i] = struct { + result1 containerd.Image + result2 error + }{result1, result2} +} + +func (fake *FakeContainer) Info(arg1 context.Context, arg2 ...containerd.InfoOpts) (containers.Container, error) { + fake.infoMutex.Lock() + ret, specificReturn := fake.infoReturnsOnCall[len(fake.infoArgsForCall)] + fake.infoArgsForCall = append(fake.infoArgsForCall, struct { + arg1 context.Context + arg2 []containerd.InfoOpts + }{arg1, arg2}) + fake.recordInvocation("Info", []interface{}{arg1, arg2}) + fake.infoMutex.Unlock() + if fake.InfoStub != nil { + return fake.InfoStub(arg1, arg2...) 
+ } + if specificReturn { + return ret.result1, ret.result2 + } + fakeReturns := fake.infoReturns + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeContainer) InfoCallCount() int { + fake.infoMutex.RLock() + defer fake.infoMutex.RUnlock() + return len(fake.infoArgsForCall) +} + +func (fake *FakeContainer) InfoCalls(stub func(context.Context, ...containerd.InfoOpts) (containers.Container, error)) { + fake.infoMutex.Lock() + defer fake.infoMutex.Unlock() + fake.InfoStub = stub +} + +func (fake *FakeContainer) InfoArgsForCall(i int) (context.Context, []containerd.InfoOpts) { + fake.infoMutex.RLock() + defer fake.infoMutex.RUnlock() + argsForCall := fake.infoArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeContainer) InfoReturns(result1 containers.Container, result2 error) { + fake.infoMutex.Lock() + defer fake.infoMutex.Unlock() + fake.InfoStub = nil + fake.infoReturns = struct { + result1 containers.Container + result2 error + }{result1, result2} +} + +func (fake *FakeContainer) InfoReturnsOnCall(i int, result1 containers.Container, result2 error) { + fake.infoMutex.Lock() + defer fake.infoMutex.Unlock() + fake.InfoStub = nil + if fake.infoReturnsOnCall == nil { + fake.infoReturnsOnCall = make(map[int]struct { + result1 containers.Container + result2 error + }) + } + fake.infoReturnsOnCall[i] = struct { + result1 containers.Container + result2 error + }{result1, result2} +} + +func (fake *FakeContainer) Labels(arg1 context.Context) (map[string]string, error) { + fake.labelsMutex.Lock() + ret, specificReturn := fake.labelsReturnsOnCall[len(fake.labelsArgsForCall)] + fake.labelsArgsForCall = append(fake.labelsArgsForCall, struct { + arg1 context.Context + }{arg1}) + fake.recordInvocation("Labels", []interface{}{arg1}) + fake.labelsMutex.Unlock() + if fake.LabelsStub != nil { + return fake.LabelsStub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + fakeReturns := fake.labelsReturns + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeContainer) LabelsCallCount() int { + fake.labelsMutex.RLock() + defer fake.labelsMutex.RUnlock() + return len(fake.labelsArgsForCall) +} + +func (fake *FakeContainer) LabelsCalls(stub func(context.Context) (map[string]string, error)) { + fake.labelsMutex.Lock() + defer fake.labelsMutex.Unlock() + fake.LabelsStub = stub +} + +func (fake *FakeContainer) LabelsArgsForCall(i int) context.Context { + fake.labelsMutex.RLock() + defer fake.labelsMutex.RUnlock() + argsForCall := fake.labelsArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeContainer) LabelsReturns(result1 map[string]string, result2 error) { + fake.labelsMutex.Lock() + defer fake.labelsMutex.Unlock() + fake.LabelsStub = nil + fake.labelsReturns = struct { + result1 map[string]string + result2 error + }{result1, result2} +} + +func (fake *FakeContainer) LabelsReturnsOnCall(i int, result1 map[string]string, result2 error) { + fake.labelsMutex.Lock() + defer fake.labelsMutex.Unlock() + fake.LabelsStub = nil + if fake.labelsReturnsOnCall == nil { + fake.labelsReturnsOnCall = make(map[int]struct { + result1 map[string]string + result2 error + }) + } + fake.labelsReturnsOnCall[i] = struct { + result1 map[string]string + result2 error + }{result1, result2} +} + +func (fake *FakeContainer) NewTask(arg1 context.Context, arg2 cio.Creator, arg3 ...containerd.NewTaskOpts) (containerd.Task, error) { + fake.newTaskMutex.Lock() + ret, specificReturn := fake.newTaskReturnsOnCall[len(fake.newTaskArgsForCall)] + 
fake.newTaskArgsForCall = append(fake.newTaskArgsForCall, struct { + arg1 context.Context + arg2 cio.Creator + arg3 []containerd.NewTaskOpts + }{arg1, arg2, arg3}) + fake.recordInvocation("NewTask", []interface{}{arg1, arg2, arg3}) + fake.newTaskMutex.Unlock() + if fake.NewTaskStub != nil { + return fake.NewTaskStub(arg1, arg2, arg3...) + } + if specificReturn { + return ret.result1, ret.result2 + } + fakeReturns := fake.newTaskReturns + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeContainer) NewTaskCallCount() int { + fake.newTaskMutex.RLock() + defer fake.newTaskMutex.RUnlock() + return len(fake.newTaskArgsForCall) +} + +func (fake *FakeContainer) NewTaskCalls(stub func(context.Context, cio.Creator, ...containerd.NewTaskOpts) (containerd.Task, error)) { + fake.newTaskMutex.Lock() + defer fake.newTaskMutex.Unlock() + fake.NewTaskStub = stub +} + +func (fake *FakeContainer) NewTaskArgsForCall(i int) (context.Context, cio.Creator, []containerd.NewTaskOpts) { + fake.newTaskMutex.RLock() + defer fake.newTaskMutex.RUnlock() + argsForCall := fake.newTaskArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeContainer) NewTaskReturns(result1 containerd.Task, result2 error) { + fake.newTaskMutex.Lock() + defer fake.newTaskMutex.Unlock() + fake.NewTaskStub = nil + fake.newTaskReturns = struct { + result1 containerd.Task + result2 error + }{result1, result2} +} + +func (fake *FakeContainer) NewTaskReturnsOnCall(i int, result1 containerd.Task, result2 error) { + fake.newTaskMutex.Lock() + defer fake.newTaskMutex.Unlock() + fake.NewTaskStub = nil + if fake.newTaskReturnsOnCall == nil { + fake.newTaskReturnsOnCall = make(map[int]struct { + result1 containerd.Task + result2 error + }) + } + fake.newTaskReturnsOnCall[i] = struct { + result1 containerd.Task + result2 error + }{result1, result2} +} + +func (fake *FakeContainer) SetLabels(arg1 context.Context, arg2 map[string]string) (map[string]string, error) { + fake.setLabelsMutex.Lock() + ret, specificReturn := fake.setLabelsReturnsOnCall[len(fake.setLabelsArgsForCall)] + fake.setLabelsArgsForCall = append(fake.setLabelsArgsForCall, struct { + arg1 context.Context + arg2 map[string]string + }{arg1, arg2}) + fake.recordInvocation("SetLabels", []interface{}{arg1, arg2}) + fake.setLabelsMutex.Unlock() + if fake.SetLabelsStub != nil { + return fake.SetLabelsStub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2 + } + fakeReturns := fake.setLabelsReturns + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeContainer) SetLabelsCallCount() int { + fake.setLabelsMutex.RLock() + defer fake.setLabelsMutex.RUnlock() + return len(fake.setLabelsArgsForCall) +} + +func (fake *FakeContainer) SetLabelsCalls(stub func(context.Context, map[string]string) (map[string]string, error)) { + fake.setLabelsMutex.Lock() + defer fake.setLabelsMutex.Unlock() + fake.SetLabelsStub = stub +} + +func (fake *FakeContainer) SetLabelsArgsForCall(i int) (context.Context, map[string]string) { + fake.setLabelsMutex.RLock() + defer fake.setLabelsMutex.RUnlock() + argsForCall := fake.setLabelsArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeContainer) SetLabelsReturns(result1 map[string]string, result2 error) { + fake.setLabelsMutex.Lock() + defer fake.setLabelsMutex.Unlock() + fake.SetLabelsStub = nil + fake.setLabelsReturns = struct { + result1 map[string]string + result2 error + }{result1, result2} +} + +func (fake *FakeContainer) 
SetLabelsReturnsOnCall(i int, result1 map[string]string, result2 error) { + fake.setLabelsMutex.Lock() + defer fake.setLabelsMutex.Unlock() + fake.SetLabelsStub = nil + if fake.setLabelsReturnsOnCall == nil { + fake.setLabelsReturnsOnCall = make(map[int]struct { + result1 map[string]string + result2 error + }) + } + fake.setLabelsReturnsOnCall[i] = struct { + result1 map[string]string + result2 error + }{result1, result2} +} + +func (fake *FakeContainer) Spec(arg1 context.Context) (*specs.Spec, error) { + fake.specMutex.Lock() + ret, specificReturn := fake.specReturnsOnCall[len(fake.specArgsForCall)] + fake.specArgsForCall = append(fake.specArgsForCall, struct { + arg1 context.Context + }{arg1}) + fake.recordInvocation("Spec", []interface{}{arg1}) + fake.specMutex.Unlock() + if fake.SpecStub != nil { + return fake.SpecStub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + fakeReturns := fake.specReturns + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeContainer) SpecCallCount() int { + fake.specMutex.RLock() + defer fake.specMutex.RUnlock() + return len(fake.specArgsForCall) +} + +func (fake *FakeContainer) SpecCalls(stub func(context.Context) (*specs.Spec, error)) { + fake.specMutex.Lock() + defer fake.specMutex.Unlock() + fake.SpecStub = stub +} + +func (fake *FakeContainer) SpecArgsForCall(i int) context.Context { + fake.specMutex.RLock() + defer fake.specMutex.RUnlock() + argsForCall := fake.specArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeContainer) SpecReturns(result1 *specs.Spec, result2 error) { + fake.specMutex.Lock() + defer fake.specMutex.Unlock() + fake.SpecStub = nil + fake.specReturns = struct { + result1 *specs.Spec + result2 error + }{result1, result2} +} + +func (fake *FakeContainer) SpecReturnsOnCall(i int, result1 *specs.Spec, result2 error) { + fake.specMutex.Lock() + defer fake.specMutex.Unlock() + fake.SpecStub = nil + if fake.specReturnsOnCall == nil { + fake.specReturnsOnCall = make(map[int]struct { + result1 *specs.Spec + result2 error + }) + } + fake.specReturnsOnCall[i] = struct { + result1 *specs.Spec + result2 error + }{result1, result2} +} + +func (fake *FakeContainer) Task(arg1 context.Context, arg2 cio.Attach) (containerd.Task, error) { + fake.taskMutex.Lock() + ret, specificReturn := fake.taskReturnsOnCall[len(fake.taskArgsForCall)] + fake.taskArgsForCall = append(fake.taskArgsForCall, struct { + arg1 context.Context + arg2 cio.Attach + }{arg1, arg2}) + fake.recordInvocation("Task", []interface{}{arg1, arg2}) + fake.taskMutex.Unlock() + if fake.TaskStub != nil { + return fake.TaskStub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2 + } + fakeReturns := fake.taskReturns + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeContainer) TaskCallCount() int { + fake.taskMutex.RLock() + defer fake.taskMutex.RUnlock() + return len(fake.taskArgsForCall) +} + +func (fake *FakeContainer) TaskCalls(stub func(context.Context, cio.Attach) (containerd.Task, error)) { + fake.taskMutex.Lock() + defer fake.taskMutex.Unlock() + fake.TaskStub = stub +} + +func (fake *FakeContainer) TaskArgsForCall(i int) (context.Context, cio.Attach) { + fake.taskMutex.RLock() + defer fake.taskMutex.RUnlock() + argsForCall := fake.taskArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeContainer) TaskReturns(result1 containerd.Task, result2 error) { + fake.taskMutex.Lock() + defer fake.taskMutex.Unlock() + fake.TaskStub = nil + fake.taskReturns = struct { + 
result1 containerd.Task + result2 error + }{result1, result2} +} + +func (fake *FakeContainer) TaskReturnsOnCall(i int, result1 containerd.Task, result2 error) { + fake.taskMutex.Lock() + defer fake.taskMutex.Unlock() + fake.TaskStub = nil + if fake.taskReturnsOnCall == nil { + fake.taskReturnsOnCall = make(map[int]struct { + result1 containerd.Task + result2 error + }) + } + fake.taskReturnsOnCall[i] = struct { + result1 containerd.Task + result2 error + }{result1, result2} +} + +func (fake *FakeContainer) Update(arg1 context.Context, arg2 ...containerd.UpdateContainerOpts) error { + fake.updateMutex.Lock() + ret, specificReturn := fake.updateReturnsOnCall[len(fake.updateArgsForCall)] + fake.updateArgsForCall = append(fake.updateArgsForCall, struct { + arg1 context.Context + arg2 []containerd.UpdateContainerOpts + }{arg1, arg2}) + fake.recordInvocation("Update", []interface{}{arg1, arg2}) + fake.updateMutex.Unlock() + if fake.UpdateStub != nil { + return fake.UpdateStub(arg1, arg2...) + } + if specificReturn { + return ret.result1 + } + fakeReturns := fake.updateReturns + return fakeReturns.result1 +} + +func (fake *FakeContainer) UpdateCallCount() int { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + return len(fake.updateArgsForCall) +} + +func (fake *FakeContainer) UpdateCalls(stub func(context.Context, ...containerd.UpdateContainerOpts) error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = stub +} + +func (fake *FakeContainer) UpdateArgsForCall(i int) (context.Context, []containerd.UpdateContainerOpts) { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + argsForCall := fake.updateArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeContainer) UpdateReturns(result1 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + fake.updateReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeContainer) UpdateReturnsOnCall(i int, result1 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + if fake.updateReturnsOnCall == nil { + fake.updateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeContainer) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.checkpointMutex.RLock() + defer fake.checkpointMutex.RUnlock() + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + fake.extensionsMutex.RLock() + defer fake.extensionsMutex.RUnlock() + fake.iDMutex.RLock() + defer fake.iDMutex.RUnlock() + fake.imageMutex.RLock() + defer fake.imageMutex.RUnlock() + fake.infoMutex.RLock() + defer fake.infoMutex.RUnlock() + fake.labelsMutex.RLock() + defer fake.labelsMutex.RUnlock() + fake.newTaskMutex.RLock() + defer fake.newTaskMutex.RUnlock() + fake.setLabelsMutex.RLock() + defer fake.setLabelsMutex.RUnlock() + fake.specMutex.RLock() + defer fake.specMutex.RUnlock() + fake.taskMutex.RLock() + defer fake.taskMutex.RUnlock() + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeContainer) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = 
map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ containerd.Container = new(FakeContainer) diff --git a/worker/backend/spec/capabilities.go b/worker/backend/spec/capabilities.go new file mode 100644 index 000000000..b38c32f4a --- /dev/null +++ b/worker/backend/spec/capabilities.go @@ -0,0 +1,85 @@ +package spec + +import "github.com/opencontainers/runtime-spec/specs-go" + +func OciCapabilities(privileged bool) specs.LinuxCapabilities { + if !privileged { + return UnprivilegedContainerCapabilities + } + + return PrivilegedContainerCapabilities +} + +var ( + PrivilegedContainerCapabilities = specs.LinuxCapabilities{ + Effective: privilegedCaps, + Bounding: privilegedCaps, + Inheritable: privilegedCaps, + Permitted: privilegedCaps, + } + + UnprivilegedContainerCapabilities = specs.LinuxCapabilities{ + Effective: unprivilegedCaps, + Bounding: unprivilegedCaps, + Inheritable: unprivilegedCaps, + Permitted: unprivilegedCaps, + } + + unprivilegedCaps = []string{ + "CAP_AUDIT_WRITE", + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_MKNOD", + "CAP_NET_BIND_SERVICE", + "CAP_NET_RAW", + "CAP_SETFCAP", + "CAP_SETGID", + "CAP_SETPCAP", + "CAP_SETUID", + "CAP_SYS_CHROOT", + } + + privilegedCaps = []string{ + "CAP_AUDIT_CONTROL", + "CAP_AUDIT_READ", + "CAP_AUDIT_WRITE", + "CAP_BLOCK_SUSPEND", + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_KILL", + "CAP_LEASE", + "CAP_LINUX_IMMUTABLE", + "CAP_MAC_ADMIN", + "CAP_MAC_OVERRIDE", + "CAP_MKNOD", + "CAP_NET_ADMIN", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_RAW", + "CAP_SETFCAP", + "CAP_SETGID", + "CAP_SETPCAP", + "CAP_SETUID", + "CAP_SYSLOG", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_CHROOT", + "CAP_SYS_MODULE", + "CAP_SYS_NICE", + "CAP_SYS_PACCT", + "CAP_SYS_PTRACE", + "CAP_SYS_RAWIO", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_WAKE_ALARM", + } +) diff --git a/worker/backend/spec/devices.go b/worker/backend/spec/devices.go new file mode 100644 index 000000000..0751e2b68 --- /dev/null +++ b/worker/backend/spec/devices.go @@ -0,0 +1,28 @@ +package spec + +import "github.com/opencontainers/runtime-spec/specs-go" + +var ( + AnyContainerDevices = []specs.LinuxDeviceCgroup{ + // runc allows these + {Access: "m", Type: "c", Major: deviceWildcard(), Minor: deviceWildcard(), Allow: true}, + {Access: "m", Type: "b", Major: deviceWildcard(), Minor: deviceWildcard(), Allow: true}, + + {Access: "rwm", Type: "c", Major: intRef(1), Minor: intRef(3), Allow: true}, // /dev/null + {Access: "rwm", Type: "c", Major: intRef(1), Minor: intRef(8), Allow: true}, // /dev/random + {Access: "rwm", Type: "c", Major: intRef(1), Minor: intRef(7), Allow: true}, // /dev/full + {Access: "rwm", Type: "c", Major: intRef(5), Minor: intRef(0), Allow: true}, // /dev/tty + {Access: "rwm", Type: "c", Major: intRef(1), Minor: intRef(5), Allow: true}, // /dev/zero + {Access: "rwm", Type: "c", Major: intRef(1), Minor: intRef(9), Allow: true}, // /dev/urandom + {Access: "rwm", Type: "c", Major: intRef(5), Minor: intRef(1), Allow: true}, // /dev/console + {Access: "rwm", Type: "c", Major: intRef(136), Minor: deviceWildcard(), Allow: true}, // /dev/pts/* + {Access: "rwm", Type: "c", Major: intRef(5), Minor: intRef(2), Allow: true}, // /dev/ptmx + {Access: "rwm", Type: "c", Major: 
intRef(10), Minor: intRef(200), Allow: true}, // /dev/net/tun + + // we allow this + {Access: "rwm", Type: "c", Major: intRef(10), Minor: intRef(229), Allow: true}, // /dev/fuse + } +) + +func intRef(i int64) *int64 { return &i } +func deviceWildcard() *int64 { return intRef(-1) } diff --git a/worker/backend/spec/mounts.go b/worker/backend/spec/mounts.go new file mode 100644 index 000000000..31c5416df --- /dev/null +++ b/worker/backend/spec/mounts.go @@ -0,0 +1,59 @@ +package spec + +import "github.com/opencontainers/runtime-spec/specs-go" + +var ( + InitMount = specs.Mount{ + Source: "/usr/local/concourse/bin/init", + Destination: "/tmp/gdn-init", + Type: "bind", + Options: []string{"bind"}, + } + + AnyContainerMounts = []specs.Mount{ + InitMount, // ours + + { + Destination: "/proc", + Type: "proc", + Source: "proc", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev", + Type: "tmpfs", + Source: "tmpfs", + Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, + }, + { + Destination: "/dev/pts", + Type: "devpts", + Source: "devpts", + Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, + }, + { + Destination: "/dev/shm", + Type: "tmpfs", + Source: "shm", + Options: []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"}, + }, + { + Destination: "/dev/mqueue", + Type: "mqueue", + Source: "mqueue", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + { + Destination: "/sys", + Type: "sysfs", + Source: "sysfs", + Options: []string{"nosuid", "noexec", "nodev", "ro"}, + }, + { + Destination: "/run", + Type: "tmpfs", + Source: "tmpfs", + Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, + }, + } +) diff --git a/worker/backend/spec/namespaces.go b/worker/backend/spec/namespaces.go new file mode 100644 index 000000000..4047a5bea --- /dev/null +++ b/worker/backend/spec/namespaces.go @@ -0,0 +1,25 @@ +package spec + +import "github.com/opencontainers/runtime-spec/specs-go" + +var ( + PrivilegedContainerNamespaces = []specs.LinuxNamespace{ + {Type: specs.PIDNamespace}, + {Type: specs.IPCNamespace}, + {Type: specs.UTSNamespace}, + {Type: specs.MountNamespace}, + {Type: specs.NetworkNamespace}, + } + + UnprivilegedContainerNamespaces = append(PrivilegedContainerNamespaces, + specs.LinuxNamespace{Type: specs.UserNamespace}, + ) +) + +func OciNamespaces(privileged bool) []specs.LinuxNamespace { + if !privileged { + return UnprivilegedContainerNamespaces + } + + return PrivilegedContainerNamespaces +} diff --git a/worker/backend/spec/spec.go b/worker/backend/spec/spec.go new file mode 100644 index 000000000..6c548382b --- /dev/null +++ b/worker/backend/spec/spec.go @@ -0,0 +1,182 @@ +package spec + +import ( + "fmt" + "path/filepath" + "strings" + + "code.cloudfoundry.org/garden" + "github.com/imdario/mergo" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// OciSpec converts a given `garden` container specification to an OCI spec. 
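+//
+// As a hedged sketch of the intended mapping (the handle and rootfs below are
+// made-up values): a garden.ContainerSpec with Handle "some-handle" and
+// RootFSPath "raw:///some/rootfs" should produce an OCI spec whose Hostname
+// is "some-handle" and whose Root.Path is "/some/rootfs", merged with the
+// defaults from defaultGardenOciSpec (namespaces, capabilities, device
+// cgroup rules, and the standard mounts).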
+//
+// TODO
+// - limits
+// - masked paths
+// - rootfs propagation
+// - seccomp
+// - user namespaces: uid/gid mappings
+// x capabilities
+// x devices
+// x env
+// x hostname
+// x mounts
+// x namespaces
+// x rootfs
+//
+//
+func OciSpec(gdn garden.ContainerSpec) (oci *specs.Spec, err error) {
+	var (
+		rootfs string
+		mounts []specs.Mount
+	)
+
+	if gdn.Handle == "" {
+		err = fmt.Errorf("handle must be specified")
+		return
+	}
+
+	if gdn.RootFSPath == "" {
+		gdn.RootFSPath = gdn.Image.URI
+	}
+
+	rootfs, err = rootfsDir(gdn.RootFSPath)
+	if err != nil {
+		return
+	}
+
+	mounts, err = OciSpecBindMounts(gdn.BindMounts)
+	if err != nil {
+		return
+	}
+
+	oci = merge(defaultGardenOciSpec(gdn.Privileged), &specs.Spec{
+		Version:  specs.Version,
+		Hostname: gdn.Handle,
+		Process: &specs.Process{
+			Env: gdn.Env,
+		},
+		Root:        &specs.Root{Path: rootfs},
+		Mounts:      mounts,
+		Annotations: map[string]string(gdn.Properties),
+		// Linux: &specs.Linux{
+		// 	Resources: &specs.LinuxResources{Memory: nil, Cpu: nil},
+		// },
+	})
+
+	return
+}
+
+// OciSpecBindMounts converts garden bindmounts to oci spec mounts.
+//
+func OciSpecBindMounts(bindMounts []garden.BindMount) (mounts []specs.Mount, err error) {
+	for _, bindMount := range bindMounts {
+		if bindMount.SrcPath == "" || bindMount.DstPath == "" {
+			err = fmt.Errorf("src and dst must not be empty")
+			return
+		}
+
+		if !filepath.IsAbs(bindMount.SrcPath) || !filepath.IsAbs(bindMount.DstPath) {
+			err = fmt.Errorf("src and dst must be absolute")
+			return
+		}
+
+		if bindMount.Origin != garden.BindMountOriginHost {
+			err = fmt.Errorf("unknown bind mount origin %d", bindMount.Origin)
+			return
+		}
+
+		mode := "ro"
+		switch bindMount.Mode {
+		case garden.BindMountModeRO:
+		case garden.BindMountModeRW:
+			mode = "rw"
+		default:
+			err = fmt.Errorf("unknown bind mount mode %d", bindMount.Mode)
+			return
+		}
+
+		mounts = append(mounts, specs.Mount{
+			Source:      bindMount.SrcPath,
+			Destination: bindMount.DstPath,
+			Type:        "bind",
+			Options:     []string{"bind", mode},
+		})
+	}
+
+	return
+}
+
+// defaultGardenOciSpec represents a default set of properties necessary in
+// order to satisfy the garden interface.
+//
+// ps.: this spec is NOT complete - it must be merged with more properties to
+// form a properly working container.
+//
+func defaultGardenOciSpec(privileged bool) *specs.Spec {
+	var (
+		namespaces   = OciNamespaces(privileged)
+		capabilities = OciCapabilities(privileged)
+	)
+
+	return &specs.Spec{
+		Process: &specs.Process{
+			Args:         []string{"/tmp/gdn-init"},
+			Capabilities: &capabilities,
+			Cwd:          "/",
+		},
+		Linux: &specs.Linux{
+			Namespaces: namespaces,
+			Resources: &specs.LinuxResources{
+				Devices: AnyContainerDevices,
+			},
+		},
+		Mounts: AnyContainerMounts,
+	}
+}
+
+// merge merges the OCI spec `src` into `dst`.
+//
+func merge(dst, src *specs.Spec) *specs.Spec {
+	err := mergo.Merge(dst, src, mergo.WithAppendSlice)
+	if err != nil {
+		panic(fmt.Errorf(
+			"failed to merge specs %v %v - programming mistake? 
%w", + dst, src, err, + )) + } + + return dst +} + +// rootfsDir takes a raw rootfs uri and extracts the directory that it points to, +// if using a valid scheme (`raw://`) +// +func rootfsDir(raw string) (directory string, err error) { + if raw == "" { + err = fmt.Errorf("rootfs must not be empty") + return + } + + parts := strings.SplitN(raw, "://", 2) + if len(parts) != 2 { + err = fmt.Errorf("malformatted rootfs: must be of form 'scheme://'") + return + } + + var scheme string + scheme, directory = parts[0], parts[1] + if scheme != "raw" { + err = fmt.Errorf("unsupported scheme '%s'", scheme) + return + } + + if !filepath.IsAbs(directory) { + err = fmt.Errorf("directory must be an absolute path") + return + } + + return +} diff --git a/worker/backend/spec/spec_test.go b/worker/backend/spec/spec_test.go new file mode 100644 index 000000000..a09dfa37b --- /dev/null +++ b/worker/backend/spec/spec_test.go @@ -0,0 +1,299 @@ +package spec_test + +import ( + "testing" + + "code.cloudfoundry.org/garden" + "github.com/concourse/concourse/worker/backend/spec" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type Suite struct { + suite.Suite + *require.Assertions +} + +func (s *Suite) TestContainerSpecValidations() { + for _, tc := range []struct { + desc string + spec garden.ContainerSpec + }{ + { + desc: "no handle specified", + spec: garden.ContainerSpec{}, + }, + { + desc: "rootfsPath not specified", + spec: garden.ContainerSpec{ + Handle: "handle", + }, + }, + { + desc: "rootfsPath without scheme", + spec: garden.ContainerSpec{ + Handle: "handle", + RootFSPath: "foo", + }, + }, + { + desc: "rootfsPath with unknown scheme", + spec: garden.ContainerSpec{ + Handle: "handle", + RootFSPath: "weird://foo", + }, + }, + { + desc: "rootfsPath not being absolute", + spec: garden.ContainerSpec{ + Handle: "handle", + RootFSPath: "raw://../not/absolute/at/all", + }, + }, + { + desc: "both rootfsPath and image specified", + spec: garden.ContainerSpec{ + Handle: "handle", + RootFSPath: "foo", + Image: garden.ImageRef{URI: "bar"}, + }, + }, + { + desc: "no rootfsPath, but image specified w/out scheme", + spec: garden.ContainerSpec{ + Handle: "handle", + Image: garden.ImageRef{URI: "bar"}, + }, + }, + { + desc: "no rootfsPath, but image specified w/ unknown scheme", + spec: garden.ContainerSpec{ + Handle: "handle", + Image: garden.ImageRef{URI: "weird://bar"}, + }, + }, + } { + s.T().Run(tc.desc, func(t *testing.T) { + _, err := spec.OciSpec(tc.spec) + s.Error(err) + }) + } +} + +func (s *Suite) TestOciSpecBindMounts() { + for _, tc := range []struct { + desc string + mounts []garden.BindMount + expected []specs.Mount + succeeds bool + }{ + { + desc: "unknown mode", + succeeds: false, + mounts: []garden.BindMount{ + { + SrcPath: "/a", + DstPath: "/b", + Mode: 123, + Origin: garden.BindMountOriginHost, + }, + }, + }, + { + desc: "unknown origin", + succeeds: false, + mounts: []garden.BindMount{ + { + SrcPath: "/a", + DstPath: "/b", + Mode: garden.BindMountModeRO, + Origin: 123, + }, + }, + }, + { + desc: "w/out src", + succeeds: false, + mounts: []garden.BindMount{ + { + DstPath: "/b", + Mode: garden.BindMountModeRO, + Origin: garden.BindMountOriginHost, + }, + }, + }, + { + desc: "non-absolute src", + succeeds: false, + mounts: []garden.BindMount{ + { + DstPath: "/b", + Mode: garden.BindMountModeRO, + Origin: garden.BindMountOriginHost, + }, + }, + }, + { + desc: "w/out dest", + succeeds: false, + mounts: 
[]garden.BindMount{ + { + SrcPath: "/a", + Mode: garden.BindMountModeRO, + Origin: garden.BindMountOriginHost, + }, + }, + }, + { + desc: "non-absolute dest", + succeeds: false, + mounts: []garden.BindMount{ + { + DstPath: "/b", + Mode: garden.BindMountModeRO, + Origin: garden.BindMountOriginHost, + }, + }, + }, + } { + s.T().Run(tc.desc, func(t *testing.T) { + actual, err := spec.OciSpecBindMounts(tc.mounts) + if !tc.succeeds { + s.Error(err) + return + } + + s.NoError(err) + s.Equal(tc.expected, actual) + }) + } +} + +func (s *Suite) TestOciNamespaces() { + for _, tc := range []struct { + desc string + privileged bool + expected []specs.LinuxNamespace + }{ + { + desc: "privileged", + privileged: true, + expected: spec.PrivilegedContainerNamespaces, + }, + { + desc: "unprivileged", + privileged: false, + expected: spec.UnprivilegedContainerNamespaces, + }, + } { + s.T().Run(tc.desc, func(t *testing.T) { + s.Equal(tc.expected, spec.OciNamespaces(tc.privileged)) + }) + } +} + +func (s *Suite) TestOciCapabilities() { + for _, tc := range []struct { + desc string + privileged bool + expected specs.LinuxCapabilities + }{ + { + desc: "privileged", + privileged: true, + expected: spec.PrivilegedContainerCapabilities, + }, + { + desc: "unprivileged", + privileged: false, + expected: spec.UnprivilegedContainerCapabilities, + }, + } { + s.T().Run(tc.desc, func(t *testing.T) { + s.Equal(tc.expected, spec.OciCapabilities(tc.privileged)) + }) + } +} + +func (s *Suite) TestContainerSpec() { + var minimalContainerSpec = garden.ContainerSpec{ + Handle: "handle", RootFSPath: "raw:///rootfs", + } + + for _, tc := range []struct { + desc string + gdn garden.ContainerSpec + check func(*specs.Spec) + }{ + { + desc: "defaults", + gdn: minimalContainerSpec, + check: func(oci *specs.Spec) { + s.Equal("/", oci.Process.Cwd) + s.Equal([]string{"/tmp/gdn-init"}, oci.Process.Args) + s.Equal(oci.Mounts, spec.AnyContainerMounts) + + s.Equal(minimalContainerSpec.Handle, oci.Hostname) + s.Equal(spec.AnyContainerDevices, oci.Linux.Resources.Devices) + }, + }, + { + desc: "env", + gdn: garden.ContainerSpec{ + Handle: "handle", RootFSPath: "raw:///rootfs", + Env: []string{"foo=bar"}, + }, + check: func(oci *specs.Spec) { + s.Equal([]string{"foo=bar"}, oci.Process.Env) + }, + }, + { + desc: "mounts", + gdn: garden.ContainerSpec{ + Handle: "handle", RootFSPath: "raw:///rootfs", + BindMounts: []garden.BindMount{ + { // ro mount + SrcPath: "/a", + DstPath: "/b", + Mode: garden.BindMountModeRO, + Origin: garden.BindMountOriginHost, + }, + { // rw mount + SrcPath: "/a", + DstPath: "/b", + Mode: garden.BindMountModeRW, + Origin: garden.BindMountOriginHost, + }, + }, + }, + check: func(oci *specs.Spec) { + s.Contains(oci.Mounts, specs.Mount{ + Source: "/a", + Destination: "/b", + Type: "bind", + Options: []string{"bind", "ro"}, + }) + s.Contains(oci.Mounts, specs.Mount{ + Source: "/a", + Destination: "/b", + Type: "bind", + Options: []string{"bind", "rw"}, + }) + }, + }, + } { + s.T().Run(tc.desc, func(t *testing.T) { + actual, err := spec.OciSpec(tc.gdn) + s.NoError(err) + + tc.check(actual) + }) + } +} + +func TestSuite(t *testing.T) { + suite.Run(t, &Suite{ + Assertions: require.New(t), + }) +} diff --git a/worker/workercmd/containerd.go b/worker/workercmd/containerd.go index 1fe454bc2..078c70175 100644 --- a/worker/workercmd/containerd.go +++ b/worker/workercmd/containerd.go @@ -18,9 +18,12 @@ import ( ) func containerdGardenServerRunner(logger lager.Logger, bindAddr, containerdAddr string) ifrit.Runner { - const graceTime = 0 
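+	// All containers created by this worker live under a single containerd
+	// namespace, keeping them isolated from (and listable independently of)
+	// any other client of the same containerd instance. The exact name is
+	// presumably arbitrary, as long as the backend uses it consistently.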
+ const ( + graceTime = 0 + namespace = "concourse" + ) - backend := backend.New(libcontainerd.New(containerdAddr)) + backend := backend.New(libcontainerd.New(containerdAddr), namespace) server := server.New("tcp", bindAddr, graceTime,