Merge pull request #6550 from vito/no-packr

Switch migrations + web assets to Go 1.16 embedding
Alex Suraci 2021-02-21 16:51:40 -05:00 committed by GitHub
commit 5530bdc6ee
29 changed files with 386 additions and 682 deletions
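For context, this change replaces gobuffalo/packr boxes with the standard library's embedding support added in Go 1.16. A minimal sketch of the pattern follows; the package and function names (`assets`, `PublicFS`) are illustrative, not Concourse's actual identifiers:

```go
// Package assets is an illustrative sketch only; Concourse's real code embeds
// migrations in atc/db/migration and web assets in web/, as the hunks below show.
package assets

import (
	"embed"
	"io/fs"
)

// The //go:embed directive bakes every file under public/ (which must exist
// next to this source file at build time) into the compiled binary.
//go:embed public
var embedded embed.FS

// PublicFS re-roots the embedded tree at public/, so callers read "index.html"
// rather than "public/index.html" — the same fs.Sub pattern used in the diff below.
func PublicFS() (fs.FS, error) {
	return fs.Sub(embedded, "public")
}
```

Because the result is a plain `fs.FS`, tests can substitute an in-memory `testing/fstest.MapFS`, which is exactly what the migration tests below do.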


@ -140,14 +140,14 @@ opaque.
You'll need a few things installed in order to build, test and run Concourse during
development:
* [`go`](https://golang.org/dl/) v1.13+
* [`go`](https://golang.org/dl/) v1.16+
* [`git`](https://git-scm.com/) v2.11+
* [`yarn`](https://yarnpkg.com/en/docs/install)
* [`docker-compose`](https://docs.docker.com/compose/install/)
* [`postgresql`](https://www.postgresql.org/download/)
> *Concourse uses Go 1.11's module system, so make sure it's **not** cloned
> under your `$GOPATH`.*
> *Concourse uses Go's module system, so make sure it's **not** cloned under
> your `$GOPATH`.*
## Running Concourse


@ -14,13 +14,15 @@ RUN grep '^replace' go.mod || go mod download
COPY ./cmd/init/init.c /tmp/init.c
RUN gcc -O2 -static -o /usr/local/concourse/bin/init /tmp/init.c && rm /tmp/init.c
# build Concourse without using 'packr' so that the volume in the next stage
# can live-update
# copy the rest separately so we don't constantly rebuild init
COPY . .
# build 'concourse' binary
RUN go build -gcflags=all="-N -l" -o /usr/local/concourse/bin/concourse \
./cmd/concourse
RUN set -x && \
go build -ldflags '-extldflags "-static"' -o /tmp/fly ./fly && \
# build 'fly' binary and update web CLI asset
RUN go build -ldflags '-extldflags "-static"' -o /tmp/fly ./fly && \
tar -C /tmp -czf /usr/local/concourse/fly-assets/fly-$(go env GOOS)-$(go env GOARCH).tgz fly && \
rm /tmp/fly
@ -29,3 +31,4 @@ FROM base
# set up a volume so locally built web UI changes auto-propagate
VOLUME /src
ENV CONCOURSE_WEB_PUBLIC_DIR=/src/web/public


@ -161,6 +161,7 @@ type RunCommand struct {
GardenRequestTimeout time.Duration `long:"garden-request-timeout" default:"5m" description:"How long to wait for requests to Garden to complete. 0 means no timeout."`
CLIArtifactsDir flag.Dir `long:"cli-artifacts-dir" description:"Directory containing downloadable CLI binaries."`
WebPublicDir flag.Dir `long:"web-public-dir" description:"Web public/ directory to serve live for local development."`
Metrics struct {
HostName string `long:"metrics-host-name" description:"Host string to attach to emitted metrics."`
@ -1357,7 +1358,7 @@ func (cmd *RunCommand) oldKey() *encryption.Key {
}
func (cmd *RunCommand) constructWebHandler(logger lager.Logger) (http.Handler, error) {
webHandler, err := web.NewHandler(logger)
webHandler, err := web.NewHandler(logger, cmd.WebPublicDir.Path())
if err != nil {
return nil, err
}


@ -11,7 +11,6 @@ import (
"github.com/concourse/concourse/atc/db/encryption"
"github.com/concourse/concourse/atc/db/lock"
"github.com/concourse/concourse/atc/db/migration"
"github.com/concourse/concourse/atc/db/migration/migrationfakes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -23,7 +22,6 @@ var _ = Describe("Encryption", func() {
db *sql.DB
lockDB *sql.DB
lockFactory lock.LockFactory
bindata *migrationfakes.FakeBindata
fakeLogFunc = func(logger lager.Logger, id lock.LockID) {}
)
@ -35,9 +33,6 @@ var _ = Describe("Encryption", func() {
Expect(err).NotTo(HaveOccurred())
lockFactory = lock.NewLockFactory(lockDB, fakeLogFunc, fakeLogFunc)
bindata = new(migrationfakes.FakeBindata)
bindata.AssetStub = asset
})
AfterEach(func() {


@ -2,8 +2,10 @@ package migration
import (
"database/sql"
"embed"
"errors"
"fmt"
"io/fs"
"sort"
"time"
@ -11,7 +13,6 @@ import (
"github.com/concourse/concourse/atc/db/encryption"
"github.com/concourse/concourse/atc/db/lock"
"github.com/concourse/concourse/atc/db/migration/migrations"
"github.com/gobuffalo/packr"
multierror "github.com/hashicorp/go-multierror"
_ "github.com/lib/pq"
)
@ -150,39 +151,74 @@ type Migrator interface {
Migrations() ([]migration, error)
}
//go:embed migrations
var migrationsEmbed embed.FS
func NewMigrator(db *sql.DB, lockFactory lock.LockFactory) Migrator {
return NewMigratorForMigrations(db, lockFactory, &packrSource{packr.NewBox("./migrations")})
migrationsFS, err := fs.Sub(migrationsEmbed, "migrations")
if err != nil {
// impossible due to const value arg
panic(err)
}
return NewMigratorForMigrations(db, lockFactory, migrationsFS)
}
func NewMigratorForMigrations(db *sql.DB, lockFactory lock.LockFactory, bindata Bindata) Migrator {
func NewMigratorForMigrations(db *sql.DB, lockFactory lock.LockFactory, migrationsFS fs.FS) Migrator {
return &migrator{
db,
lockFactory,
lager.NewLogger("migrations"),
bindata,
migrationsFS,
}
}
type migrator struct {
db *sql.DB
lockFactory lock.LockFactory
logger lager.Logger
bindata Bindata
db *sql.DB
lockFactory lock.LockFactory
logger lager.Logger
migrationsFS fs.FS
}
func (helper *migrator) Migrations() ([]migration, error) {
migrationList := []migration{}
assets, err := fs.ReadDir(helper.migrationsFS, ".")
if err != nil {
return nil, err
}
var parser = NewParser(helper.migrationsFS)
for _, asset := range assets {
if asset.Name() == "migrations.go" {
// special file declaring type for Go migrations
continue
}
parsedMigration, err := parser.ParseFileToMigration(asset.Name())
if err != nil {
return nil, fmt.Errorf("parse migration filename %s: %w", asset.Name(), err)
}
migrationList = append(migrationList, parsedMigration)
}
sortMigrations(migrationList)
return migrationList, nil
}
func (m *migrator) SupportedVersion() (int, error) {
matches := []migration{}
assets := m.bindata.AssetNames()
var parser = NewParser(m.bindata)
for _, match := range assets {
if migration, err := parser.ParseMigrationFilename(match); err == nil {
matches = append(matches, migration)
}
migrations, err := m.Migrations()
if err != nil {
return 0, fmt.Errorf("list migrations: %w", err)
}
sortMigrations(matches)
return matches[len(matches)-1].Version, nil
if len(migrations) == 0 {
return 0, fmt.Errorf("no migrations")
}
return migrations[len(migrations)-1].Version, nil
}
func (helper *migrator) CurrentVersion() (int, error) {
@ -380,23 +416,6 @@ func (m *migrator) runMigration(migration migration, strategy encryption.Strateg
return err
}
func (helper *migrator) Migrations() ([]migration, error) {
migrationList := []migration{}
assets := helper.bindata.AssetNames()
var parser = NewParser(helper.bindata)
for _, assetName := range assets {
parsedMigration, err := parser.ParseFileToMigration(assetName)
if err != nil {
return nil, err
}
migrationList = append(migrationList, parsedMigration)
}
sortMigrations(migrationList)
return migrationList, nil
}
func (helper *migrator) Up(newKey, oldKey *encryption.Key) error {
migrations, err := helper.Migrations()
if err != nil {


@ -1,12 +1,11 @@
package migration_test
import (
"testing"
"github.com/concourse/concourse/atc/postgresrunner"
"github.com/gobuffalo/packr"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"testing"
)
func TestMigration(t *testing.T) {
@ -25,5 +24,3 @@ var _ = BeforeEach(func() {
var _ = AfterEach(func() {
postgresRunner.DropTestDB()
})
var asset = packr.NewBox("./migrations").MustBytes


@ -2,18 +2,21 @@ package migration_test
import (
"database/sql"
"io/fs"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"testing/fstest"
"time"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc/db/lock"
"github.com/concourse/concourse/atc/db/migration"
"github.com/concourse/concourse/atc/db/migration/migrationfakes"
"github.com/lib/pq"
. "github.com/onsi/ginkgo"
@ -29,7 +32,6 @@ var _ = Describe("Migration", func() {
db *sql.DB
lockDB *sql.DB
lockFactory lock.LockFactory
bindata *migrationfakes.FakeBindata
fakeLogFunc = func(logger lager.Logger, id lock.LockID) {}
)
@ -41,9 +43,6 @@ var _ = Describe("Migration", func() {
Expect(err).NotTo(HaveOccurred())
lockFactory = lock.NewLockFactory(lockDB, fakeLogFunc, fakeLogFunc)
bindata = new(migrationfakes.FakeBindata)
bindata.AssetStub = asset
})
AfterEach(func() {
@ -62,26 +61,16 @@ var _ = Describe("Migration", func() {
Context("Version Check", func() {
It("CurrentVersion reports the current version stored in the database", func() {
bindata.AssetNamesReturns([]string{
"1000_some_migration.up.sql",
"1510262030_initial_schema.up.sql",
"1510670987_update_unique_constraint_for_resource_caches.up.sql",
"2000000000_latest_migration_does_not_matter.up.sql",
})
bindata.AssetStub = func(name string) ([]byte, error) {
if name == "1000_some_migration.up.sql" {
return []byte{}, nil
} else if name == "2000000000_latest_migration_does_not_matter.up.sql" {
return []byte{}, nil
}
return asset(name)
}
myDatabaseVersion := 1234567890
SetupMigrationsHistoryTableToExistAtVersion(db, myDatabaseVersion)
migrator := migration.NewMigratorForMigrations(db, lockFactory, bindata)
migrator := migration.NewMigratorForMigrations(db, lockFactory, fstest.MapFS{
"1000_some_migration.up.sql": &fstest.MapFile{},
"1510262030_initial_schema.up.sql": &fstest.MapFile{},
"1510670987_update_unique_constraint_for_resource_caches.up.sql": &fstest.MapFile{},
"2000000000_latest_migration_does_not_matter.up.sql": &fstest.MapFile{},
})
version, err := migrator.CurrentVersion()
Expect(err).NotTo(HaveOccurred())
@ -89,17 +78,15 @@ var _ = Describe("Migration", func() {
})
It("SupportedVersion reports the highest supported migration version", func() {
SetupMigrationsHistoryTableToExistAtVersion(db, initialSchemaVersion)
bindata.AssetNamesReturns([]string{
"1000_some_migration.up.sql",
"1510262030_initial_schema.up.sql",
"1510670987_update_unique_constraint_for_resource_caches.up.sql",
"300000_this_is_to_prove_we_dont_use_string_sort.up.sql",
"2000000000_latest_migration.up.sql",
migrator := migration.NewMigratorForMigrations(db, lockFactory, fstest.MapFS{
"1000_some_migration.up.sql": &fstest.MapFile{},
"1510262030_initial_schema.up.sql": &fstest.MapFile{},
"1510670987_update_unique_constraint_for_resource_caches.up.sql": &fstest.MapFile{},
"300000_this_is_to_prove_we_dont_use_string_sort.up.sql": &fstest.MapFile{},
"2000000000_latest_migration.up.sql": &fstest.MapFile{},
})
migrator := migration.NewMigratorForMigrations(db, lockFactory, bindata)
version, err := migrator.SupportedVersion()
Expect(err).NotTo(HaveOccurred())
@ -107,18 +94,16 @@ var _ = Describe("Migration", func() {
})
It("Ignores files it can't parse", func() {
SetupMigrationsHistoryTableToExistAtVersion(db, initialSchemaVersion)
bindata.AssetNamesReturns([]string{
"1000_some_migration.up.sql",
"1510262030_initial_schema.up.sql",
"1510670987_update_unique_constraint_for_resource_caches.up.sql",
"300000_this_is_to_prove_we_dont_use_string_sort.up.sql",
"2000000000_latest_migration.up.sql",
"migrations.go",
migrator := migration.NewMigratorForMigrations(db, lockFactory, fstest.MapFS{
"1000_some_migration.up.sql": &fstest.MapFile{},
"1510262030_initial_schema.up.sql": &fstest.MapFile{},
"1510670987_update_unique_constraint_for_resource_caches.up.sql": &fstest.MapFile{},
"300000_this_is_to_prove_we_dont_use_string_sort.up.sql": &fstest.MapFile{},
"2000000000_latest_migration.up.sql": &fstest.MapFile{},
"migrations.go": &fstest.MapFile{},
})
migrator := migration.NewMigratorForMigrations(db, lockFactory, bindata)
version, err := migrator.SupportedVersion()
Expect(err).NotTo(HaveOccurred())
@ -208,19 +193,16 @@ var _ = Describe("Migration", func() {
Context("sql migrations", func() {
It("runs a migration", func() {
simpleMigrationFilename := "1000_test_table_created.up.sql"
bindata.AssetReturns([]byte(`
BEGIN;
CREATE TABLE some_table (id integer);
COMMIT;
`), nil)
bindata.AssetNamesReturns([]string{
simpleMigrationFilename,
migrator := migration.NewMigratorForMigrations(db, lockFactory, fstest.MapFS{
"1000_test_table_created.up.sql": &fstest.MapFile{
Data: []byte(`
BEGIN;
CREATE TABLE some_table (id integer);
COMMIT;
`),
},
})
migrator := migration.NewMigratorForMigrations(db, lockFactory, bindata)
migrations, err := migrator.Migrations()
Expect(err).NotTo(HaveOccurred())
Expect(len(migrations)).To(Equal(1))
@ -241,22 +223,16 @@ var _ = Describe("Migration", func() {
It("ignores migrations before the current version", func() {
SetupMigrationsHistoryTableToExistAtVersion(db, 1000)
simpleMigrationFilename := "1000_test_table_created.up.sql"
bindata.AssetStub = func(name string) ([]byte, error) {
if name == simpleMigrationFilename {
return []byte(`
BEGIN;
CREATE TABLE some_table (id integer);
COMMIT;
`), nil
}
return asset(name)
}
bindata.AssetNamesReturns([]string{
simpleMigrationFilename,
migrator := migration.NewMigratorForMigrations(db, lockFactory, fstest.MapFS{
"1000_test_table_created.up.sql": &fstest.MapFile{
Data: []byte(`
BEGIN;
CREATE TABLE some_table (id integer);
COMMIT;
`),
},
})
migrator := migration.NewMigratorForMigrations(db, lockFactory, bindata)
err := migrator.Up(nil, nil)
Expect(err).NotTo(HaveOccurred())
@ -268,90 +244,99 @@ var _ = Describe("Migration", func() {
})
It("runs the up migrations in ascending order", func() {
addTableMigrationFilename := "1000_test_table_created.up.sql"
removeTableMigrationFilename := "1001_test_table_created.up.sql"
bindata.AssetStub = func(name string) ([]byte, error) {
if name == addTableMigrationFilename {
return []byte(`
BEGIN;
CREATE TABLE some_table (id integer);
COMMIT;
`), nil
} else if name == removeTableMigrationFilename {
return []byte(`
BEGIN;
DROP TABLE some_table;
COMMIT;
`), nil
}
return asset(name)
}
bindata.AssetNamesReturns([]string{
removeTableMigrationFilename,
addTableMigrationFilename,
migrator := migration.NewMigratorForMigrations(db, lockFactory, fstest.MapFS{
"1000_test_table_created.up.sql": &fstest.MapFile{
Data: []byte(`
BEGIN;
CREATE TABLE some_table (id integer);
COMMIT;
`),
},
"1001_test_table_created.up.sql": &fstest.MapFile{
Data: []byte(`
BEGIN;
DROP TABLE some_table;
COMMIT;
`),
},
})
migrator := migration.NewMigratorForMigrations(db, lockFactory, bindata)
err := migrator.Up(nil, nil)
Expect(err).NotTo(HaveOccurred())
})
Context("when sql migrations fail", func() {
BeforeEach(func() {
bindata.AssetNamesReturns([]string{
"1510262030_initial_schema.up.sql",
"1525724789_drop_reaper_addr_from_workers.up.sql",
})
})
It("rolls back and leaves the database clean", func() {
migrator := migration.NewMigratorForMigrations(db, lockFactory, bindata)
migrator := migration.NewMigratorForMigrations(db, lockFactory, fstest.MapFS{
"1000_test_table_created.up.sql": &fstest.MapFile{
Data: []byte(`
BEGIN;
CREATE TABLE some_table (id integer);
COMMIT;
`),
},
"1000_test_table_created.down.sql": &fstest.MapFile{
Data: []byte(`
BEGIN;
DROP TABLE some_table;
COMMIT;
`),
},
"1001_drop_bogus_table.up.sql": &fstest.MapFile{
Data: []byte(`
BEGIN;
DROP TABLE some_bogus_table;
COMMIT;
`),
},
})
err := migrator.Up(nil, nil)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("failed and was rolled back"))
ExpectDatabaseMigrationVersionToEqual(migrator, initialSchemaVersion)
ExpectMigrationToHaveFailed(db, 1525724789, false)
ExpectDatabaseMigrationVersionToEqual(migrator, 1000)
ExpectMigrationToHaveFailed(db, 1001, false)
})
})
It("Doesn't fail if there are no migrations to run", func() {
bindata.AssetNamesReturns([]string{
"1510262030_initial_schema.up.sql",
migrator := migration.NewMigratorForMigrations(db, lockFactory, fstest.MapFS{
"1000_test_table_created.up.sql": &fstest.MapFile{
Data: []byte(`
BEGIN;
CREATE TABLE some_table (id integer);
COMMIT;
`),
},
})
migrator := migration.NewMigratorForMigrations(db, lockFactory, bindata)
err := migrator.Up(nil, nil)
Expect(err).NotTo(HaveOccurred())
err = migrator.Up(nil, nil)
Expect(err).NotTo(HaveOccurred())
ExpectDatabaseMigrationVersionToEqual(migrator, initialSchemaVersion)
ExpectDatabaseMigrationVersionToEqual(migrator, 1000)
ExpectMigrationVersionTableNotToExist(db)
ExpectToBeAbleToInsertData(db)
})
It("Locks the database so multiple ATCs don't all run migrations at the same time", func() {
SetupMigrationsHistoryTableToExistAtVersion(db, 1510262030)
SetupSchemaFromFile(db, "migrations/1510262030_initial_schema.up.sql")
bindata.AssetNamesReturns([]string{
"1510262030_initial_schema.up.sql",
migrator := migration.NewMigratorForMigrations(db, lockFactory, fstest.MapFS{
"1000_test_table_created.up.sql": &fstest.MapFile{
Data: []byte(`
BEGIN;
CREATE TABLE some_table (id integer);
COMMIT;
`),
},
})
migrator := migration.NewMigratorForMigrations(db, lockFactory, bindata)
var wg sync.WaitGroup
wg.Add(3)
go TryRunUpAndVerifyResult(db, migrator, &wg)
go TryRunUpAndVerifyResult(db, migrator, &wg)
go TryRunUpAndVerifyResult(db, migrator, &wg)
go TryRunUpAndVerifyResult(db, migrator, 1000, &wg)
go TryRunUpAndVerifyResult(db, migrator, 1000, &wg)
go TryRunUpAndVerifyResult(db, migrator, 1000, &wg)
wg.Wait()
})
@ -359,12 +344,10 @@ var _ = Describe("Migration", func() {
Context("golang migrations", func() {
It("runs a migration with Migrate", func() {
migrator := migration.NewMigratorForMigrations(db, lockFactory, bindata)
bindata.AssetNamesReturns([]string{
migrator := migration.NewMigratorForMigrations(db, lockFactory, hackyRealMigrationsFS(
"1510262030_initial_schema.up.sql",
"1516643303_update_auth_providers.up.go",
})
))
By("applying the initial migration")
err := migrator.Migrate(nil, nil, 1510262030)
@ -386,12 +369,10 @@ var _ = Describe("Migration", func() {
})
It("runs a migration with Up", func() {
migrator := migration.NewMigratorForMigrations(db, lockFactory, bindata)
bindata.AssetNamesReturns([]string{
migrator := migration.NewMigratorForMigrations(db, lockFactory, hackyRealMigrationsFS(
"1510262030_initial_schema.up.sql",
"1516643303_update_auth_providers.up.go",
})
))
err := migrator.Up(nil, nil)
Expect(err).NotTo(HaveOccurred())
@ -411,12 +392,11 @@ var _ = Describe("Migration", func() {
Context("Downgrade", func() {
Context("Downgrades to a version that uses the old mattes/migrate schema_migrations table", func() {
It("Downgrades to a given version and write it to a new created schema_migrations table", func() {
bindata.AssetNamesReturns([]string{
migrator := migration.NewMigratorForMigrations(db, lockFactory, hackyRealMigrationsFS(
"1510262030_initial_schema.up.sql",
"1510670987_update_unique_constraint_for_resource_caches.up.sql",
"1510670987_update_unique_constraint_for_resource_caches.down.sql",
})
migrator := migration.NewMigratorForMigrations(db, lockFactory, bindata)
))
err := migrator.Up(nil, nil)
Expect(err).NotTo(HaveOccurred())
@ -438,13 +418,11 @@ var _ = Describe("Migration", func() {
})
It("Downgrades to a given version and write it to the existing schema_migrations table with dirty true", func() {
bindata.AssetNamesReturns([]string{
migrator := migration.NewMigratorForMigrations(db, lockFactory, hackyRealMigrationsFS(
"1510262030_initial_schema.up.sql",
"1510670987_update_unique_constraint_for_resource_caches.up.sql",
"1510670987_update_unique_constraint_for_resource_caches.down.sql",
})
migrator := migration.NewMigratorForMigrations(db, lockFactory, bindata)
))
err := migrator.Up(nil, nil)
Expect(err).NotTo(HaveOccurred())
@ -470,12 +448,11 @@ var _ = Describe("Migration", func() {
Context("Downgrades to a version with new migrations_history table", func() {
It("Downgrades to a given version", func() {
bindata.AssetNamesReturns([]string{
migrator := migration.NewMigratorForMigrations(db, lockFactory, hackyRealMigrationsFS(
"1510262030_initial_schema.up.sql",
"1510670987_update_unique_constraint_for_resource_caches.up.sql",
"1510670987_update_unique_constraint_for_resource_caches.down.sql",
})
migrator := migration.NewMigratorForMigrations(db, lockFactory, bindata)
))
err := migrator.Up(nil, nil)
Expect(err).NotTo(HaveOccurred())
@ -495,11 +472,10 @@ var _ = Describe("Migration", func() {
})
It("Doesn't fail if already at the requested version", func() {
bindata.AssetNamesReturns([]string{
migrator := migration.NewMigratorForMigrations(db, lockFactory, hackyRealMigrationsFS(
"1510262030_initial_schema.up.sql",
"1510670987_update_unique_constraint_for_resource_caches.up.sql",
})
migrator := migration.NewMigratorForMigrations(db, lockFactory, bindata)
))
err := migrator.Migrate(nil, nil, upgradedSchemaVersion)
Expect(err).NotTo(HaveOccurred())
@ -519,12 +495,11 @@ var _ = Describe("Migration", func() {
})
It("Locks the database so multiple consumers don't run downgrade at the same time", func() {
migrator := migration.NewMigratorForMigrations(db, lockFactory, bindata)
bindata.AssetNamesReturns([]string{
migrator := migration.NewMigratorForMigrations(db, lockFactory, hackyRealMigrationsFS(
"1510262030_initial_schema.up.sql",
"1510670987_update_unique_constraint_for_resource_caches.up.sql",
"1510670987_update_unique_constraint_for_resource_caches.down.sql",
})
))
err := migrator.Up(nil, nil)
Expect(err).NotTo(HaveOccurred())
@ -543,16 +518,32 @@ var _ = Describe("Migration", func() {
})
func TryRunUpAndVerifyResult(db *sql.DB, migrator migration.Migrator, wg *sync.WaitGroup) {
// this is pretty awful; anything using it relies on our real-live migrations,
// making these tests tightly coupled to Concourse instead of being a generic
// migration package
func hackyRealMigrationsFS(migrations ...string) fs.FS {
fs := fstest.MapFS{}
for _, m := range migrations {
content, err := os.ReadFile(filepath.Join("migrations", m))
Expect(err).ToNot(HaveOccurred())
fs[m] = &fstest.MapFile{
Data: content,
}
}
return fs
}
func TryRunUpAndVerifyResult(db *sql.DB, migrator migration.Migrator, version int, wg *sync.WaitGroup) {
defer GinkgoRecover()
defer wg.Done()
err := migrator.Up(nil, nil)
Expect(err).NotTo(HaveOccurred())
ExpectDatabaseMigrationVersionToEqual(migrator, initialSchemaVersion)
ExpectToBeAbleToInsertData(db)
ExpectDatabaseMigrationVersionToEqual(migrator, version)
}
func TryRunMigrateAndVerifyResult(db *sql.DB, migrator migration.Migrator, version int, wg *sync.WaitGroup) {


@ -1,179 +0,0 @@
// Code generated by counterfeiter. DO NOT EDIT.
package migrationfakes
import (
"sync"
"github.com/concourse/concourse/atc/db/migration"
)
type FakeBindata struct {
AssetStub func(string) ([]byte, error)
assetMutex sync.RWMutex
assetArgsForCall []struct {
arg1 string
}
assetReturns struct {
result1 []byte
result2 error
}
assetReturnsOnCall map[int]struct {
result1 []byte
result2 error
}
AssetNamesStub func() []string
assetNamesMutex sync.RWMutex
assetNamesArgsForCall []struct {
}
assetNamesReturns struct {
result1 []string
}
assetNamesReturnsOnCall map[int]struct {
result1 []string
}
invocations map[string][][]interface{}
invocationsMutex sync.RWMutex
}
func (fake *FakeBindata) Asset(arg1 string) ([]byte, error) {
fake.assetMutex.Lock()
ret, specificReturn := fake.assetReturnsOnCall[len(fake.assetArgsForCall)]
fake.assetArgsForCall = append(fake.assetArgsForCall, struct {
arg1 string
}{arg1})
fake.recordInvocation("Asset", []interface{}{arg1})
fake.assetMutex.Unlock()
if fake.AssetStub != nil {
return fake.AssetStub(arg1)
}
if specificReturn {
return ret.result1, ret.result2
}
fakeReturns := fake.assetReturns
return fakeReturns.result1, fakeReturns.result2
}
func (fake *FakeBindata) AssetCallCount() int {
fake.assetMutex.RLock()
defer fake.assetMutex.RUnlock()
return len(fake.assetArgsForCall)
}
func (fake *FakeBindata) AssetCalls(stub func(string) ([]byte, error)) {
fake.assetMutex.Lock()
defer fake.assetMutex.Unlock()
fake.AssetStub = stub
}
func (fake *FakeBindata) AssetArgsForCall(i int) string {
fake.assetMutex.RLock()
defer fake.assetMutex.RUnlock()
argsForCall := fake.assetArgsForCall[i]
return argsForCall.arg1
}
func (fake *FakeBindata) AssetReturns(result1 []byte, result2 error) {
fake.assetMutex.Lock()
defer fake.assetMutex.Unlock()
fake.AssetStub = nil
fake.assetReturns = struct {
result1 []byte
result2 error
}{result1, result2}
}
func (fake *FakeBindata) AssetReturnsOnCall(i int, result1 []byte, result2 error) {
fake.assetMutex.Lock()
defer fake.assetMutex.Unlock()
fake.AssetStub = nil
if fake.assetReturnsOnCall == nil {
fake.assetReturnsOnCall = make(map[int]struct {
result1 []byte
result2 error
})
}
fake.assetReturnsOnCall[i] = struct {
result1 []byte
result2 error
}{result1, result2}
}
func (fake *FakeBindata) AssetNames() []string {
fake.assetNamesMutex.Lock()
ret, specificReturn := fake.assetNamesReturnsOnCall[len(fake.assetNamesArgsForCall)]
fake.assetNamesArgsForCall = append(fake.assetNamesArgsForCall, struct {
}{})
fake.recordInvocation("AssetNames", []interface{}{})
fake.assetNamesMutex.Unlock()
if fake.AssetNamesStub != nil {
return fake.AssetNamesStub()
}
if specificReturn {
return ret.result1
}
fakeReturns := fake.assetNamesReturns
return fakeReturns.result1
}
func (fake *FakeBindata) AssetNamesCallCount() int {
fake.assetNamesMutex.RLock()
defer fake.assetNamesMutex.RUnlock()
return len(fake.assetNamesArgsForCall)
}
func (fake *FakeBindata) AssetNamesCalls(stub func() []string) {
fake.assetNamesMutex.Lock()
defer fake.assetNamesMutex.Unlock()
fake.AssetNamesStub = stub
}
func (fake *FakeBindata) AssetNamesReturns(result1 []string) {
fake.assetNamesMutex.Lock()
defer fake.assetNamesMutex.Unlock()
fake.AssetNamesStub = nil
fake.assetNamesReturns = struct {
result1 []string
}{result1}
}
func (fake *FakeBindata) AssetNamesReturnsOnCall(i int, result1 []string) {
fake.assetNamesMutex.Lock()
defer fake.assetNamesMutex.Unlock()
fake.AssetNamesStub = nil
if fake.assetNamesReturnsOnCall == nil {
fake.assetNamesReturnsOnCall = make(map[int]struct {
result1 []string
})
}
fake.assetNamesReturnsOnCall[i] = struct {
result1 []string
}{result1}
}
func (fake *FakeBindata) Invocations() map[string][][]interface{} {
fake.invocationsMutex.RLock()
defer fake.invocationsMutex.RUnlock()
fake.assetMutex.RLock()
defer fake.assetMutex.RUnlock()
fake.assetNamesMutex.RLock()
defer fake.assetNamesMutex.RUnlock()
copiedInvocations := map[string][][]interface{}{}
for key, value := range fake.invocations {
copiedInvocations[key] = value
}
return copiedInvocations
}
func (fake *FakeBindata) recordInvocation(key string, args []interface{}) {
fake.invocationsMutex.Lock()
defer fake.invocationsMutex.Unlock()
if fake.invocations == nil {
fake.invocations = map[string][][]interface{}{}
}
if fake.invocations[key] == nil {
fake.invocations[key] = [][]interface{}{}
}
fake.invocations[key] = append(fake.invocations[key], args)
}
var _ migration.Bindata = new(FakeBindata)


@ -7,7 +7,6 @@ import (
"github.com/concourse/concourse/atc/db/lock"
"github.com/concourse/concourse/atc/db/migration"
"github.com/concourse/concourse/atc/db/migration/migrationfakes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@ -18,7 +17,6 @@ var _ = Describe("OpenHelper", func() {
db *sql.DB
lockDB *sql.DB
lockFactory lock.LockFactory
bindata *migrationfakes.FakeBindata
openHelper *migration.OpenHelper
fakeLogFunc = func(logger lager.Logger, id lock.LockID) {}
)
@ -32,9 +30,6 @@ var _ = Describe("OpenHelper", func() {
lockFactory = lock.NewLockFactory(lockDB, fakeLogFunc, fakeLogFunc)
openHelper = migration.NewOpenHelper("postgres", postgresRunner.DataSourceName(), lockFactory, nil, nil)
bindata = new(migrationfakes.FakeBindata)
bindata.AssetStub = asset
})
AfterEach(func() {
@ -82,10 +77,6 @@ var _ = Describe("OpenHelper", func() {
})
It("Runs migrator if migration_version table does not exist", func() {
bindata.AssetNamesReturns([]string{
"1510262030_initial_schema.up.sql",
})
err = openHelper.MigrateToVersion(initialSchemaVersion)
Expect(err).NotTo(HaveOccurred())
@ -95,7 +86,6 @@ var _ = Describe("OpenHelper", func() {
ExpectToBeAbleToInsertData(db)
})
})
})


@ -2,6 +2,7 @@ package migration
import (
"errors"
"io/fs"
"regexp"
"strconv"
"strings"
@ -13,12 +14,12 @@ var goMigrationFuncName = regexp.MustCompile(`(Up|Down)_[0-9]*`)
var ErrCouldNotParseDirection = errors.New("could not parse direction for migration")
type Parser struct {
bindata Bindata
migrationsFS fs.FS
}
func NewParser(bindata Bindata) *Parser {
func NewParser(migrationsFS fs.FS) *Parser {
return &Parser{
bindata: bindata,
migrationsFS: migrationsFS,
}
}
@ -49,7 +50,7 @@ func (p *Parser) ParseFileToMigration(migrationName string) (migration, error) {
return migration, err
}
migrationBytes, err := p.bindata.Asset(migrationName)
migrationBytes, err := fs.ReadFile(p.migrationsFS, migrationName)
if err != nil {
return migration, err
}


@ -1,8 +1,9 @@
package migration_test
import (
"testing/fstest"
"github.com/concourse/concourse/atc/db/migration"
"github.com/concourse/concourse/atc/db/migration/migrationfakes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@ -13,21 +14,40 @@ var basicSQLMigration = []byte(`BEGIN;
COMMIT;
`)
var basicSQLDownMigration = []byte(`BEGIN;
-- create a table
DROP TABLE some_table;
COMMIT;
`)
var _ = Describe("Parser", func() {
var (
parser *migration.Parser
bindata *migrationfakes.FakeBindata
parser *migration.Parser
)
BeforeEach(func() {
bindata = new(migrationfakes.FakeBindata)
bindata.AssetReturns([]byte{}, nil)
parser = migration.NewParser(bindata)
parser = migration.NewParser(fstest.MapFS{
"1000_some_migration.up.sql": &fstest.MapFile{
Data: basicSQLMigration,
},
"1000_some_migration.down.sql": &fstest.MapFile{
Data: basicSQLDownMigration,
},
"2000_some_go_migration.up.go": &fstest.MapFile{
Data: []byte(`
func (m *Migrator) Up_2000() {}
`),
},
"2000_some_go_migration.down.go": &fstest.MapFile{
Data: []byte(`
func (m *Migrator) Down_2000() {}
`),
},
})
})
It("parses the direction of the migration from the file name", func() {
downMigration, err := parser.ParseFileToMigration("2000_some_migration.down.go")
downMigration, err := parser.ParseFileToMigration("2000_some_go_migration.down.go")
Expect(err).ToNot(HaveOccurred())
Expect(downMigration.Direction).To(Equal("down"))
@ -37,11 +57,10 @@ var _ = Describe("Parser", func() {
})
It("parses the strategy of the migration from the file", func() {
downMigration, err := parser.ParseFileToMigration("2000_some_migration.down.go")
downMigration, err := parser.ParseFileToMigration("2000_some_go_migration.down.go")
Expect(err).ToNot(HaveOccurred())
Expect(downMigration.Strategy).To(Equal(migration.GoMigration))
bindata.AssetReturns(basicSQLMigration, nil)
upMigration, err := parser.ParseFileToMigration("1000_some_migration.up.sql")
Expect(err).ToNot(HaveOccurred())
Expect(upMigration.Strategy).To(Equal(migration.SQLMigration))
@ -49,19 +68,18 @@ var _ = Describe("Parser", func() {
Context("SQL migrations", func() {
It("parses the migration into statements", func() {
bindata.AssetReturns(basicSQLMigration, nil)
migration, err := parser.ParseFileToMigration("1234_create_and_alter_table.up.sql")
migration, err := parser.ParseFileToMigration("1000_some_migration.up.sql")
Expect(err).ToNot(HaveOccurred())
Expect(migration.Statements).To(Equal(string(basicSQLMigration)))
migration, err = parser.ParseFileToMigration("1000_some_migration.down.sql")
Expect(err).ToNot(HaveOccurred())
Expect(migration.Statements).To(Equal(string(basicSQLDownMigration)))
})
})
Context("Go migrations", func() {
It("returns the name of the migration function to run", func() {
bindata.AssetReturns([]byte(`
func (m *Migrator) Up_2000() {}
`), nil)
migration, err := parser.ParseFileToMigration("2000_some_go_migration.up.go")
Expect(err).ToNot(HaveOccurred())
Expect(migration.Name).To(Equal("Up_2000"))


@ -1,31 +0,0 @@
package migration
import (
"github.com/gobuffalo/packr"
)
//go:generate counterfeiter . Bindata
type Bindata interface {
AssetNames() []string
Asset(name string) ([]byte, error)
}
type packrSource struct {
packr.Box
}
func (bs *packrSource) AssetNames() []string {
migrations := []string{}
for _, name := range bs.Box.List() {
if name != "migrations.go" {
migrations = append(migrations, name)
}
}
return migrations
}
func (bs *packrSource) Asset(name string) ([]byte, error) {
return bs.Box.MustBytes(name)
}


@ -2,7 +2,9 @@ package gclient
import (
"context"
"errors"
"io"
"net"
"strings"
"code.cloudfoundry.org/garden"
@ -80,7 +82,7 @@ func (process *retryableProcess) Signal(sig garden.Signal) error {
return nil
}
if strings.Contains(err.Error(), "use of closed network connection") {
if errors.Is(err, net.ErrClosed) {
return err
}
@ -98,7 +100,7 @@ func (process *retryableProcess) SetTTY(tty garden.TTYSpec) error {
return nil
}
if strings.Contains(err.Error(), "use of closed network connection") {
if errors.Is(err, net.ErrClosed) {
return err
}
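The two hunks above replace substring matching on "use of closed network connection" with `errors.Is` against `net.ErrClosed`, which the standard library exposes as of Go 1.16. A small standalone sketch of that check (assumed example, not Concourse code):

```go
package main

import (
	"errors"
	"fmt"
	"net"
)

func main() {
	// Open a listener, then close it so the next Accept fails.
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	ln.Close()

	_, err = ln.Accept()
	// Since Go 1.16, errors from operations on closed sockets wrap
	// net.ErrClosed, so errors.Is replaces matching on the message text.
	fmt.Println(errors.Is(err, net.ErrClosed)) // true
}
```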

go.mod

@ -35,7 +35,6 @@ require (
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
github.com/fatih/color v1.9.0
github.com/felixge/httpsnoop v1.0.1
github.com/gobuffalo/packr v1.13.7
github.com/gobwas/glob v0.2.3
github.com/goccy/go-yaml v1.8.3
github.com/gogo/googleapis v1.3.1 // indirect
@ -105,6 +104,6 @@ require (
sigs.k8s.io/yaml v1.2.0
)
go 1.13
go 1.16
replace github.com/docker/distribution v2.7.1+incompatible => github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible

go.sum

@ -55,7 +55,6 @@ contrib.go.opencensus.io/exporter/prometheus v0.2.0/go.mod h1:TYmVAyE8Tn1lyPcltF
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/Azure/azure-sdk-for-go v45.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
@ -162,7 +161,6 @@ github.com/aws/aws-sdk-go v1.34.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs=
github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A=
github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@ -444,7 +442,6 @@ github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTM
github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
github.com/go-playground/validator/v10 v10.4.0/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
github.com/go-redis/redis v6.15.6+incompatible h1:H9evprGPLI8+ci7fxQx6WNZHJSb7be8FqJQRhdQZ5Sg=
github.com/go-redis/redis v6.15.6+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
@ -487,8 +484,6 @@ github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9h
github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
github.com/gobuffalo/packr v1.13.7 h1:2uZgLd6b/W4yRBZV/ScaORxZLNGMHO0VCvqQNkKukNA=
github.com/gobuffalo/packr v1.13.7/go.mod h1:KkinLIn/n6+3tVXMwg6KkNvWwVsrRAz4ph+jgpk3Z24=
github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
@ -891,7 +886,6 @@ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
github.com/mozilla/tls-observatory v0.0.0-20200317151703-4fa42e1c2dee/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
@ -1151,7 +1145,6 @@ github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@ -1363,7 +1356,6 @@ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180921000356-2f5d2388922f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -1751,7 +1743,6 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy
gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE=
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v9 v9.30.0 h1:Wk0Z37oBmKj9/n+tPyBHZmeL19LaCoK3Qq48VwYENss=
gopkg.in/go-playground/validator.v9 v9.30.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=


@ -2,6 +2,7 @@ package tsacmd
import (
"context"
"errors"
"flag"
"fmt"
"io"
@ -86,7 +87,7 @@ func (server *server) Serve(listener net.Listener) {
for {
c, err := listener.Accept()
if err != nil {
if !strings.Contains(err.Error(), "use of closed network connection") {
if !errors.Is(err, net.ErrClosed) {
server.logger.Error("failed-to-accept", err)
}


@ -1,4 +1,4 @@
package publichandler
package web
import (
"fmt"


@ -1,4 +1,4 @@
package publichandler_test
package web_test
import (
"compress/gzip"
@ -11,7 +11,7 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/concourse/concourse/web/publichandler"
"github.com/concourse/concourse/web"
)
var _ = Describe("CacheNearlyForever", func() {
@ -20,7 +20,7 @@ var _ = Describe("CacheNearlyForever", func() {
fmt.Fprint(w, "The wrapped handler was called!")
})
wrappedHandler := publichandler.CacheNearlyForever(insideHandler)
wrappedHandler := web.CacheNearlyForever(insideHandler)
recorder := httptest.NewRecorder()
request, err := http.NewRequest("GET", "/", nil)
Expect(err).ToNot(HaveOccurred())
@ -39,7 +39,7 @@ var _ = Describe("CacheNearlyForever", func() {
fmt.Fprint(w, strings.Repeat("abc123", 1000))
})
wrappedHandler := publichandler.CacheNearlyForever(insideHandler)
wrappedHandler := web.CacheNearlyForever(insideHandler)
recorder := httptest.NewRecorder()
request, err := http.NewRequest("GET", "/", nil)
Expect(err).ToNot(HaveOccurred())


@ -1,40 +0,0 @@
package main
import (
"net/http"
"os"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/web"
"github.com/concourse/concourse/web/proxyhandler"
)
func NewLogger() lager.Logger {
logger := lager.NewLogger("web")
logger.RegisterSink(lager.NewReconfigurableSink(lager.NewPrettySink(os.Stdout, lager.DEBUG), lager.DEBUG))
return logger
}
func main() {
logger := NewLogger()
proxyHandler, err := proxyhandler.NewHandler(logger, "http://localhost:8080")
if err != nil {
panic(err)
}
webHandler, err := web.NewHandler(logger)
if err != nil {
panic(err)
}
http.Handle("/api/", proxyHandler)
http.Handle("/auth/", proxyHandler)
http.Handle("/oauth/", proxyHandler)
http.Handle("/", webHandler)
if err = http.ListenAndServe(":8081", nil); err != nil {
logger.Error("server-error", err)
}
}


@ -1,30 +1,35 @@
package web
import (
"embed"
"fmt"
"io/fs"
"net/http"
"os"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/web/indexhandler"
"github.com/concourse/concourse/web/publichandler"
"github.com/concourse/concourse/web/robotshandler"
)
func NewHandler(logger lager.Logger) (http.Handler, error) {
indexHandler, err := indexhandler.NewHandler(logger)
if err != nil {
return nil, err
}
//go:embed public
var publicEmbed embed.FS
publicHandler, err := publichandler.NewHandler()
if err != nil {
return nil, err
func NewHandler(logger lager.Logger, livePublicDir string) (http.Handler, error) {
var publicFS fs.FS
if livePublicDir != "" {
publicFS = os.DirFS(livePublicDir)
} else {
var err error
publicFS, err = fs.Sub(publicEmbed, "public")
if err != nil {
return nil, fmt.Errorf("public fs sub: %w", err)
}
}
robotsHandler := robotshandler.NewHandler()
webMux := http.NewServeMux()
webMux.Handle("/public/", publicHandler)
webMux.Handle("/robots.txt", robotsHandler)
webMux.Handle("/", indexHandler)
webMux.Handle("/public/", PublicHandler(publicFS))
webMux.Handle("/robots.txt", RobotsHandler)
webMux.Handle("/", IndexHandler(logger, publicFS))
return webMux, nil
}

web/index_handler.go

@ -0,0 +1,80 @@
package web
import (
"crypto/md5"
"fmt"
"html/template"
"io/fs"
"net/http"
"sync"
"code.cloudfoundry.org/lager"
)
func IndexHandler(logger lager.Logger, publicFS fs.FS) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
log := logger.Session("index")
tfuncs := &indexTemplateFuncs{
publicFS: publicFS,
assetIDs: map[string]string{},
}
funcs := template.FuncMap{
"asset": tfuncs.asset,
}
t, err := template.New("web").Funcs(funcs).ParseFS(publicFS, "index.html")
if err != nil {
log.Error("failed-to-parse-templates", err)
w.WriteHeader(http.StatusInternalServerError)
return
}
err = t.ExecuteTemplate(w, "index.html", indexTemplateData{
CSRFToken: r.FormValue("csrf_token"),
AuthToken: r.Header.Get("Authorization"),
})
if err != nil {
log.Error("failed-to-build-template", err)
w.WriteHeader(http.StatusInternalServerError)
return
}
})
}
type indexTemplateData struct {
CSRFToken string
AuthToken string
}
type indexTemplateFuncs struct {
publicFS fs.FS
assetIDs map[string]string
assetsL sync.Mutex
}
func (funcs *indexTemplateFuncs) asset(asset string) (string, error) {
funcs.assetsL.Lock()
defer funcs.assetsL.Unlock()
id, found := funcs.assetIDs[asset]
if !found {
hash := md5.New()
contents, err := fs.ReadFile(funcs.publicFS, asset)
if err != nil {
return "", err
}
_, err = hash.Write(contents)
if err != nil {
return "", err
}
id = fmt.Sprintf("%x", hash.Sum(nil))
}
return fmt.Sprintf("/public/%s?id=%s", asset, id), nil
}


@ -1,61 +0,0 @@
package indexhandler
import (
"html/template"
"net/http"
"code.cloudfoundry.org/lager"
"github.com/gobuffalo/packr"
)
type templateData struct {
CSRFToken string
AuthToken string
}
type handler struct {
logger lager.Logger
template *template.Template
}
func NewHandler(logger lager.Logger) (http.Handler, error) {
tfuncs := &templateFuncs{
assetIDs: map[string]string{},
}
funcs := template.FuncMap{
"asset": tfuncs.asset,
}
box := packr.NewBox("../public")
src, err := box.MustBytes("index.html")
if err != nil {
return nil, err
}
t, err := template.New("index").Funcs(funcs).Parse(string(src))
if err != nil {
return nil, err
}
return &handler{
logger: logger,
template: t,
}, nil
}
func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
log := h.logger.Session("index")
err := h.template.Execute(w, templateData{
CSRFToken: r.FormValue("csrf_token"),
AuthToken: r.Header.Get("Authorization"),
})
if err != nil {
log.Fatal("failed-to-build-template", err, lager.Data{})
w.WriteHeader(http.StatusInternalServerError)
}
}


@ -1,40 +0,0 @@
package indexhandler
import (
"crypto/md5"
"fmt"
"sync"
"github.com/gobuffalo/packr"
)
type templateFuncs struct {
assetIDs map[string]string
assetsL sync.Mutex
}
func (funcs *templateFuncs) asset(asset string) (string, error) {
funcs.assetsL.Lock()
defer funcs.assetsL.Unlock()
box := packr.NewBox("../public")
id, found := funcs.assetIDs[asset]
if !found {
hash := md5.New()
contents, err := box.MustBytes(asset)
if err != nil {
return "", err
}
_, err = hash.Write(contents)
if err != nil {
return "", err
}
id = fmt.Sprintf("%x", hash.Sum(nil))
}
return fmt.Sprintf("/public/%s?id=%s", asset, id), nil
}


@ -1,36 +0,0 @@
package proxyhandler
import (
"net"
"net/http"
"net/http/httputil"
"net/url"
"time"
"code.cloudfoundry.org/lager"
)
func NewHandler(logger lager.Logger, host string) (http.Handler, error) {
targetUrl, err := url.Parse(host)
if err != nil {
return nil, err
}
dialer := &net.Dialer{
Timeout: 24 * time.Hour,
KeepAlive: 24 * time.Hour,
}
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: dialer.Dial,
TLSHandshakeTimeout: 60 * time.Second,
}
handler := httputil.NewSingleHostReverseProxy(targetUrl)
handler.FlushInterval = 100 * time.Millisecond
handler.Transport = transport
return handler, nil
}

web/public_handler.go

@ -0,0 +1,15 @@
package web
import (
"io/fs"
"net/http"
)
func PublicHandler(publicFS fs.FS) http.Handler {
return CacheNearlyForever(
http.StripPrefix(
"/public/",
http.FileServer(http.FS(publicFS)),
),
)
}


@ -1,11 +0,0 @@
package publichandler
import (
"net/http"
"github.com/gobuffalo/packr"
)
func NewHandler() (http.Handler, error) {
return CacheNearlyForever(http.StripPrefix("/public/", http.FileServer(packr.NewBox("../public")))), nil
}

web/robots_handler.go

@ -0,0 +1,11 @@
package web
import (
"fmt"
"net/http"
)
var RobotsHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "User-agent: *")
fmt.Fprintln(w, "Disallow: /")
})


@ -1,17 +0,0 @@
package robotshandler
import (
"fmt"
"net/http"
)
func NewHandler() http.Handler {
return &handler{}
}
type handler struct{}
func (self *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "User-agent: *")
fmt.Fprintln(w, "Disallow: /")
}


@ -1,13 +1,13 @@
package publichandler_test
package web_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"testing"
)
func TestPublichandler(t *testing.T) {
func TestWebHandler(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Public Handler Suite")
RunSpecs(t, "Web Handler Suite")
}