Skip to content

Commit

Permalink
feat: improve skip-publish behavior (#1474)
Browse files Browse the repository at this point in the history
* Revert "feat: split brew tap in 2 steps (#1425)"

This reverts commit 5e8882f.

Signed-off-by: Carlos Alexandro Becker <caarlos0@gmail.com>

* fix: brew generation

Signed-off-by: Carlos Alexandro Becker <caarlos0@gmail.com>

* feat: improve bucket write

Signed-off-by: Carlos Alexandro Becker <caarlos0@gmail.com>

* fix: tests

Signed-off-by: Carlos Alexandro Becker <caarlos0@gmail.com>

* fix: tests

Signed-off-by: Carlos Alexandro Becker <caarlos0@gmail.com>

* fix: minio test

Signed-off-by: Carlos Alexandro Becker <caarlos0@gmail.com>

* fix: lint issues

Signed-off-by: Carlos Alexandro Becker <caarlos0@gmail.com>

* fix: lint issues

Signed-off-by: Carlos Alexandro Becker <caarlos0@gmail.com>

* fix: err handling

Signed-off-by: Carlos Alexandro Becker <caarlos0@gmail.com>
  • Loading branch information
caarlos0 committed Apr 29, 2020
1 parent 705ab90 commit 15fd80e
Show file tree
Hide file tree
Showing 13 changed files with 271 additions and 221 deletions.
4 changes: 0 additions & 4 deletions internal/artifact/artifact.go
Expand Up @@ -29,8 +29,6 @@ const (
UploadableBinary
// UploadableFile is any file that can be uploaded
UploadableFile
// UploadableBrewTap is a .rb file that can be uploaded
UploadableBrewTap
// Binary is a binary (output of a gobuild)
Binary
// LinuxPackage is a linux package generated by nfpm
Expand Down Expand Up @@ -60,8 +58,6 @@ func (t Type) String() string {
case UploadableBinary:
case Binary:
return "Binary"
case UploadableBrewTap:
return "Brew Tap"
case LinuxPackage:
return "Linux Package"
case DockerImage:
Expand Down
9 changes: 6 additions & 3 deletions internal/pipe/blob/blob.go
Expand Up @@ -37,13 +37,16 @@ func (Pipe) Publish(ctx *context.Context) error {
if len(ctx.Config.Blobs) == 0 {
return pipe.Skip("Blob section is not configured")
}
// Openning connection to the list of buckets
o := newOpenBucket()
var up uploader = productionUploader{}
if ctx.SkipPublish {
up = skipUploader{}
}

var g = semerrgroup.New(ctx.Parallelism)
for _, conf := range ctx.Config.Blobs {
conf := conf
g.Go(func() error {
return o.Upload(ctx, conf)
return doUpload(ctx, conf, up)
})
}
return g.Wait()
Expand Down
86 changes: 80 additions & 6 deletions internal/pipe/blob/blob_minio_test.go
Expand Up @@ -5,6 +5,7 @@ package blob
// the test setup and teardown

import (
"io"
"io/ioutil"
"net"
"os"
Expand All @@ -19,6 +20,7 @@ import (
"github.com/goreleaser/goreleaser/pkg/context"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gocloud.dev/blob"
)

func TestMinioUpload(t *testing.T) {
Expand Down Expand Up @@ -153,7 +155,7 @@ func TestMinioUploadInvalidCustomBucketID(t *testing.T) {
},
},
})
ctx.Git = context.GitInfo{CurrentTag: "v1.0.0"}
ctx.Git = context.GitInfo{CurrentTag: "v1.1.0"}
ctx.Artifacts.Add(&artifact.Artifact{
Type: artifact.UploadableArchive,
Name: "bin.tar.gz",
Expand All @@ -172,6 +174,77 @@ func TestMinioUploadInvalidCustomBucketID(t *testing.T) {
assert.Error(t, Pipe{}.Publish(ctx))
}

// TestMinioUploadSkipPublish verifies that when --skip-publish is set the
// pipe succeeds but writes nothing to the bucket.
func TestMinioUploadSkipPublish(t *testing.T) {
	var listen = randomListen(t)
	folder, err := ioutil.TempDir("", "goreleasertest")
	assert.NoError(t, err)
	srcpath := filepath.Join(folder, "source.tar.gz")
	tgzpath := filepath.Join(folder, "bin.tar.gz")
	debpath := filepath.Join(folder, "bin.deb")
	checkpath := filepath.Join(folder, "check.txt")
	assert.NoError(t, ioutil.WriteFile(checkpath, []byte("fake checksums"), 0744))
	assert.NoError(t, ioutil.WriteFile(srcpath, []byte("fake\nsrc"), 0744))
	assert.NoError(t, ioutil.WriteFile(tgzpath, []byte("fake\ntargz"), 0744))
	assert.NoError(t, ioutil.WriteFile(debpath, []byte("fake\ndeb"), 0744))
	var ctx = context.New(config.Project{
		Dist:        folder,
		ProjectName: "testupload",
		Blobs: []config.Blob{
			{
				Provider: "s3",
				Bucket:   "test",
				Region:   "us-east",
				Endpoint: "http://" + listen,
				IDs:      []string{"foo", "bar"},
			},
		},
	})
	ctx.SkipPublish = true
	ctx.Git = context.GitInfo{CurrentTag: "v1.2.0"}
	ctx.Artifacts.Add(&artifact.Artifact{
		Type: artifact.Checksum,
		Name: "checksum.txt",
		Path: checkpath,
	})
	ctx.Artifacts.Add(&artifact.Artifact{
		Type: artifact.UploadableSourceArchive,
		Name: "source.tar.gz",
		Path: srcpath,
		Extra: map[string]interface{}{
			"Format": "tar.gz",
		},
	})
	ctx.Artifacts.Add(&artifact.Artifact{
		Type: artifact.UploadableArchive,
		Name: "bin.tar.gz",
		Path: tgzpath,
		Extra: map[string]interface{}{
			"ID": "foo",
		},
	})
	ctx.Artifacts.Add(&artifact.Artifact{
		Type: artifact.LinuxPackage,
		Name: "bin.deb",
		Path: debpath,
		Extra: map[string]interface{}{
			"ID": "bar",
		},
	})
	var name = "test_upload"
	defer stop(t, name)
	start(t, name, listen)
	prepareEnv(t, listen)
	assert.NoError(t, Pipe{}.Default(ctx))
	assert.NoError(t, Pipe{}.Publish(ctx))

	// NOTE: require.NotContains(files, []string{...}) would test whether the
	// whole []string slice is a single element of files — never true, so the
	// assertion would be vacuous. Check each expected key individually.
	files := getFiles(t, ctx, ctx.Config.Blobs[0])
	for _, key := range []string{
		"testupload/v1.2.0/bin.deb",
		"testupload/v1.2.0/bin.tar.gz",
		"testupload/v1.2.0/checksum.txt",
		"testupload/v1.2.0/source.tar.gz",
	} {
		require.NotContains(t, files, key)
	}
}

func randomListen(t *testing.T) string {
listener, err := net.Listen("tcp", "127.0.0.1:0")
require.NoError(t, err)
Expand Down Expand Up @@ -229,19 +302,20 @@ func removeTestData(t *testing.T) {
_ = os.RemoveAll("./testdata/data/test/testupload") // dont care if it fails
}

func getFiles(t *testing.T, ctx *context.Context, blob config.Blob) []string {
var bucket = Bucket{}
url, err := bucket.url(ctx, blob)
func getFiles(t *testing.T, ctx *context.Context, cfg config.Blob) []string {
url, err := urlFor(ctx, cfg)
require.NoError(t, err)
conn, err := bucket.Connect(ctx, url)
conn, err := blob.OpenBucket(ctx, url)
require.NoError(t, err)
defer conn.Close()
var iter = conn.List(nil)
var files []string
for {
file, err := iter.Next(ctx)
if err != nil {
if err != nil && err == io.EOF {
break
}
require.NoError(t, err)
files = append(files, file.Key)
}
return files
Expand Down
12 changes: 5 additions & 7 deletions internal/pipe/blob/blob_test.go
Expand Up @@ -240,10 +240,8 @@ func TestPipe_Publish(t *testing.T) {
}

func TestURL(t *testing.T) {
var buck = Bucket{}

t.Run("s3 with opts", func(t *testing.T) {
url, err := buck.url(context.New(config.Project{}), config.Blob{
url, err := urlFor(context.New(config.Project{}), config.Blob{
Bucket: "foo",
Provider: "s3",
Region: "us-west-1",
Expand All @@ -256,7 +254,7 @@ func TestURL(t *testing.T) {
})

t.Run("s3 with some opts", func(t *testing.T) {
url, err := buck.url(context.New(config.Project{}), config.Blob{
url, err := urlFor(context.New(config.Project{}), config.Blob{
Bucket: "foo",
Provider: "s3",
Region: "us-west-1",
Expand All @@ -267,7 +265,7 @@ func TestURL(t *testing.T) {
})

t.Run("gs with opts", func(t *testing.T) {
url, err := buck.url(context.New(config.Project{}), config.Blob{
url, err := urlFor(context.New(config.Project{}), config.Blob{
Bucket: "foo",
Provider: "gs",
Region: "us-west-1",
Expand All @@ -280,7 +278,7 @@ func TestURL(t *testing.T) {
})

t.Run("s3 no opts", func(t *testing.T) {
url, err := buck.url(context.New(config.Project{}), config.Blob{
url, err := urlFor(context.New(config.Project{}), config.Blob{
Bucket: "foo",
Provider: "s3",
})
Expand All @@ -289,7 +287,7 @@ func TestURL(t *testing.T) {
})

t.Run("gs no opts", func(t *testing.T) {
url, err := buck.url(context.New(config.Project{}), config.Blob{
url, err := urlFor(context.New(config.Project{}), config.Blob{
Bucket: "foo",
Provider: "gs",
})
Expand Down
118 changes: 58 additions & 60 deletions internal/pipe/blob/openbucket.go → internal/pipe/blob/upload.go
Expand Up @@ -27,32 +27,7 @@ import (
_ "gocloud.dev/secrets/gcpkms"
)

// OpenBucket is the interface that wraps the BucketConnect and UploadBucket method
type OpenBucket interface {
Connect(ctx *context.Context, bucketURL string) (*blob.Bucket, error)
Upload(ctx *context.Context, conf config.Blob) error
}

// Bucket is object which holds connection for Go Bucker Provider
type Bucket struct {
BucketConn *blob.Bucket
}

// returns openbucket connection for list of providers
func newOpenBucket() OpenBucket {
return Bucket{}
}

// Connect makes connection with provider
func (b Bucket) Connect(ctx *context.Context, bucketURL string) (*blob.Bucket, error) {
conn, err := blob.OpenBucket(ctx, bucketURL)
if err != nil {
return nil, err
}
return conn, nil
}

func (b Bucket) url(ctx *context.Context, conf config.Blob) (string, error) {
func urlFor(ctx *context.Context, conf config.Blob) (string, error) {
bucket, err := tmpl.New(ctx).Apply(conf.Bucket)
if err != nil {
return "", err
Expand Down Expand Up @@ -83,26 +58,20 @@ func (b Bucket) url(ctx *context.Context, conf config.Blob) (string, error) {
return bucketURL, nil
}

// Upload takes connection initilized from newOpenBucket to upload goreleaser artifacts
// Takes goreleaser context(which includes artificats) and bucketURL for upload destination (gs://gorelease-bucket)
func (b Bucket) Upload(ctx *context.Context, conf config.Blob) error {
// Takes the goreleaser context (which includes artifacts) and a bucketURL for
// the upload destination (e.g. gs://gorelease-bucket), uploading via the
// given uploader implementation.
func doUpload(ctx *context.Context, conf config.Blob, up uploader) error {
folder, err := tmpl.New(ctx).Apply(conf.Folder)
if err != nil {
return err
}

bucketURL, err := b.url(ctx, conf)
bucketURL, err := urlFor(ctx, conf)
if err != nil {
return err
}

// Get the openbucket connection for specific provider
conn, err := b.Connect(ctx, bucketURL)
if err != nil {
return err
}
defer conn.Close()

var filter = artifact.Or(
artifact.ByType(artifact.UploadableArchive),
artifact.ByType(artifact.UploadableBinary),
Expand All @@ -119,34 +88,18 @@ func (b Bucket) Upload(ctx *context.Context, conf config.Blob) error {
for _, artifact := range ctx.Artifacts.Filter(filter).List() {
artifact := artifact
g.Go(func() error {
log.WithFields(log.Fields{
"provider": bucketURL,
"folder": folder,
"artifact": artifact.Name,
}).Info("uploading")

// TODO: replace this with ?prefix=folder on the bucket url
w, err := conn.NewWriter(ctx, filepath.Join(folder, artifact.Name), nil)
if err != nil {
return errors.Wrap(err, "failed to obtain writer")
}
data, err := getData(ctx, conf, artifact.Path)
if err != nil {
return err
}
_, err = w.Write(data)
if err != nil {

if err := up.Upload(ctx, bucketURL, filepath.Join(folder, artifact.Name), data); err != nil {
switch {
case errorContains(err, "NoSuchBucket", "ContainerNotFound", "notFound"):
return errors.Wrapf(err, "provided bucket does not exist: %s", bucketURL)
case errorContains(err, "NoCredentialProviders"):
return errors.Wrapf(err, "check credentials and access to bucket: %s", bucketURL)
default:
return errors.Wrapf(err, "failed to write to bucket")
}
}
if err = w.Close(); err != nil {
switch {
case errorContains(err, "InvalidAccessKeyId"):
return errors.Wrap(err, "aws access key id you provided does not exist in our records")
case errorContains(err, "AuthenticationFailed"):
Expand All @@ -155,14 +108,10 @@ func (b Bucket) Upload(ctx *context.Context, conf config.Blob) error {
return errors.Wrap(err, "google app credentials you provided is not valid")
case errorContains(err, "no such host"):
return errors.Wrap(err, "azure storage account you provided is not valid")
case errorContains(err, "NoSuchBucket", "ContainerNotFound", "notFound"):
return errors.Wrapf(err, "provided bucket does not exist: %s", bucketURL)
case errorContains(err, "NoCredentialProviders"):
return errors.Wrapf(err, "check credentials and access to bucket %s", bucketURL)
case errorContains(err, "ServiceCode=ResourceNotFound"):
return errors.Wrapf(err, "missing azure storage key for provided bucket %s", bucketURL)
default:
return errors.Wrap(err, "failed to close Bucket writer")
return errors.Wrap(err, "failed to write to bucket")
}
}
return err
Expand Down Expand Up @@ -190,3 +139,52 @@ func getData(ctx *context.Context, conf config.Blob, path string) ([]byte, error
}
return data, err
}

// uploader abstracts writing a single artifact blob to a bucket URL, so the
// publish step can swap in a no-op implementation when publishing is skipped.
type uploader interface {
Upload(ctx *context.Context, url, path string, data []byte) error
}

// skipUploader is the uploader used when skip-publish is set (ctx.SkipPublish):
// it only logs what would have been uploaded and never touches the bucket.
type skipUploader struct{}

// Upload logs the would-be destination and returns nil without uploading.
func (u skipUploader) Upload(_ *context.Context, url, path string, _ []byte) error {
log.WithFields(log.Fields{
"bucket": url,
"path": path,
}).Warn("doUpload skipped because skip-publish is set")
return nil
}

// productionUploader performs the real upload to the destination bucket.
type productionUploader struct{}

// Upload opens the bucket at the given url and writes data under path,
// logging the destination first. Errors from closing the bucket or the
// writer are returned when nothing failed earlier.
func (productionUploader) Upload(ctx *context.Context, url, path string, data []byte) (err error) {
	log.WithFields(log.Fields{
		"bucket": url,
		"path":   path,
	}).Info("uploading")

	// TODO: opening a fresh connection per file is wasteful; reuse a single
	// bucket handle instead.
	bucket, err := blob.OpenBucket(ctx, url)
	if err != nil {
		return err
	}
	defer func() {
		if closeErr := bucket.Close(); err == nil {
			err = closeErr
		}
	}()

	writer, err := bucket.NewWriter(ctx, path, nil)
	if err != nil {
		return err
	}
	defer func() {
		if closeErr := writer.Close(); err == nil {
			err = closeErr
		}
	}()

	_, err = writer.Write(data)
	return err
}

0 comments on commit 15fd80e

Please sign in to comment.