From 9f87283cd58ccf64715fdd2c487f17c261ff65a7 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Mon, 11 Feb 2019 23:14:22 -0800 Subject: [PATCH] Revert and bring back B2 gateway implementation (#7224) This PR is simply a revert of 3265112d04c13c2ec49731868824224a0a6af823 just for B2 gateway. --- cmd/gateway/b2/gateway-b2.go | 850 ++++++++++++ cmd/gateway/b2/gateway-b2_test.go | 119 ++ cmd/gateway/gateway.go | 4 + docs/gateway/README.md | 2 + docs/gateway/b2.md | 58 + .../garyburd/redigo/internal/commandinfo.go | 54 - vendor/github.com/minio/blazer/LICENSE | 13 + vendor/github.com/minio/blazer/base/base.go | 1204 +++++++++++++++++ .../github.com/minio/blazer/base/strings.go | 81 ++ .../minio/blazer/internal/b2types/b2types.go | 255 ++++ .../minio/blazer/internal/blog/blog.go | 54 + vendor/vendor.json | 24 +- 12 files changed, 2658 insertions(+), 60 deletions(-) create mode 100644 cmd/gateway/b2/gateway-b2.go create mode 100644 cmd/gateway/b2/gateway-b2_test.go create mode 100644 docs/gateway/b2.md delete mode 100644 vendor/github.com/garyburd/redigo/internal/commandinfo.go create mode 100644 vendor/github.com/minio/blazer/LICENSE create mode 100644 vendor/github.com/minio/blazer/base/base.go create mode 100644 vendor/github.com/minio/blazer/base/strings.go create mode 100644 vendor/github.com/minio/blazer/internal/b2types/b2types.go create mode 100644 vendor/github.com/minio/blazer/internal/blog/blog.go diff --git a/cmd/gateway/b2/gateway-b2.go b/cmd/gateway/b2/gateway-b2.go new file mode 100644 index 000000000..a01e29196 --- /dev/null +++ b/cmd/gateway/b2/gateway-b2.go @@ -0,0 +1,850 @@ +/* + * Minio Cloud Storage, (C) 2017, 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package b2 + +import ( + "context" + "crypto/sha1" + "fmt" + "hash" + "io" + "io/ioutil" + "net/http" + + "strings" + "sync" + "time" + + b2 "github.com/minio/blazer/base" + "github.com/minio/cli" + miniogopolicy "github.com/minio/minio-go/pkg/policy" + "github.com/minio/minio/cmd/logger" + "github.com/minio/minio/pkg/auth" + h2 "github.com/minio/minio/pkg/hash" + "github.com/minio/minio/pkg/policy" + "github.com/minio/minio/pkg/policy/condition" + + minio "github.com/minio/minio/cmd" +) + +// Supported bucket types by B2 backend. +const ( + bucketTypePrivate = "allPrivate" + bucketTypeReadOnly = "allPublic" + b2Backend = "b2" +) + +func init() { + const b2GatewayTemplate = `NAME: + {{.HelpName}} - {{.Usage}} + +USAGE: + {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} +{{if .VisibleFlags}} +FLAGS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} +ENVIRONMENT VARIABLES: + ACCESS: + MINIO_ACCESS_KEY: B2 account id. + MINIO_SECRET_KEY: B2 application key. + + BROWSER: + MINIO_BROWSER: To disable web browser access, set this value to "off". + + DOMAIN: + MINIO_DOMAIN: To enable virtual-host-style requests, set this value to Minio host domain name. + + CACHE: + MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ";". 
+ MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";". + MINIO_CACHE_EXPIRY: Cache expiry duration in days. + MINIO_CACHE_MAXUSE: Maximum permitted usage of the cache in percentage (0-100). + +EXAMPLES: + 1. Start minio gateway server for B2 backend. + $ export MINIO_ACCESS_KEY=accountID + $ export MINIO_SECRET_KEY=applicationKey + $ {{.HelpName}} + + 2. Start minio gateway server for B2 backend with edge caching enabled. + $ export MINIO_ACCESS_KEY=accountID + $ export MINIO_SECRET_KEY=applicationKey + $ export MINIO_CACHE_DRIVES="/mnt/drive1;/mnt/drive2;/mnt/drive3;/mnt/drive4" + $ export MINIO_CACHE_EXCLUDE="bucket1/*;*.png" + $ export MINIO_CACHE_EXPIRY=40 + $ export MINIO_CACHE_MAXUSE=80 + $ {{.HelpName}} +` + minio.RegisterGatewayCommand(cli.Command{ + Name: b2Backend, + Usage: "Backblaze B2", + Action: b2GatewayMain, + CustomHelpTemplate: b2GatewayTemplate, + HideHelpCommand: true, + }) +} + +// Handler for 'minio gateway b2' command line. +func b2GatewayMain(ctx *cli.Context) { + minio.StartGateway(ctx, &B2{}) +} + +// B2 implements Minio Gateway +type B2 struct{} + +// Name implements Gateway interface. +func (g *B2) Name() string { + return b2Backend +} + +// NewGatewayLayer returns b2 gateway layer, implements ObjectLayer interface to +// talk to B2 remote backend. +func (g *B2) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) { + ctx := context.Background() + client, err := b2.AuthorizeAccount(ctx, creds.AccessKey, creds.SecretKey, b2.Transport(minio.NewCustomHTTPTransport())) + if err != nil { + return nil, err + } + + return &b2Objects{ + creds: creds, + b2Client: client, + ctx: ctx, + }, nil +} + +// Production - Ready for production use. +func (g *B2) Production() bool { + return true +} + +// b2Object implements gateway for Minio and BackBlaze B2 compatible object storage servers. +type b2Objects struct { + minio.GatewayUnsupported + mu sync.Mutex + creds auth.Credentials + b2Client *b2.B2 + ctx context.Context +} + +// Convert B2 errors to minio object layer errors. +func b2ToObjectError(err error, params ...string) error { + if err == nil { + return nil + } + bucket := "" + object := "" + uploadID := "" + if len(params) >= 1 { + bucket = params[0] + } + if len(params) == 2 { + object = params[1] + } + if len(params) == 3 { + uploadID = params[2] + } + + // Following code is a non-exhaustive check to convert + // B2 errors into S3 compatible errors. + // + // For a more complete information - https://www.backblaze.com/b2/docs/ + statusCode, code, msg := b2.Code(err) + if statusCode == 0 { + // We don't interpret non B2 errors. B2 errors have statusCode + // to help us convert them to S3 object errors. + return err + } + + switch code { + case "duplicate_bucket_name": + err = minio.BucketAlreadyOwnedByYou{Bucket: bucket} + case "bad_request": + if object != "" { + err = minio.ObjectNameInvalid{ + Bucket: bucket, + Object: object, + } + } else if bucket != "" { + err = minio.BucketNotFound{Bucket: bucket} + } + case "bad_json": + if object != "" { + err = minio.ObjectNameInvalid{ + Bucket: bucket, + Object: object, + } + } else if bucket != "" { + err = minio.BucketNameInvalid{Bucket: bucket} + } + case "bad_bucket_id": + err = minio.BucketNotFound{Bucket: bucket} + case "file_not_present", "not_found": + err = minio.ObjectNotFound{ + Bucket: bucket, + Object: object, + } + case "cannot_delete_non_empty_bucket": + err = minio.BucketNotEmpty{Bucket: bucket} + } + + // Special interpretation like this is required for Multipart sessions. 
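+	// B2 only signals an expired or unknown large-file session through its
+	// error message (e.g. "No active upload for: <uploadID>"), so we match
+	// on the message text rather than on an error code.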
+	if strings.Contains(msg, "No active upload for") && uploadID != "" {
+		err = minio.InvalidUploadID{UploadID: uploadID}
+	}
+
+	return err
+}
+
+// Shutdown saves any gateway metadata to disk
+// if necessary and reloads it upon the next restart.
+func (l *b2Objects) Shutdown(ctx context.Context) error {
+	// TODO
+	return nil
+}
+
+// StorageInfo is not relevant to B2 backend.
+func (l *b2Objects) StorageInfo(ctx context.Context) (si minio.StorageInfo) {
+	return si
+}
+
+// MakeBucketWithLocation creates a new bucket on the B2 backend.
+func (l *b2Objects) MakeBucketWithLocation(ctx context.Context, bucket, location string) error {
+	// location is ignored for B2 backend.
+
+	// All buckets are set to private by default.
+	_, err := l.b2Client.CreateBucket(l.ctx, bucket, bucketTypePrivate, nil, nil)
+	logger.LogIf(ctx, err)
+	return b2ToObjectError(err, bucket)
+}
+
+func (l *b2Objects) reAuthorizeAccount(ctx context.Context) error {
+	client, err := b2.AuthorizeAccount(l.ctx, l.creds.AccessKey, l.creds.SecretKey, b2.Transport(minio.NewCustomHTTPTransport()))
+	if err != nil {
+		return err
+	}
+	l.mu.Lock()
+	l.b2Client.Update(client)
+	l.mu.Unlock()
+	return nil
+}
+
+// listBuckets is a wrapper similar to ListBuckets, which re-authorizes
+// the account and updates the B2 client safely. Once successfully
+// authorized, it performs the call again and returns the list of buckets.
+// Errors which are not actionable are returned to the caller as-is.
+func (l *b2Objects) listBuckets(ctx context.Context, err error) ([]*b2.Bucket, error) {
+	if err != nil {
+		if b2.Action(err) != b2.ReAuthenticate {
+			return nil, err
+		}
+		if rerr := l.reAuthorizeAccount(ctx); rerr != nil {
+			return nil, rerr
+		}
+	}
+	bktList, lerr := l.b2Client.ListBuckets(l.ctx)
+	if lerr != nil {
+		return l.listBuckets(ctx, lerr)
+	}
+	return bktList, nil
+}
+
+// Bucket is a helper which provides a *Bucket instance
+// for performing an API operation. The B2 API doesn't
+// provide a direct way to access a bucket, so we need
+// to employ the following technique.
+func (l *b2Objects) Bucket(ctx context.Context, bucket string) (*b2.Bucket, error) {
+	bktList, err := l.listBuckets(ctx, nil)
+	if err != nil {
+		logger.LogIf(ctx, err)
+		return nil, b2ToObjectError(err, bucket)
+	}
+	for _, bkt := range bktList {
+		if bkt.Name == bucket {
+			return bkt, nil
+		}
+	}
+	return nil, minio.BucketNotFound{Bucket: bucket}
+}
+
+// GetBucketInfo gets bucket metadata.
+func (l *b2Objects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.BucketInfo, err error) {
+	if _, err = l.Bucket(ctx, bucket); err != nil {
+		return bi, err
+	}
+	return minio.BucketInfo{
+		Name:    bucket,
+		Created: time.Unix(0, 0),
+	}, nil
+}
+
+// ListBuckets lists all B2 buckets.
+func (l *b2Objects) ListBuckets(ctx context.Context) ([]minio.BucketInfo, error) {
+	bktList, err := l.listBuckets(ctx, nil)
+	if err != nil {
+		return nil, err
+	}
+	var bktInfo []minio.BucketInfo
+	for _, bkt := range bktList {
+		bktInfo = append(bktInfo, minio.BucketInfo{
+			Name:    bkt.Name,
+			Created: time.Unix(0, 0),
+		})
+	}
+	return bktInfo, nil
+}
+
+// DeleteBucket deletes a bucket on B2.
+func (l *b2Objects) DeleteBucket(ctx context.Context, bucket string) error {
+	bkt, err := l.Bucket(ctx, bucket)
+	if err != nil {
+		return err
+	}
+	err = bkt.DeleteBucket(l.ctx)
+	logger.LogIf(ctx, err)
+	return b2ToObjectError(err, bucket)
+}
+
+// ListObjects lists all objects in a B2 bucket filtered by prefix; returns up to 1000 entries at a time.
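+// Callers page through the listing by feeding NextMarker back in as marker;
+// a minimal sketch (bucket name and page size are illustrative):
+//
+//	marker := ""
+//	for {
+//		loi, err := l.ListObjects(ctx, "mybucket", "", marker, "/", 1000)
+//		if err != nil || !loi.IsTruncated {
+//			break
+//		}
+//		marker = loi.NextMarker
+//	}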
+func (l *b2Objects) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
+	bkt, err := l.Bucket(ctx, bucket)
+	if err != nil {
+		return loi, err
+	}
+	files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, marker, prefix, delimiter)
+	if lerr != nil {
+		logger.LogIf(ctx, lerr)
+		return loi, b2ToObjectError(lerr, bucket)
+	}
+	loi.IsTruncated = next != ""
+	loi.NextMarker = next
+	for _, file := range files {
+		switch file.Status {
+		case "folder":
+			loi.Prefixes = append(loi.Prefixes, file.Name)
+		case "upload":
+			loi.Objects = append(loi.Objects, minio.ObjectInfo{
+				Bucket:      bucket,
+				Name:        file.Name,
+				ModTime:     file.Timestamp,
+				Size:        file.Size,
+				ETag:        minio.ToS3ETag(file.Info.ID),
+				ContentType: file.Info.ContentType,
+				UserDefined: file.Info.Info,
+			})
+		}
+	}
+	return loi, nil
+}
+
+// ListObjectsV2 lists all objects in a B2 bucket filtered by prefix; returns up to 1000 entries at a time.
+func (l *b2Objects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int,
+	fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, err error) {
+	// fetchOwner is not supported and unused.
+	marker := continuationToken
+	if marker == "" {
+		// B2's continuation token is an object name to "start at" rather than "start after".
+		// startAfter plus the lowest character B2 supports is used so that the startAfter
+		// object isn't included in the results.
+		marker = startAfter + " "
+	}
+
+	bkt, err := l.Bucket(ctx, bucket)
+	if err != nil {
+		return loi, err
+	}
+	files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, marker, prefix, delimiter)
+	if lerr != nil {
+		logger.LogIf(ctx, lerr)
+		return loi, b2ToObjectError(lerr, bucket)
+	}
+	loi.IsTruncated = next != ""
+	loi.ContinuationToken = continuationToken
+	loi.NextContinuationToken = next
+	for _, file := range files {
+		switch file.Status {
+		case "folder":
+			loi.Prefixes = append(loi.Prefixes, file.Name)
+		case "upload":
+			loi.Objects = append(loi.Objects, minio.ObjectInfo{
+				Bucket:      bucket,
+				Name:        file.Name,
+				ModTime:     file.Timestamp,
+				Size:        file.Size,
+				ETag:        minio.ToS3ETag(file.Info.ID),
+				ContentType: file.Info.ContentType,
+				UserDefined: file.Info.Info,
+			})
+		}
+	}
+	return loi, nil
+}
+
+// GetObjectNInfo - returns object info and a locked object ReadCloser.
+func (l *b2Objects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, opts minio.ObjectOptions) (gr *minio.GetObjectReader, err error) {
+	var objInfo minio.ObjectInfo
+	objInfo, err = l.GetObjectInfo(ctx, bucket, object, opts)
+	if err != nil {
+		return nil, err
+	}
+
+	var startOffset, length int64
+	startOffset, length, err = rs.GetOffsetLength(objInfo.Size)
+	if err != nil {
+		return nil, err
+	}
+
+	pr, pw := io.Pipe()
+	go func() {
+		err := l.GetObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, opts)
+		pw.CloseWithError(err)
+	}()
+	// Setup cleanup function to cause the above go-routine to
+	// exit in case of partial read.
+	pipeCloser := func() { pr.Close() }
+	return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil
+}
+
+// GetObject reads an object from B2. Supports additional
+// parameters like offset and length which are synonymous with
+// HTTP Range requests.
+//
+// startOffset indicates the starting read location of the object.
+// length indicates the total number of bytes to be read, starting at startOffset.
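+// For example, an HTTP request for Range "bytes=0-99" arrives here as
+// startOffset 0 and length 100.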
+func (l *b2Objects) GetObject(ctx context.Context, bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
+	bkt, err := l.Bucket(ctx, bucket)
+	if err != nil {
+		return err
+	}
+	reader, err := bkt.DownloadFileByName(l.ctx, object, startOffset, length)
+	if err != nil {
+		logger.LogIf(ctx, err)
+		return b2ToObjectError(err, bucket, object)
+	}
+	defer reader.Close()
+	_, err = io.Copy(writer, reader)
+	logger.LogIf(ctx, err)
+	return b2ToObjectError(err, bucket, object)
+}
+
+// GetObjectInfo reads object info and replies back ObjectInfo.
+func (l *b2Objects) GetObjectInfo(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
+	bkt, err := l.Bucket(ctx, bucket)
+	if err != nil {
+		return objInfo, err
+	}
+	f, err := bkt.DownloadFileByName(l.ctx, object, 0, 1)
+	if err != nil {
+		logger.LogIf(ctx, err)
+		return objInfo, b2ToObjectError(err, bucket, object)
+	}
+	f.Close()
+	fi, err := bkt.File(f.ID, object).GetFileInfo(l.ctx)
+	if err != nil {
+		logger.LogIf(ctx, err)
+		return objInfo, b2ToObjectError(err, bucket, object)
+	}
+	return minio.ObjectInfo{
+		Bucket:      bucket,
+		Name:        object,
+		ETag:        minio.ToS3ETag(fi.ID),
+		Size:        fi.Size,
+		ModTime:     fi.Timestamp,
+		ContentType: fi.ContentType,
+		UserDefined: fi.Info,
+	}, nil
+}
+
+// In B2 - You must always include the X-Bz-Content-Sha1 header with
+// your upload request. The value you provide can be:
+// (1) the 40-character hex checksum of the file,
+// (2) the string hex_digits_at_end, or
+// (3) the string do_not_verify.
+// For more reference - https://www.backblaze.com/b2/docs/uploading.html
+//
+// In our case we are going to use option (2).
+const sha1AtEOF = "hex_digits_at_end"
+
+// With the second option mentioned above, you append the 40-character hex sha1
+// to the end of the request body, immediately after the contents of the file
+// being uploaded. Note that the content length becomes the original size of
+// the reader plus 40 bytes.
+//
+// newB2Reader implements a B2 compatible reader by wrapping the hash.Reader into
+// a new io.Reader which will emit the sha1 hex digits at io.EOF.
+// It also means that your overall content size is now original size + 40 bytes.
+// Additionally this reader also verifies the hash encapsulated inside hash.Reader
+// at io.EOF; if the verification fails we return an error and do not send
+// the content to the server.
+func newB2Reader(r *h2.Reader, size int64) *Reader {
+	return &Reader{
+		r:        r,
+		size:     size,
+		sha1Hash: sha1.New(),
+	}
+}
+
+// Reader wraps the hash.Reader and emits the sha1 hex digits at io.EOF.
+// It also means that your overall content size is now original size + 40
+// bytes. Additionally this reader also verifies the hash encapsulated
+// inside hash.Reader at io.EOF; if the verification fails we return an
+// error and do not send the content to the server.
+type Reader struct {
+	r        *h2.Reader
+	size     int64
+	sha1Hash hash.Hash
+
+	isEOF bool
+	buf   *strings.Reader
+}
+
+// Size - Returns the total size of Reader.
+func (nb *Reader) Size() int64 { return nb.size + 40 }
+func (nb *Reader) Read(p []byte) (int, error) {
+	if nb.isEOF {
+		return nb.buf.Read(p)
+	}
+	// Read into the hash to update the ongoing checksum.
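+	// io.TeeReader forwards the bytes read from nb.r to p while also
+	// writing them into sha1Hash, so the checksum covers exactly the
+	// bytes handed to the caller so far.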
+	n, err := io.TeeReader(nb.r, nb.sha1Hash).Read(p)
+	if err == io.EOF {
+		// Stream is not corrupted on this end;
+		// now fill in the last 40 bytes of sha1 hex
+		// so that the server can verify the stream on
+		// their end.
+		err = nil
+		nb.isEOF = true
+		nb.buf = strings.NewReader(fmt.Sprintf("%x", nb.sha1Hash.Sum(nil)))
+	}
+	return n, err
+}
+
+// PutObject uploads a single object to the B2 backend using the *b2_upload_file* API; uploads of up to 5GiB are supported.
+func (l *b2Objects) PutObject(ctx context.Context, bucket string, object string, r *minio.PutObjReader, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
+	data := r.Reader
+
+	bkt, err := l.Bucket(ctx, bucket)
+	if err != nil {
+		return objInfo, err
+	}
+	contentType := opts.UserDefined["content-type"]
+	delete(opts.UserDefined, "content-type")
+
+	var u *b2.URL
+	u, err = bkt.GetUploadURL(l.ctx)
+	if err != nil {
+		logger.LogIf(ctx, err)
+		return objInfo, b2ToObjectError(err, bucket, object)
+	}
+
+	hr := newB2Reader(data, data.Size())
+	var f *b2.File
+	f, err = u.UploadFile(l.ctx, hr, int(hr.Size()), object, contentType, sha1AtEOF, opts.UserDefined)
+	if err != nil {
+		logger.LogIf(ctx, err)
+		return objInfo, b2ToObjectError(err, bucket, object)
+	}
+
+	var fi *b2.FileInfo
+	fi, err = f.GetFileInfo(l.ctx)
+	if err != nil {
+		logger.LogIf(ctx, err)
+		return objInfo, b2ToObjectError(err, bucket, object)
+	}
+
+	return minio.ObjectInfo{
+		Bucket:      bucket,
+		Name:        object,
+		ETag:        minio.ToS3ETag(fi.ID),
+		Size:        fi.Size,
+		ModTime:     fi.Timestamp,
+		ContentType: fi.ContentType,
+		UserDefined: fi.Info,
+	}, nil
+}
+
+// DeleteObject deletes a blob in the bucket.
+func (l *b2Objects) DeleteObject(ctx context.Context, bucket string, object string) error {
+	bkt, err := l.Bucket(ctx, bucket)
+	if err != nil {
+		return err
+	}
+	reader, err := bkt.DownloadFileByName(l.ctx, object, 0, 1)
+	if err != nil {
+		logger.LogIf(ctx, err)
+		return b2ToObjectError(err, bucket, object)
+	}
+	io.Copy(ioutil.Discard, reader)
+	reader.Close()
+	err = bkt.File(reader.ID, object).DeleteFileVersion(l.ctx)
+	logger.LogIf(ctx, err)
+	return b2ToObjectError(err, bucket, object)
+}
+
+// ListMultipartUploads lists all multipart uploads.
+func (l *b2Objects) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string,
+	delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, err error) {
+	// keyMarker, prefix and delimiter are all ignored; Backblaze B2 doesn't support
+	// any of these parameters. The only equivalent parameter is uploadIDMarker.
+	bkt, err := l.Bucket(ctx, bucket)
+	if err != nil {
+		return lmi, err
+	}
+	// The maximum number of files to return from this call.
+	// The default value is 100, and the maximum allowed is 100.
+	if maxUploads > 100 {
+		maxUploads = 100
+	}
+	largeFiles, nextMarker, err := bkt.ListUnfinishedLargeFiles(l.ctx, uploadIDMarker, maxUploads)
+	if err != nil {
+		logger.LogIf(ctx, err)
+		return lmi, b2ToObjectError(err, bucket)
+	}
+	lmi = minio.ListMultipartsInfo{
+		MaxUploads: maxUploads,
+	}
+	if nextMarker != "" {
+		lmi.IsTruncated = true
+		lmi.NextUploadIDMarker = nextMarker
+	}
+	for _, largeFile := range largeFiles {
+		lmi.Uploads = append(lmi.Uploads, minio.MultipartInfo{
+			Object:    largeFile.Name,
+			UploadID:  largeFile.ID,
+			Initiated: largeFile.Timestamp,
+		})
+	}
+	return lmi, nil
+}
+
+// NewMultipartUpload uploads an object in multiple parts; uses B2's LargeFile upload API.
+// Large files can range in size from 5MB to 10TB.
+// Each large file must consist of at least 2 parts, and all of the parts except the
+// last one must be at least 5MB in size. The last part must contain at least one byte.
+// For more information - https://www.backblaze.com/b2/docs/large_files.html
+func (l *b2Objects) NewMultipartUpload(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (string, error) {
+	var uploadID string
+	bkt, err := l.Bucket(ctx, bucket)
+	if err != nil {
+		return uploadID, err
+	}
+
+	contentType := opts.UserDefined["content-type"]
+	delete(opts.UserDefined, "content-type")
+	lf, err := bkt.StartLargeFile(l.ctx, object, contentType, opts.UserDefined)
+	if err != nil {
+		logger.LogIf(ctx, err)
+		return uploadID, b2ToObjectError(err, bucket, object)
+	}
+
+	return lf.ID, nil
+}
+
+// PutObjectPart puts a part of an object in a bucket; uses B2's LargeFile upload API.
+func (l *b2Objects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, r *minio.PutObjReader, opts minio.ObjectOptions) (pi minio.PartInfo, err error) {
+	data := r.Reader
+	bkt, err := l.Bucket(ctx, bucket)
+	if err != nil {
+		return pi, err
+	}
+
+	fc, err := bkt.File(uploadID, object).CompileParts(0, nil).GetUploadPartURL(l.ctx)
+	if err != nil {
+		logger.LogIf(ctx, err)
+		return pi, b2ToObjectError(err, bucket, object, uploadID)
+	}
+
+	hr := newB2Reader(data, data.Size())
+	sha1, err := fc.UploadPart(l.ctx, hr, sha1AtEOF, int(hr.Size()), partID)
+	if err != nil {
+		logger.LogIf(ctx, err)
+		return pi, b2ToObjectError(err, bucket, object, uploadID)
+	}
+
+	return minio.PartInfo{
+		PartNumber:   partID,
+		LastModified: minio.UTCNow(),
+		ETag:         minio.ToS3ETag(sha1),
+		Size:         data.Size(),
+	}, nil
+}
+
+// ListObjectParts returns all object parts for the specified object in the specified bucket; uses B2's LargeFile upload API.
+func (l *b2Objects) ListObjectParts(ctx context.Context, bucket string, object string, uploadID string, partNumberMarker int, maxParts int, opts minio.ObjectOptions) (lpi minio.ListPartsInfo, err error) {
+	bkt, err := l.Bucket(ctx, bucket)
+	if err != nil {
+		return lpi, err
+	}
+	lpi = minio.ListPartsInfo{
+		Bucket:           bucket,
+		Object:           object,
+		UploadID:         uploadID,
+		MaxParts:         maxParts,
+		PartNumberMarker: partNumberMarker,
+	}
+	// startPartNumber must be in the range 1 - 10000 for B2.
+	partNumberMarker++
+	partsList, next, err := bkt.File(uploadID, object).ListParts(l.ctx, partNumberMarker, maxParts)
+	if err != nil {
+		logger.LogIf(ctx, err)
+		return lpi, b2ToObjectError(err, bucket, object, uploadID)
+	}
+	if next != 0 {
+		lpi.IsTruncated = true
+		lpi.NextPartNumberMarker = next
+	}
+	for _, part := range partsList {
+		lpi.Parts = append(lpi.Parts, minio.PartInfo{
+			PartNumber: part.Number,
+			ETag:       minio.ToS3ETag(part.SHA1),
+			Size:       part.Size,
+		})
+	}
+	return lpi, nil
+}
+
+// AbortMultipartUpload aborts an ongoing multipart upload; uses B2's LargeFile upload API.
+func (l *b2Objects) AbortMultipartUpload(ctx context.Context, bucket string, object string, uploadID string) error {
+	bkt, err := l.Bucket(ctx, bucket)
+	if err != nil {
+		return err
+	}
+	err = bkt.File(uploadID, object).CompileParts(0, nil).CancelLargeFile(l.ctx)
+	logger.LogIf(ctx, err)
+	return b2ToObjectError(err, bucket, object, uploadID)
+}
+
+// CompleteMultipartUpload completes an ongoing multipart upload and finalizes the object; uses B2's LargeFile upload API.
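+// Part ETags handed in by S3 clients are the B2 part SHA1s that
+// PutObjectPart returned (with minio's "-1" multipart suffix), so once the
+// suffix is trimmed they can be passed straight to b2_finish_large_file.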
+func (l *b2Objects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, uploadedParts []minio.CompletePart, opts minio.ObjectOptions) (oi minio.ObjectInfo, err error) {
+	bkt, err := l.Bucket(ctx, bucket)
+	if err != nil {
+		return oi, err
+	}
+	hashes := make(map[int]string)
+	for i, uploadedPart := range uploadedParts {
+		// B2 requires contiguous part numbers starting with 1; it does not support
+		// hand-picking part numbers, so we return an S3 compatible error instead.
+		if i+1 != uploadedPart.PartNumber {
+			logger.LogIf(ctx, minio.InvalidPart{})
+			return oi, b2ToObjectError(minio.InvalidPart{}, bucket, object, uploadID)
+		}
+
+		// Trim "-1" suffix in ETag as PutObjectPart() treats B2 returned SHA1 as ETag.
+		hashes[uploadedPart.PartNumber] = strings.TrimSuffix(uploadedPart.ETag, "-1")
+	}
+
+	if _, err = bkt.File(uploadID, object).CompileParts(0, hashes).FinishLargeFile(l.ctx); err != nil {
+		logger.LogIf(ctx, err)
+		return oi, b2ToObjectError(err, bucket, object, uploadID)
+	}
+
+	return l.GetObjectInfo(ctx, bucket, object, minio.ObjectOptions{})
+}
+
+// SetBucketPolicy - B2 supports 2 types of bucket policies:
+// bucketType.AllPublic - bucketTypeReadOnly means that anybody can download the files in the bucket;
+// bucketType.AllPrivate - bucketTypePrivate means that you need an authorization token to download them.
+// Default is AllPrivate for all buckets.
+func (l *b2Objects) SetBucketPolicy(ctx context.Context, bucket string, bucketPolicy *policy.Policy) error {
+	policyInfo, err := minio.PolicyToBucketAccessPolicy(bucketPolicy)
+	if err != nil {
+		// This should not happen.
+		return b2ToObjectError(err, bucket)
+	}
+
+	var policies []minio.BucketAccessPolicy
+	for prefix, policy := range miniogopolicy.GetPolicies(policyInfo.Statements, bucket, "") {
+		policies = append(policies, minio.BucketAccessPolicy{
+			Prefix: prefix,
+			Policy: policy,
+		})
+	}
+	prefix := bucket + "/*" // For all objects inside the bucket.
+	if len(policies) != 1 {
+		logger.LogIf(ctx, minio.NotImplemented{})
+		return minio.NotImplemented{}
+	}
+	if policies[0].Prefix != prefix {
+		logger.LogIf(ctx, minio.NotImplemented{})
+		return minio.NotImplemented{}
+	}
+	if policies[0].Policy != miniogopolicy.BucketPolicyReadOnly {
+		logger.LogIf(ctx, minio.NotImplemented{})
+		return minio.NotImplemented{}
+	}
+	bkt, err := l.Bucket(ctx, bucket)
+	if err != nil {
+		return err
+	}
+	bkt.Type = bucketTypeReadOnly
+	_, err = bkt.Update(l.ctx)
+	logger.LogIf(ctx, err)
+	return b2ToObjectError(err)
+}
+
+// GetBucketPolicy returns the current bucketType from the B2 backend and converts
+// it into S3 compatible bucket policy info.
+func (l *b2Objects) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) {
+	bkt, err := l.Bucket(ctx, bucket)
+	if err != nil {
+		return nil, err
+	}
+
+	// bkt.Type can also be snapshot, but it is only allowed through the B2 browser console;
+	// just return policy not found for all such cases.
+	// CreateBucket always sets the value to allPrivate by default.
+ if bkt.Type != bucketTypeReadOnly { + return nil, minio.BucketPolicyNotFound{Bucket: bucket} + } + + return &policy.Policy{ + Version: policy.DefaultVersion, + Statements: []policy.Statement{ + policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet( + policy.GetBucketLocationAction, + policy.ListBucketAction, + policy.GetObjectAction, + ), + policy.NewResourceSet( + policy.NewResource(bucket, ""), + policy.NewResource(bucket, "*"), + ), + condition.NewFunctions(), + ), + }, + }, nil +} + +// DeleteBucketPolicy - resets the bucketType of bucket on B2 to 'allPrivate'. +func (l *b2Objects) DeleteBucketPolicy(ctx context.Context, bucket string) error { + bkt, err := l.Bucket(ctx, bucket) + if err != nil { + return err + } + bkt.Type = bucketTypePrivate + _, err = bkt.Update(l.ctx) + logger.LogIf(ctx, err) + return b2ToObjectError(err) +} + +// IsCompressionSupported returns whether compression is applicable for this layer. +func (l *b2Objects) IsCompressionSupported() bool { + return false +} diff --git a/cmd/gateway/b2/gateway-b2_test.go b/cmd/gateway/b2/gateway-b2_test.go new file mode 100644 index 000000000..349127616 --- /dev/null +++ b/cmd/gateway/b2/gateway-b2_test.go @@ -0,0 +1,119 @@ +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package b2 + +import ( + "fmt" + "testing" + + b2 "github.com/minio/blazer/base" + + minio "github.com/minio/minio/cmd" +) + +// Test b2 object error. 
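+// Each test case routes a synthetic b2.Error (or a plain error) through
+// b2ToObjectError and compares the text of the mapped error.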
+func TestB2ObjectError(t *testing.T) { + testCases := []struct { + params []string + b2Err error + expectedErr error + }{ + { + []string{}, nil, nil, + }, + { + []string{}, fmt.Errorf("Not *Error"), fmt.Errorf("Not *Error"), + }, + { + []string{}, fmt.Errorf("Non B2 Error"), fmt.Errorf("Non B2 Error"), + }, + { + []string{"bucket"}, b2.Error{ + StatusCode: 1, + Code: "duplicate_bucket_name", + }, minio.BucketAlreadyOwnedByYou{ + Bucket: "bucket", + }, + }, + { + []string{"bucket"}, b2.Error{ + StatusCode: 1, + Code: "bad_request", + }, minio.BucketNotFound{ + Bucket: "bucket", + }, + }, + { + []string{"bucket", "object"}, b2.Error{ + StatusCode: 1, + Code: "bad_request", + }, minio.ObjectNameInvalid{ + Bucket: "bucket", + Object: "object", + }, + }, + { + []string{"bucket"}, b2.Error{ + StatusCode: 1, + Code: "bad_bucket_id", + }, minio.BucketNotFound{Bucket: "bucket"}, + }, + { + []string{"bucket", "object"}, b2.Error{ + StatusCode: 1, + Code: "file_not_present", + }, minio.ObjectNotFound{ + Bucket: "bucket", + Object: "object", + }, + }, + { + []string{"bucket", "object"}, b2.Error{ + StatusCode: 1, + Code: "not_found", + }, minio.ObjectNotFound{ + Bucket: "bucket", + Object: "object", + }, + }, + { + []string{"bucket"}, b2.Error{ + StatusCode: 1, + Code: "cannot_delete_non_empty_bucket", + }, minio.BucketNotEmpty{ + Bucket: "bucket", + }, + }, + { + []string{"bucket", "object", "uploadID"}, b2.Error{ + StatusCode: 1, + Message: "No active upload for", + }, minio.InvalidUploadID{ + UploadID: "uploadID", + }, + }, + } + + for i, testCase := range testCases { + actualErr := b2ToObjectError(testCase.b2Err, testCase.params...) + if actualErr != nil { + if actualErr.Error() != testCase.expectedErr.Error() { + t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.expectedErr, actualErr) + } + } + } +} diff --git a/cmd/gateway/gateway.go b/cmd/gateway/gateway.go index b72fac338..39ad63209 100644 --- a/cmd/gateway/gateway.go +++ b/cmd/gateway/gateway.go @@ -23,5 +23,9 @@ import ( _ "github.com/minio/minio/cmd/gateway/nas" _ "github.com/minio/minio/cmd/gateway/oss" _ "github.com/minio/minio/cmd/gateway/s3" + + // B2 is specifically kept here to avoid re-ordering by goimports, + // please ask on github.com/minio/minio/issues before changing this. + _ "github.com/minio/minio/cmd/gateway/b2" // Add your gateway here. ) diff --git a/docs/gateway/README.md b/docs/gateway/README.md index 1d62f0e84..3c9199e12 100644 --- a/docs/gateway/README.md +++ b/docs/gateway/README.md @@ -5,3 +5,5 @@ Minio Gateway adds Amazon S3 compatibility to third party cloud storage provider - [S3](https://github.com/minio/minio/blob/master/docs/gateway/s3.md) - [Google Cloud Storage](https://github.com/minio/minio/blob/master/docs/gateway/gcs.md) - [Alibaba Cloud Storage](https://github.com/minio/minio/blob/master/docs/gateway/oss.md) +- [Backblaze B2](https://github.com/minio/minio/blob/master/docs/gateway/b2.md) + diff --git a/docs/gateway/b2.md b/docs/gateway/b2.md new file mode 100644 index 000000000..3b0947ad1 --- /dev/null +++ b/docs/gateway/b2.md @@ -0,0 +1,58 @@ +# Minio B2 Gateway [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) +Minio Gateway adds Amazon S3 compatibility to Backblaze B2 Cloud Storage. + +## Run Minio Gateway for Backblaze B2 Cloud Storage +Please follow this [guide](https://www.backblaze.com/b2/docs/quick_account.html) to create an account on backblaze.com to obtain your access credentials for B2 Cloud storage. 
+
+### Using Docker
+```
+docker run -p 9000:9000 --name b2-s3 \
+ -e "MINIO_ACCESS_KEY=b2_account_id" \
+ -e "MINIO_SECRET_KEY=b2_application_key" \
+ minio/minio gateway b2
+```
+
+### Using Binary
+```
+export MINIO_ACCESS_KEY=b2_account_id
+export MINIO_SECRET_KEY=b2_application_key
+minio gateway b2
+```
+
+## Test using Minio Browser
+Minio Gateway comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 to ensure that your server has started successfully.
+
+![Screenshot](https://raw.githubusercontent.com/minio/minio/master/docs/screenshots/minio-browser-gateway.png)
+
+## Test using Minio Client `mc`
+`mc` provides a modern alternative to UNIX commands such as ls, cat, cp, mirror, diff, etc. It supports filesystems and Amazon S3 compatible cloud storage services.
+
+### Configure `mc`
+```
+mc config host add myb2 http://gateway-ip:9000 b2_account_id b2_application_key
+```
+
+### List buckets on Backblaze B2
+```
+mc ls myb2
+[2017-02-22 01:50:43 PST]     0B ferenginar/
+[2017-02-26 21:43:51 PST]     0B my-bucket/
+[2017-02-26 22:10:11 PST]     0B test-bucket1/
+```
+
+## Known limitations
+Gateway inherits the following B2 limitations:
+
+- No support for CopyObject S3 API (there are no equivalent APIs available on Backblaze B2).
+- No support for CopyObjectPart S3 API (there are no equivalent APIs available on Backblaze B2).
+- Only a read-only bucket policy is supported at the bucket level; all other variations will return an API NotImplemented error.
+- DeleteObject() might not delete the object right away on Backblaze B2, so you might still see the object immediately after a delete request.
+
+Other limitations:
+
+- Bucket notification APIs are not supported.
+
+## Explore Further
+- [`mc` command-line interface](https://docs.minio.io/docs/minio-client-quickstart-guide)
+- [`aws` command-line interface](https://docs.minio.io/docs/aws-cli-with-minio)
+- [`minio-go` Go SDK](https://docs.minio.io/docs/golang-client-quickstart-guide)
diff --git a/vendor/github.com/garyburd/redigo/internal/commandinfo.go b/vendor/github.com/garyburd/redigo/internal/commandinfo.go
deleted file mode 100644
index 11e584257..000000000
--- a/vendor/github.com/garyburd/redigo/internal/commandinfo.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2014 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
- -package internal // import "github.com/garyburd/redigo/internal" - -import ( - "strings" -) - -const ( - WatchState = 1 << iota - MultiState - SubscribeState - MonitorState -) - -type CommandInfo struct { - Set, Clear int -} - -var commandInfos = map[string]CommandInfo{ - "WATCH": {Set: WatchState}, - "UNWATCH": {Clear: WatchState}, - "MULTI": {Set: MultiState}, - "EXEC": {Clear: WatchState | MultiState}, - "DISCARD": {Clear: WatchState | MultiState}, - "PSUBSCRIBE": {Set: SubscribeState}, - "SUBSCRIBE": {Set: SubscribeState}, - "MONITOR": {Set: MonitorState}, -} - -func init() { - for n, ci := range commandInfos { - commandInfos[strings.ToLower(n)] = ci - } -} - -func LookupCommandInfo(commandName string) CommandInfo { - if ci, ok := commandInfos[commandName]; ok { - return ci - } - return commandInfos[strings.ToUpper(commandName)] -} diff --git a/vendor/github.com/minio/blazer/LICENSE b/vendor/github.com/minio/blazer/LICENSE new file mode 100644 index 000000000..88755c6ad --- /dev/null +++ b/vendor/github.com/minio/blazer/LICENSE @@ -0,0 +1,13 @@ +Copyright 2016, Google + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/minio/blazer/base/base.go b/vendor/github.com/minio/blazer/base/base.go new file mode 100644 index 000000000..738106379 --- /dev/null +++ b/vendor/github.com/minio/blazer/base/base.go @@ -0,0 +1,1204 @@ +// Copyright 2016, Google +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package base provides a very low-level interface on top of the B2 v1 API. +// It is not intended to be used directly. +// +// It currently lacks support for the following APIs: +// +// b2_download_file_by_id +// b2_list_unfinished_large_files +package base + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "regexp" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/minio/blazer/internal/b2types" + "github.com/minio/blazer/internal/blog" +) + +const ( + APIBase = "https://api.backblazeb2.com" + DefaultUserAgent = "blazer/0.1.1" +) + +type Error struct { + Message string + Method string + StatusCode int + Code string + retry int +} + +func (e Error) Error() string { + if e.Method == "" { + return fmt.Sprintf("b2 error: %s", e.Message) + } + return fmt.Sprintf("%s: %d: %s: %s", e.Method, e.StatusCode, e.Code, e.Message) +} + +// Action checks an error and returns a recommended course of action. 
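+//
+// A typical caller might act on the result like this (sketch only):
+//
+//	switch Action(err) {
+//	case ReAuthenticate:
+//		// refresh tokens via AuthorizeAccount, then retry
+//	case AttemptNewUpload:
+//		// request a fresh URL via GetUploadURL or GetUploadPartURL
+//	case Retry:
+//		// wait (see Backoff) and reissue the RPC
+//	default: // Punt
+//		// surface the error to the caller
++//	}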
+func Action(err error) ErrAction { + e, ok := err.(Error) + if !ok { + return Punt + } + if e.retry > 0 { + return Retry + } + if e.StatusCode >= http.StatusInternalServerError && e.StatusCode < 600 { + if e.Method == "b2_upload_file" || e.Method == "b2_upload_part" { + return AttemptNewUpload + } + } + switch e.StatusCode { + case http.StatusUnauthorized: + if e.Method == "b2_authorize_account" { + return Punt + } + if e.Method == "b2_upload_file" || e.Method == "b2_upload_part" { + return AttemptNewUpload + } + return ReAuthenticate + case http.StatusBadRequest: + // See restic/restic#1207 + if e.Method == "b2_upload_file" && strings.HasPrefix(e.Message, "more than one upload using auth token") { + return AttemptNewUpload + } + return Punt + case http.StatusRequestTimeout: + return AttemptNewUpload + case http.StatusTooManyRequests, http.StatusInternalServerError, http.StatusServiceUnavailable: + return Retry + } + return Punt +} + +// ErrAction is an action that a caller can take when any function returns an +// error. +type ErrAction int + +// Code returns the error code and message. +func Code(err error) (int, string, string) { + e, ok := err.(Error) + if !ok { + return 0, "", "" + } + return e.StatusCode, e.Code, e.Message +} + +const ( + // ReAuthenticate indicates that the B2 account authentication tokens have + // expired, and should be refreshed with a new call to AuthorizeAccount. + ReAuthenticate ErrAction = iota + + // AttemptNewUpload indicates that an upload's authentication token (or URL + // endpoint) has expired, and that users should request new ones with a call + // to GetUploadURL or GetUploadPartURL. + AttemptNewUpload + + // Retry indicates that the caller should wait an appropriate amount of time, + // and then reattempt the RPC. + Retry + + // Punt means that there is no useful action to be taken on this error, and + // that it should be displayed to the user. + Punt +) + +func mkErr(resp *http.Response) error { + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + logResponse(resp, data) + msg := &b2types.ErrorMessage{} + if err := json.Unmarshal(data, msg); err != nil { + return err + } + var retryAfter int + retry := resp.Header.Get("Retry-After") + if retry != "" { + r, err := strconv.ParseInt(retry, 10, 64) + if err != nil { + return err + } + retryAfter = int(r) + } + return Error{ + Message: msg.Msg, + StatusCode: resp.StatusCode, + Code: msg.Code, + Method: resp.Request.Header.Get("X-Blazer-Method"), + retry: retryAfter, + } +} + +// Backoff returns an appropriate amount of time to wait, given an error, if +// any was returned by the server. If the return value is 0, but Action +// indicates Retry, the user should implement their own exponential backoff, +// beginning with one second. 
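+//
+// A minimal retry loop built from Action and Backoff (sketch only; doRPC is
+// a caller-supplied operation):
+//
+//	for {
+//		err := doRPC()
+//		if Action(err) != Retry {
+//			return err
+//		}
+//		if d := Backoff(err); d > 0 {
+//			time.Sleep(d)
+//		}
+//	}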
+func Backoff(err error) time.Duration { + e, ok := err.(Error) + if !ok { + return 0 + } + return time.Duration(e.retry) * time.Second +} + +func logRequest(req *http.Request, args []byte) { + if !blog.V(2) { + return + } + var headers []string + for k, v := range req.Header { + if k == "Authorization" || k == "X-Blazer-Method" { + continue + } + headers = append(headers, fmt.Sprintf("%s: %s", k, strings.Join(v, ","))) + } + hstr := strings.Join(headers, ";") + method := req.Header.Get("X-Blazer-Method") + if args != nil { + blog.V(2).Infof(">> %s uri: %v headers: {%s} args: (%s)", method, req.URL, hstr, string(args)) + return + } + blog.V(2).Infof(">> %s uri: %v {%s} (no args)", method, req.URL, hstr) +} + +var authRegexp = regexp.MustCompile(`"authorizationToken": ".[^"]*"`) + +func logResponse(resp *http.Response, reply []byte) { + if !blog.V(2) { + return + } + var headers []string + for k, v := range resp.Header { + headers = append(headers, fmt.Sprintf("%s: %s", k, strings.Join(v, ","))) + } + hstr := strings.Join(headers, "; ") + method := resp.Request.Header.Get("X-Blazer-Method") + id := resp.Request.Header.Get("X-Blazer-Request-ID") + if reply != nil { + safe := string(authRegexp.ReplaceAll(reply, []byte(`"authorizationToken": "[redacted]"`))) + blog.V(2).Infof("<< %s (%s) %s {%s} (%s)", method, id, resp.Status, hstr, safe) + return + } + blog.V(2).Infof("<< %s (%s) %s {%s} (no reply)", method, id, resp.Status, hstr) +} + +func millitime(t int64) time.Time { + return time.Unix(t/1000, t%1000*1e6) +} + +type b2Options struct { + transport http.RoundTripper + failSomeUploads bool + expireTokens bool + capExceeded bool + apiBase string + userAgent string +} + +func (o *b2Options) getAPIBase() string { + if o.apiBase != "" { + return o.apiBase + } + return APIBase +} + +func (o *b2Options) getUserAgent() string { + if o.userAgent != "" { + return fmt.Sprintf("%s %s", o.userAgent, DefaultUserAgent) + } + return DefaultUserAgent +} + +func (o *b2Options) getTransport() http.RoundTripper { + if o.transport == nil { + return http.DefaultTransport + } + return o.transport +} + +// B2 holds account information for Backblaze. +type B2 struct { + accountID string + authToken string + apiURI string + DownloadURI string + MinPartSize int + opts *b2Options +} + +// Update replaces the B2 object with a new one, in-place. +func (b *B2) Update(n *B2) { + b.accountID = n.accountID + b.authToken = n.authToken + b.apiURI = n.apiURI + b.DownloadURI = n.DownloadURI + b.MinPartSize = n.MinPartSize + b.opts = n.opts +} + +type httpReply struct { + resp *http.Response + err error +} + +func makeNetRequest(req *http.Request, rt http.RoundTripper) <-chan httpReply { + ch := make(chan httpReply) + go func() { + resp, err := rt.RoundTrip(req) + ch <- httpReply{resp, err} + close(ch) + }() + return ch +} + +type requestBody struct { + size int64 + body io.Reader +} + +func (rb *requestBody) getSize() int64 { + if rb == nil { + return 0 + } + return rb.size +} + +func (rb *requestBody) getBody() io.Reader { + if rb == nil { + return nil + } + return rb.body +} + +type keepFinalBytes struct { + r io.Reader + remain int + sha [40]byte +} + +func (k *keepFinalBytes) Read(p []byte) (int, error) { + n, err := k.r.Read(p) + if k.remain-n > 40 { + k.remain -= n + return n, err + } + // This was a whole lot harder than it looks. 
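+	// p may straddle the point where the trailing 40-byte SHA1 begins:
+	// pi/pe bound the tail of p that belongs to the SHA1, while ki/ke
+	// bound where those bytes land inside k.sha.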
+ pi := -40 + k.remain + if pi < 0 { + pi = 0 + } + pe := n + ki := 40 - k.remain + if ki < 0 { + ki = 0 + } + ke := n - k.remain + 40 + copy(k.sha[ki:ke], p[pi:pe]) + k.remain -= n + return n, err +} + +var reqID int64 + +func (o *b2Options) makeRequest(ctx context.Context, method, verb, uri string, b2req, b2resp interface{}, headers map[string]string, body *requestBody) error { + var args []byte + if b2req != nil { + enc, err := json.Marshal(b2req) + if err != nil { + return err + } + args = enc + body = &requestBody{ + body: bytes.NewBuffer(enc), + size: int64(len(enc)), + } + } + req, err := http.NewRequest(verb, uri, body.getBody()) + if err != nil { + return err + } + req.ContentLength = body.getSize() + for k, v := range headers { + if strings.HasPrefix(k, "X-Bz-Info") || strings.HasPrefix(k, "X-Bz-File-Name") { + v = escape(v) + } + req.Header.Set(k, v) + } + req.Header.Set("User-Agent", o.getUserAgent()) + req.Header.Set("X-Blazer-Request-ID", fmt.Sprintf("%d", atomic.AddInt64(&reqID, 1))) + req.Header.Set("X-Blazer-Method", method) + if o.failSomeUploads { + req.Header.Add("X-Bz-Test-Mode", "fail_some_uploads") + } + if o.expireTokens { + req.Header.Add("X-Bz-Test-Mode", "expire_some_account_authorization_tokens") + } + if o.capExceeded { + req.Header.Add("X-Bz-Test-Mode", "force_cap_exceeded") + } + cancel := make(chan struct{}) + req.Cancel = cancel + logRequest(req, args) + ch := makeNetRequest(req, o.getTransport()) + var reply httpReply + select { + case reply = <-ch: + case <-ctx.Done(): + close(cancel) + return ctx.Err() + } + if reply.err != nil { + // Connection errors are retryable. + blog.V(2).Infof(">> %s uri: %v err: %v", method, req.URL, reply.err) + return Error{ + Message: reply.err.Error(), + retry: 1, + } + } + resp := reply.resp + defer resp.Body.Close() + if resp.StatusCode != 200 { + return mkErr(resp) + } + var replyArgs []byte + if b2resp != nil { + rbuf := &bytes.Buffer{} + r := io.TeeReader(resp.Body, rbuf) + decoder := json.NewDecoder(r) + if err := decoder.Decode(b2resp); err != nil { + return err + } + replyArgs = rbuf.Bytes() + } else { + replyArgs, err = ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + } + logResponse(resp, replyArgs) + return nil +} + +// AuthorizeAccount wraps b2_authorize_account. +func AuthorizeAccount(ctx context.Context, account, key string, opts ...AuthOption) (*B2, error) { + auth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", account, key))) + b2resp := &b2types.AuthorizeAccountResponse{} + headers := map[string]string{ + "Authorization": fmt.Sprintf("Basic %s", auth), + } + b2opts := &b2Options{} + for _, f := range opts { + f(b2opts) + } + if err := b2opts.makeRequest(ctx, "b2_authorize_account", "GET", b2opts.getAPIBase()+b2types.V1api+"b2_authorize_account", nil, b2resp, headers, nil); err != nil { + return nil, err + } + return &B2{ + accountID: b2resp.AccountID, + authToken: b2resp.AuthToken, + apiURI: b2resp.URI, + DownloadURI: b2resp.DownloadURI, + MinPartSize: b2resp.MinPartSize, + opts: b2opts, + }, nil +} + +// An AuthOption allows callers to choose per-session settings. +type AuthOption func(*b2Options) + +// UserAgent sets the User-Agent HTTP header. The default header is +// "blazer/"; the value set here will be prepended to that. This can +// be set multiple times. 
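+//
+// For example, UserAgent("gateway/0.1") results in requests being sent with
+// the header "gateway/0.1 blazer/0.1.1".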
+func UserAgent(agent string) AuthOption { + return func(o *b2Options) { + if o.userAgent == "" { + o.userAgent = agent + return + } + o.userAgent = fmt.Sprintf("%s %s", agent, o.userAgent) + } +} + +// Transport returns an AuthOption that sets the underlying HTTP mechanism. +func Transport(rt http.RoundTripper) AuthOption { + return func(o *b2Options) { + o.transport = rt + } +} + +// FailSomeUploads requests intermittent upload failures from the B2 service. +// This is mostly useful for testing. +func FailSomeUploads() AuthOption { + return func(o *b2Options) { + o.failSomeUploads = true + } +} + +// ExpireSomeAuthTokens requests intermittent authentication failures from the +// B2 service. +func ExpireSomeAuthTokens() AuthOption { + return func(o *b2Options) { + o.expireTokens = true + } +} + +// ForceCapExceeded requests a cap limit from the B2 service. This causes all +// uploads to be treated as if they would exceed the configure B2 capacity. +func ForceCapExceeded() AuthOption { + return func(o *b2Options) { + o.capExceeded = true + } +} + +type LifecycleRule struct { + Prefix string + DaysNewUntilHidden int + DaysHiddenUntilDeleted int +} + +// CreateBucket wraps b2_create_bucket. +func (b *B2) CreateBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (*Bucket, error) { + if btype != "allPublic" { + btype = "allPrivate" + } + var b2rules []b2types.LifecycleRule + for _, rule := range rules { + b2rules = append(b2rules, b2types.LifecycleRule{ + Prefix: rule.Prefix, + DaysNewUntilHidden: rule.DaysNewUntilHidden, + DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, + }) + } + b2req := &b2types.CreateBucketRequest{ + AccountID: b.accountID, + Name: name, + Type: btype, + Info: info, + LifecycleRules: b2rules, + } + b2resp := &b2types.CreateBucketResponse{} + headers := map[string]string{ + "Authorization": b.authToken, + } + if err := b.opts.makeRequest(ctx, "b2_create_bucket", "POST", b.apiURI+b2types.V1api+"b2_create_bucket", b2req, b2resp, headers, nil); err != nil { + return nil, err + } + var respRules []LifecycleRule + for _, rule := range b2resp.LifecycleRules { + respRules = append(respRules, LifecycleRule{ + Prefix: rule.Prefix, + DaysNewUntilHidden: rule.DaysNewUntilHidden, + DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, + }) + } + return &Bucket{ + Name: name, + Info: b2resp.Info, + LifecycleRules: respRules, + id: b2resp.BucketID, + rev: b2resp.Revision, + b2: b, + }, nil +} + +// DeleteBucket wraps b2_delete_bucket. +func (b *Bucket) DeleteBucket(ctx context.Context) error { + b2req := &b2types.DeleteBucketRequest{ + AccountID: b.b2.accountID, + BucketID: b.id, + } + headers := map[string]string{ + "Authorization": b.b2.authToken, + } + return b.b2.opts.makeRequest(ctx, "b2_delete_bucket", "POST", b.b2.apiURI+b2types.V1api+"b2_delete_bucket", b2req, nil, headers, nil) +} + +// Bucket holds B2 bucket details. +type Bucket struct { + Name string + Type string + Info map[string]string + LifecycleRules []LifecycleRule + id string + rev int + b2 *B2 +} + +// Update wraps b2_update_bucket. 
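+// Callers typically mutate fields on a Bucket (for example Type, as the
+// gateway's SetBucketPolicy does) and then call Update; the request carries
+// IfRevisionIs, so a concurrent change to the bucket fails the update rather
+// than being silently overwritten.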
+func (b *Bucket) Update(ctx context.Context) (*Bucket, error) { + var rules []b2types.LifecycleRule + for _, rule := range b.LifecycleRules { + rules = append(rules, b2types.LifecycleRule{ + DaysNewUntilHidden: rule.DaysNewUntilHidden, + DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, + Prefix: rule.Prefix, + }) + } + b2req := &b2types.UpdateBucketRequest{ + AccountID: b.b2.accountID, + BucketID: b.id, + // Name: b.Name, + Type: b.Type, + Info: b.Info, + LifecycleRules: rules, + IfRevisionIs: b.rev, + } + headers := map[string]string{ + "Authorization": b.b2.authToken, + } + b2resp := &b2types.UpdateBucketResponse{} + if err := b.b2.opts.makeRequest(ctx, "b2_update_bucket", "POST", b.b2.apiURI+b2types.V1api+"b2_update_bucket", b2req, b2resp, headers, nil); err != nil { + return nil, err + } + var respRules []LifecycleRule + for _, rule := range b2resp.LifecycleRules { + respRules = append(respRules, LifecycleRule{ + Prefix: rule.Prefix, + DaysNewUntilHidden: rule.DaysNewUntilHidden, + DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, + }) + } + return &Bucket{ + Name: b.Name, + Type: b2resp.Type, + Info: b2resp.Info, + LifecycleRules: respRules, + id: b2resp.BucketID, + b2: b.b2, + }, nil +} + +// BaseURL returns the base part of the download URLs. +func (b *Bucket) BaseURL() string { + return b.b2.DownloadURI +} + +// ListBuckets wraps b2_list_buckets. +func (b *B2) ListBuckets(ctx context.Context) ([]*Bucket, error) { + b2req := &b2types.ListBucketsRequest{ + AccountID: b.accountID, + } + b2resp := &b2types.ListBucketsResponse{} + headers := map[string]string{ + "Authorization": b.authToken, + } + if err := b.opts.makeRequest(ctx, "b2_list_buckets", "POST", b.apiURI+b2types.V1api+"b2_list_buckets", b2req, b2resp, headers, nil); err != nil { + return nil, err + } + var buckets []*Bucket + for _, bucket := range b2resp.Buckets { + var rules []LifecycleRule + for _, rule := range bucket.LifecycleRules { + rules = append(rules, LifecycleRule{ + Prefix: rule.Prefix, + DaysNewUntilHidden: rule.DaysNewUntilHidden, + DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, + }) + } + buckets = append(buckets, &Bucket{ + Name: bucket.Name, + Type: bucket.Type, + Info: bucket.Info, + LifecycleRules: rules, + id: bucket.BucketID, + rev: bucket.Revision, + b2: b, + }) + } + return buckets, nil +} + +// URL holds information from the b2_get_upload_url API. +type URL struct { + uri string + token string + b2 *B2 + bucket *Bucket +} + +// Reload reloads URL in-place, by reissuing a b2_get_upload_url and +// overwriting the previous values. +func (url *URL) Reload(ctx context.Context) error { + n, err := url.bucket.GetUploadURL(ctx) + if err != nil { + return err + } + url.uri = n.uri + url.token = n.token + return nil +} + +// GetUploadURL wraps b2_get_upload_url. +func (b *Bucket) GetUploadURL(ctx context.Context) (*URL, error) { + b2req := &b2types.GetUploadURLRequest{ + BucketID: b.id, + } + b2resp := &b2types.GetUploadURLResponse{} + headers := map[string]string{ + "Authorization": b.b2.authToken, + } + if err := b.b2.opts.makeRequest(ctx, "b2_get_upload_url", "POST", b.b2.apiURI+b2types.V1api+"b2_get_upload_url", b2req, b2resp, headers, nil); err != nil { + return nil, err + } + return &URL{ + uri: b2resp.URI, + token: b2resp.Token, + b2: b.b2, + bucket: b, + }, nil +} + +// File represents a B2 file. 
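+// Status mirrors B2's file "action" value; the gateway layer above relies on
+// "upload" (a finished file) and "folder" (a directory-style prefix).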
+type File struct {
+	Name      string
+	Size      int64
+	Status    string
+	Timestamp time.Time
+	Info      *FileInfo
+	id        string
+	b2        *B2
+}
+
+// File returns a bare File struct, but with the appropriate id and b2
+// interfaces.
+func (b *Bucket) File(id, name string) *File {
+	return &File{id: id, b2: b.b2, Name: name}
+}
+
+// UploadFile wraps b2_upload_file.
+func (u *URL) UploadFile(ctx context.Context, r io.Reader, size int, name, contentType, sha1 string, info map[string]string) (*File, error) {
+	headers := map[string]string{
+		"Authorization":     u.token,
+		"X-Bz-File-Name":    name,
+		"Content-Type":      contentType,
+		"Content-Length":    fmt.Sprintf("%d", size),
+		"X-Bz-Content-Sha1": sha1,
+	}
+	for k, v := range info {
+		headers[fmt.Sprintf("X-Bz-Info-%s", k)] = v
+	}
+	b2resp := &b2types.UploadFileResponse{}
+	if err := u.b2.opts.makeRequest(ctx, "b2_upload_file", "POST", u.uri, nil, b2resp, headers, &requestBody{body: r, size: int64(size)}); err != nil {
+		return nil, err
+	}
+	return &File{
+		Name:      name,
+		Size:      int64(size),
+		Timestamp: millitime(b2resp.Timestamp),
+		Status:    b2resp.Action,
+		id:        b2resp.FileID,
+		b2:        u.b2,
+	}, nil
+}
+
+// DeleteFileVersion wraps b2_delete_file_version.
+func (f *File) DeleteFileVersion(ctx context.Context) error {
+	b2req := &b2types.DeleteFileVersionRequest{
+		Name:   f.Name,
+		FileID: f.id,
+	}
+	headers := map[string]string{
+		"Authorization": f.b2.authToken,
+	}
+	return f.b2.opts.makeRequest(ctx, "b2_delete_file_version", "POST", f.b2.apiURI+b2types.V1api+"b2_delete_file_version", b2req, nil, headers, nil)
+}
+
+// LargeFile holds information necessary to implement B2 large file support.
+type LargeFile struct {
+	ID          string
+	Timestamp   time.Time
+	Name        string
+	ContentType string
+	Info        map[string]string
+
+	b2 *B2
+
+	mu     sync.Mutex
+	size   int64
+	hashes map[int]string
+}
+
+// StartLargeFile wraps b2_start_large_file.
+func (b *Bucket) StartLargeFile(ctx context.Context, name, contentType string, info map[string]string) (*LargeFile, error) {
+	b2req := &b2types.StartLargeFileRequest{
+		BucketID:    b.id,
+		Name:        name,
+		ContentType: contentType,
+		Info:        info,
+	}
+	b2resp := &b2types.StartLargeFileResponse{}
+	headers := map[string]string{
+		"Authorization": b.b2.authToken,
+	}
+	if err := b.b2.opts.makeRequest(ctx, "b2_start_large_file", "POST", b.b2.apiURI+b2types.V1api+"b2_start_large_file", b2req, b2resp, headers, nil); err != nil {
+		return nil, err
+	}
+	return &LargeFile{
+		ID:     b2resp.ID,
+		b2:     b.b2,
+		hashes: make(map[int]string),
+	}, nil
+}
+
+// ListUnfinishedLargeFiles lists all the unfinished large files.
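+// It wraps b2_list_unfinished_large_files; pass the returned continuation
+// string back in to page through the results.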
+func (b *Bucket) ListUnfinishedLargeFiles(ctx context.Context, continuation string, count int) ([]*LargeFile, string, error) { + b2req := &b2types.ListUnfinishedLargeFilesRequest{ + BucketID: b.id, + Continuation: continuation, + Count: count, + } + b2resp := &b2types.ListUnfinishedLargeFilesResponse{} + headers := map[string]string{ + "Authorization": b.b2.authToken, + } + if err := b.b2.opts.makeRequest(ctx, "b2_list_unfinished_large_files", "POST", b.b2.apiURI+b2types.V1api+"b2_list_unfinished_large_files", + b2req, b2resp, headers, nil); err != nil { + return nil, "", err + } + cont := b2resp.NextID + var largeFiles []*LargeFile + for _, f := range b2resp.Files { + largeFiles = append(largeFiles, &LargeFile{ + ID: f.ID, + Timestamp: millitime(f.Timestamp), + Name: f.Name, + Info: f.Info, + ContentType: f.ContentType, + b2: b.b2, + hashes: make(map[int]string), + }) + } + return largeFiles, cont, nil +} + +// CancelLargeFile wraps b2_cancel_large_file. +func (l *LargeFile) CancelLargeFile(ctx context.Context) error { + b2req := &b2types.CancelLargeFileRequest{ + ID: l.ID, + } + headers := map[string]string{ + "Authorization": l.b2.authToken, + } + return l.b2.opts.makeRequest(ctx, "b2_cancel_large_file", "POST", l.b2.apiURI+b2types.V1api+"b2_cancel_large_file", b2req, nil, headers, nil) +} + +// FilePart is a piece of a started, but not finished, large file upload. +type FilePart struct { + Number int + SHA1 string + Size int64 +} + +// ListParts wraps b2_list_parts. +func (f *File) ListParts(ctx context.Context, next, count int) ([]*FilePart, int, error) { + b2req := &b2types.ListPartsRequest{ + ID: f.id, + Start: next, + Count: count, + } + b2resp := &b2types.ListPartsResponse{} + headers := map[string]string{ + "Authorization": f.b2.authToken, + } + if err := f.b2.opts.makeRequest(ctx, "b2_list_parts", "POST", f.b2.apiURI+b2types.V1api+"b2_list_parts", b2req, b2resp, headers, nil); err != nil { + return nil, 0, err + } + var parts []*FilePart + for _, part := range b2resp.Parts { + parts = append(parts, &FilePart{ + Number: part.Number, + SHA1: part.SHA1, + Size: part.Size, + }) + } + return parts, b2resp.Next, nil +} + +// CompileParts returns a LargeFile that can accept new data. Seen is a +// mapping of completed part numbers to SHA1 strings; size is the total size of +// all the completed parts to this point. +func (f *File) CompileParts(size int64, seen map[int]string) *LargeFile { + s := make(map[int]string) + for k, v := range seen { + s[k] = v + } + return &LargeFile{ + ID: f.id, + b2: f.b2, + size: size, + hashes: s, + } +} + +// FileChunk holds information necessary for uploading file chunks. +type FileChunk struct { + url string + token string + file *LargeFile +} + +type getUploadPartURLRequest struct { + ID string `json:"fileId"` +} + +type getUploadPartURLResponse struct { + URL string `json:"uploadUrl"` + Token string `json:"authorizationToken"` +} + +// GetUploadPartURL wraps b2_get_upload_part_url. +func (l *LargeFile) GetUploadPartURL(ctx context.Context) (*FileChunk, error) { + b2req := &getUploadPartURLRequest{ + ID: l.ID, + } + b2resp := &getUploadPartURLResponse{} + headers := map[string]string{ + "Authorization": l.b2.authToken, + } + if err := l.b2.opts.makeRequest(ctx, "b2_get_upload_part_url", "POST", l.b2.apiURI+b2types.V1api+"b2_get_upload_part_url", b2req, b2resp, headers, nil); err != nil { + return nil, err + } + return &FileChunk{ + url: b2resp.URL, + token: b2resp.Token, + file: l, + }, nil +} + +// Reload reloads FileChunk in-place. 
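+// It is intended for recovering an upload URL whose token has expired or
+// whose endpoint returned a retriable error. An editor's sketch (the retry
+// policy is an assumption, and the part bytes must be re-readable, hence the
+// fresh bytes.Reader per attempt):
+//
+//	if _, err := fc.UploadPart(ctx, bytes.NewReader(part), sha, len(part), 1); err != nil {
+//		if rerr := fc.Reload(ctx); rerr == nil {
+//			_, err = fc.UploadPart(ctx, bytes.NewReader(part), sha, len(part), 1)
+//		}
+//	}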
+func (fc *FileChunk) Reload(ctx context.Context) error { + n, err := fc.file.GetUploadPartURL(ctx) + if err != nil { + return err + } + fc.url = n.url + fc.token = n.token + return nil +} + +// UploadPart wraps b2_upload_part. +func (fc *FileChunk) UploadPart(ctx context.Context, r io.Reader, sha1 string, size, index int) (string, error) { + headers := map[string]string{ + "Authorization": fc.token, + "X-Bz-Part-Number": fmt.Sprintf("%d", index), + "Content-Length": fmt.Sprintf("%d", size), + "X-Bz-Content-Sha1": sha1, + } + b2resp := &b2types.UploadPartResponse{} + if sha1 == "hex_digits_at_end" { + r = &keepFinalBytes{r: r, remain: size} + } + if err := fc.file.b2.opts.makeRequest(ctx, "b2_upload_part", "POST", fc.url, nil, b2resp, headers, &requestBody{body: r, size: int64(size)}); err != nil { + return "", err + } + fc.file.mu.Lock() + if sha1 == "hex_digits_at_end" { + sha1 = string(r.(*keepFinalBytes).sha[:]) + } + fc.file.hashes[index] = sha1 + fc.file.size += int64(size) + fc.file.mu.Unlock() + return b2resp.SHA1, nil +} + +// FinishLargeFile wraps b2_finish_large_file. +func (l *LargeFile) FinishLargeFile(ctx context.Context) (*File, error) { + l.mu.Lock() + defer l.mu.Unlock() + b2req := &b2types.FinishLargeFileRequest{ + ID: l.ID, + Hashes: make([]string, len(l.hashes)), + } + b2resp := &b2types.FinishLargeFileResponse{} + for k, v := range l.hashes { + b2req.Hashes[k-1] = v + } + headers := map[string]string{ + "Authorization": l.b2.authToken, + } + if err := l.b2.opts.makeRequest(ctx, "b2_finish_large_file", "POST", l.b2.apiURI+b2types.V1api+"b2_finish_large_file", b2req, b2resp, headers, nil); err != nil { + return nil, err + } + return &File{ + Name: b2resp.Name, + Size: l.size, + Timestamp: millitime(b2resp.Timestamp), + Status: b2resp.Action, + id: b2resp.FileID, + b2: l.b2, + }, nil +} + +// ListFileNames wraps b2_list_file_names. +func (b *Bucket) ListFileNames(ctx context.Context, count int, continuation, prefix, delimiter string) ([]*File, string, error) { + b2req := &b2types.ListFileNamesRequest{ + Count: count, + Continuation: continuation, + BucketID: b.id, + Prefix: prefix, + Delimiter: delimiter, + } + b2resp := &b2types.ListFileNamesResponse{} + headers := map[string]string{ + "Authorization": b.b2.authToken, + } + if err := b.b2.opts.makeRequest(ctx, "b2_list_file_names", "POST", b.b2.apiURI+b2types.V1api+"b2_list_file_names", b2req, b2resp, headers, nil); err != nil { + return nil, "", err + } + cont := b2resp.Continuation + var files []*File + for _, f := range b2resp.Files { + files = append(files, &File{ + Name: f.Name, + Size: f.Size, + Status: f.Action, + Timestamp: millitime(f.Timestamp), + Info: &FileInfo{ + Name: f.Name, + SHA1: f.SHA1, + Size: f.Size, + ContentType: f.ContentType, + Info: f.Info, + Status: f.Action, + Timestamp: millitime(f.Timestamp), + }, + id: f.FileID, + b2: b.b2, + }) + } + return files, cont, nil +} + +// ListFileVersions wraps b2_list_file_versions. 
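+//
+// An editor's pagination sketch (the count of 1000 and the prefix are
+// assumptions; iteration stops when both continuation tokens come back
+// empty):
+//
+//	name, id := "", ""
+//	for {
+//		files, nextName, nextID, err := bucket.ListFileVersions(ctx, 1000, name, id, "prefix/", "")
+//		if err != nil {
+//			// handle error
+//		}
+//		_ = files // inspect versions here
+//		if nextName == "" && nextID == "" {
+//			break
+//		}
+//		name, id = nextName, nextID
+//	}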
+func (b *Bucket) ListFileVersions(ctx context.Context, count int, startName, startID, prefix, delimiter string) ([]*File, string, string, error) { + b2req := &b2types.ListFileVersionsRequest{ + BucketID: b.id, + Count: count, + StartName: startName, + StartID: startID, + Prefix: prefix, + Delimiter: delimiter, + } + b2resp := &b2types.ListFileVersionsResponse{} + headers := map[string]string{ + "Authorization": b.b2.authToken, + } + if err := b.b2.opts.makeRequest(ctx, "b2_list_file_versions", "POST", b.b2.apiURI+b2types.V1api+"b2_list_file_versions", b2req, b2resp, headers, nil); err != nil { + return nil, "", "", err + } + var files []*File + for _, f := range b2resp.Files { + files = append(files, &File{ + Name: f.Name, + Size: f.Size, + Status: f.Action, + Timestamp: millitime(f.Timestamp), + Info: &FileInfo{ + Name: f.Name, + SHA1: f.SHA1, + Size: f.Size, + ContentType: f.ContentType, + Info: f.Info, + Status: f.Action, + Timestamp: millitime(f.Timestamp), + }, + id: f.FileID, + b2: b.b2, + }) + } + return files, b2resp.NextName, b2resp.NextID, nil +} + +// GetDownloadAuthorization wraps b2_get_download_authorization. +func (b *Bucket) GetDownloadAuthorization(ctx context.Context, prefix string, valid time.Duration) (string, error) { + b2req := &b2types.GetDownloadAuthorizationRequest{ + BucketID: b.id, + Prefix: prefix, + Valid: int(valid.Seconds()), + } + b2resp := &b2types.GetDownloadAuthorizationResponse{} + headers := map[string]string{ + "Authorization": b.b2.authToken, + } + if err := b.b2.opts.makeRequest(ctx, "b2_get_download_authorization", "POST", b.b2.apiURI+b2types.V1api+"b2_get_download_authorization", b2req, b2resp, headers, nil); err != nil { + return "", err + } + return b2resp.Token, nil +} + +// FileReader is an io.ReadCloser that downloads a file from B2. +type FileReader struct { + io.ReadCloser + ContentLength int + ContentType string + SHA1 string + ID string + Info map[string]string +} + +func mkRange(offset, size int64) string { + if offset == 0 && size == 0 { + return "" + } + if size == 0 { + return fmt.Sprintf("bytes=%d-", offset) + } + return fmt.Sprintf("bytes=%d-%d", offset, offset+size-1) +} + +// DownloadFileByName wraps b2_download_file_by_name. 
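+// Offset and size follow mkRange above: (0, 0) fetches the whole object, and
+// a size of 0 reads from offset to the end. An editor's sketch (names are
+// placeholders; the caller owns closing the reader):
+//
+//	fr, err := bucket.DownloadFileByName(ctx, "path/to/object", 1024, 2048) // bytes 1024-3071
+//	if err != nil {
+//		// handle error
+//	}
+//	defer fr.Close()
+//	_, _ = io.Copy(ioutil.Discard, fr)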
+func (b *Bucket) DownloadFileByName(ctx context.Context, name string, offset, size int64) (*FileReader, error) { + uri := fmt.Sprintf("%s/file/%s/%s", b.b2.DownloadURI, b.Name, name) + req, err := http.NewRequest("GET", uri, nil) + if err != nil { + return nil, err + } + req.Header.Set("Authorization", b.b2.authToken) + req.Header.Set("X-Blazer-Request-ID", fmt.Sprintf("%d", atomic.AddInt64(&reqID, 1))) + req.Header.Set("X-Blazer-Method", "b2_download_file_by_name") + rng := mkRange(offset, size) + if rng != "" { + req.Header.Set("Range", rng) + } + cancel := make(chan struct{}) + req.Cancel = cancel + logRequest(req, nil) + ch := makeNetRequest(req, b.b2.opts.getTransport()) + var reply httpReply + select { + case reply = <-ch: + case <-ctx.Done(): + close(cancel) + return nil, ctx.Err() + } + if reply.err != nil { + return nil, reply.err + } + resp := reply.resp + logResponse(resp, nil) + if resp.StatusCode != 200 && resp.StatusCode != 206 { + defer resp.Body.Close() + return nil, mkErr(resp) + } + clen, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + if err != nil { + resp.Body.Close() + return nil, err + } + info := make(map[string]string) + for key := range resp.Header { + if !strings.HasPrefix(key, "X-Bz-Info-") { + continue + } + name, err := unescape(strings.TrimPrefix(key, "X-Bz-Info-")) + if err != nil { + resp.Body.Close() + return nil, err + } + val, err := unescape(resp.Header.Get(key)) + if err != nil { + resp.Body.Close() + return nil, err + } + info[name] = val + } + return &FileReader{ + ReadCloser: resp.Body, + SHA1: resp.Header.Get("X-Bz-Content-Sha1"), + ID: resp.Header.Get("X-Bz-File-Id"), + ContentType: resp.Header.Get("Content-Type"), + ContentLength: int(clen), + Info: info, + }, nil +} + +// HideFile wraps b2_hide_file. +func (b *Bucket) HideFile(ctx context.Context, name string) (*File, error) { + b2req := &b2types.HideFileRequest{ + BucketID: b.id, + File: name, + } + b2resp := &b2types.HideFileResponse{} + headers := map[string]string{ + "Authorization": b.b2.authToken, + } + if err := b.b2.opts.makeRequest(ctx, "b2_hide_file", "POST", b.b2.apiURI+b2types.V1api+"b2_hide_file", b2req, b2resp, headers, nil); err != nil { + return nil, err + } + return &File{ + Status: b2resp.Action, + Name: name, + Timestamp: millitime(b2resp.Timestamp), + b2: b.b2, + id: b2resp.ID, + }, nil +} + +// FileInfo holds information about a specific file. +type FileInfo struct { + Name string + SHA1 string + ID string + Size int64 + ContentType string + Info map[string]string + Status string + Timestamp time.Time +} + +// GetFileInfo wraps b2_get_file_info. 
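+// Note that it also refreshes the receiver's Status, Name, Timestamp and
+// Info fields as a side effect. An editor's sketch, reusing a File from the
+// listing call above (prefix is a placeholder; error handling elided):
+//
+//	files, _, err := bucket.ListFileNames(ctx, 1, "", "path/to/object", "")
+//	if err != nil || len(files) == 0 {
+//		// handle error
+//	}
+//	fi, err := files[0].GetFileInfo(ctx)
+//	fmt.Println(fi.Name, fi.Size, fi.SHA1)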
+func (f *File) GetFileInfo(ctx context.Context) (*FileInfo, error) { + b2req := &b2types.GetFileInfoRequest{ + ID: f.id, + } + b2resp := &b2types.GetFileInfoResponse{} + headers := map[string]string{ + "Authorization": f.b2.authToken, + } + if err := f.b2.opts.makeRequest(ctx, "b2_get_file_info", "POST", f.b2.apiURI+b2types.V1api+"b2_get_file_info", b2req, b2resp, headers, nil); err != nil { + return nil, err + } + f.Status = b2resp.Action + f.Name = b2resp.Name + f.Timestamp = millitime(b2resp.Timestamp) + f.Info = &FileInfo{ + Name: b2resp.Name, + SHA1: b2resp.SHA1, + Size: b2resp.Size, + ContentType: b2resp.ContentType, + Info: b2resp.Info, + Status: b2resp.Action, + ID: b2resp.FileID, + Timestamp: millitime(b2resp.Timestamp), + } + return f.Info, nil +} diff --git a/vendor/github.com/minio/blazer/base/strings.go b/vendor/github.com/minio/blazer/base/strings.go new file mode 100644 index 000000000..88e615f3e --- /dev/null +++ b/vendor/github.com/minio/blazer/base/strings.go @@ -0,0 +1,81 @@ +// Copyright 2017, Google +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package base + +import ( + "bytes" + "errors" + "fmt" +) + +func noEscape(c byte) bool { + switch c { + case '.', '_', '-', '/', '~', '!', '$', '\'', '(', ')', '*', ';', '=', ':', '@': + return true + } + return false +} + +func escape(s string) string { + // cribbed from url.go, kinda + b := &bytes.Buffer{} + for i := 0; i < len(s); i++ { + switch c := s[i]; { + case c == '/': + b.WriteByte(c) + case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9': + b.WriteByte(c) + case noEscape(c): + b.WriteByte(c) + default: + fmt.Fprintf(b, "%%%X", c) + } + } + return b.String() +} + +func unescape(s string) (string, error) { + b := &bytes.Buffer{} + for i := 0; i < len(s); i++ { + c := s[i] + switch c { + case '/': + b.WriteString("/") + case '+': + b.WriteString(" ") + case '%': + if len(s)-i < 3 { + return "", errors.New("unescape: bad encoding") + } + b.WriteByte(unhex(s[i+1])<<4 | unhex(s[i+2])) + i += 2 + default: + b.WriteByte(c) + } + } + return b.String(), nil +} + +func unhex(c byte) byte { + switch { + case '0' <= c && c <= '9': + return c - '0' + case 'a' <= c && c <= 'f': + return c - 'a' + 10 + case 'A' <= c && c <= 'F': + return c - 'A' + 10 + } + return 0 +} diff --git a/vendor/github.com/minio/blazer/internal/b2types/b2types.go b/vendor/github.com/minio/blazer/internal/b2types/b2types.go new file mode 100644 index 000000000..c1f78c47c --- /dev/null +++ b/vendor/github.com/minio/blazer/internal/b2types/b2types.go @@ -0,0 +1,255 @@ +// Copyright 2016, Google +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package b2types implements internal types common to the B2 API. +package b2types + +// You know what would be amazing? If I could autogen this from like a JSON +// file. Wouldn't that be amazing? That would be amazing. + +const ( + V1api = "/b2api/v1/" +) + +type ErrorMessage struct { + Status int `json:"status"` + Code string `json:"code"` + Msg string `json:"message"` +} + +type AuthorizeAccountResponse struct { + AccountID string `json:"accountId"` + AuthToken string `json:"authorizationToken"` + URI string `json:"apiUrl"` + DownloadURI string `json:"downloadUrl"` + MinPartSize int `json:"minimumPartSize"` +} + +type LifecycleRule struct { + DaysHiddenUntilDeleted int `json:"daysFromHidingToDeleting,omitempty"` + DaysNewUntilHidden int `json:"daysFromUploadingToHiding,omitempty"` + Prefix string `json:"fileNamePrefix"` +} + +type CreateBucketRequest struct { + AccountID string `json:"accountId"` + Name string `json:"bucketName"` + Type string `json:"bucketType"` + Info map[string]string `json:"bucketInfo"` + LifecycleRules []LifecycleRule `json:"lifecycleRules"` +} + +type CreateBucketResponse struct { + BucketID string `json:"bucketId"` + Name string `json:"bucketName"` + Type string `json:"bucketType"` + Info map[string]string `json:"bucketInfo"` + LifecycleRules []LifecycleRule `json:"lifecycleRules"` + Revision int `json:"revision"` +} + +type DeleteBucketRequest struct { + AccountID string `json:"accountId"` + BucketID string `json:"bucketId"` +} + +type ListBucketsRequest struct { + AccountID string `json:"accountId"` +} + +type ListBucketsResponse struct { + Buckets []CreateBucketResponse `json:"buckets"` +} + +type UpdateBucketRequest struct { + AccountID string `json:"accountId"` + BucketID string `json:"bucketId"` + // bucketName is a required field according to + // https://www.backblaze.com/b2/docs/b2_update_bucket.html. 
+ // + // However, actually setting it returns 400: unknown field in + // com.backblaze.modules.b2.data.UpdateBucketRequest: bucketName + // + //Name string `json:"bucketName"` + Type string `json:"bucketType,omitempty"` + Info map[string]string `json:"bucketInfo,omitempty"` + LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"` + IfRevisionIs int `json:"ifRevisionIs,omitempty"` +} + +type UpdateBucketResponse CreateBucketResponse + +type GetUploadURLRequest struct { + BucketID string `json:"bucketId"` +} + +type GetUploadURLResponse struct { + URI string `json:"uploadUrl"` + Token string `json:"authorizationToken"` +} + +type UploadFileResponse struct { + FileID string `json:"fileId"` + Timestamp int64 `json:"uploadTimestamp"` + Action string `json:"action"` +} + +type DeleteFileVersionRequest struct { + Name string `json:"fileName"` + FileID string `json:"fileId"` +} + +type StartLargeFileRequest struct { + BucketID string `json:"bucketId"` + Name string `json:"fileName"` + ContentType string `json:"contentType"` + Info map[string]string `json:"fileInfo,omitempty"` +} + +type StartLargeFileResponse struct { + ID string `json:"fileId"` +} + +type CancelLargeFileRequest struct { + ID string `json:"fileId"` +} + +type ListUnfinishedLargeFilesRequest struct { + BucketID string `json:"bucketId"` + Continuation string `json:"startFileId,omitempty"` + Count int `json:"maxFileCount,omitempty"` +} + +type ListUnfinishedLargeFilesResponse struct { + NextID string `json:"nextFileId"` + Files []struct { + AccountID string `json:"accountId"` + BucketID string `json:"bucketId"` + Name string `json:"fileName"` + ID string `json:"fileId"` + Timestamp int64 `json:"uploadTimestamp"` + ContentType string `json:"contentType"` + Info map[string]string `json:"fileInfo,omitempty"` + } `json:"files"` +} + +type ListPartsRequest struct { + ID string `json:"fileId"` + Start int `json:"startPartNumber"` + Count int `json:"maxPartCount"` +} + +type ListPartsResponse struct { + Next int `json:"nextPartNumber"` + Parts []struct { + ID string `json:"fileId"` + Number int `json:"partNumber"` + SHA1 string `json:"contentSha1"` + Size int64 `json:"contentLength"` + } `json:"parts"` +} + +type getUploadPartURLRequest struct { + ID string `json:"fileId"` +} + +type getUploadPartURLResponse struct { + URL string `json:"uploadUrl"` + Token string `json:"authorizationToken"` +} + +type UploadPartResponse struct { + ID string `json:"fileId"` + PartNumber int `json:"partNumber"` + Size int64 `json:"contentLength"` + SHA1 string `json:"contentSha1"` +} + +type FinishLargeFileRequest struct { + ID string `json:"fileId"` + Hashes []string `json:"partSha1Array"` +} + +type FinishLargeFileResponse struct { + Name string `json:"fileName"` + FileID string `json:"fileId"` + Timestamp int64 `json:"uploadTimestamp"` + Action string `json:"action"` +} + +type ListFileNamesRequest struct { + BucketID string `json:"bucketId"` + Count int `json:"maxFileCount"` + Continuation string `json:"startFileName,omitempty"` + Prefix string `json:"prefix,omitempty"` + Delimiter string `json:"delimiter,omitempty"` +} + +type ListFileNamesResponse struct { + Continuation string `json:"nextFileName"` + Files []GetFileInfoResponse `json:"files"` +} + +type ListFileVersionsRequest struct { + BucketID string `json:"bucketId"` + Count int `json:"maxFileCount"` + StartName string `json:"startFileName,omitempty"` + StartID string `json:"startFileId,omitempty"` + Prefix string `json:"prefix,omitempty"` + Delimiter string `json:"delimiter,omitempty"` 
+} + +type ListFileVersionsResponse struct { + NextName string `json:"nextFileName"` + NextID string `json:"nextFileId"` + Files []GetFileInfoResponse `json:"files"` +} + +type HideFileRequest struct { + BucketID string `json:"bucketId"` + File string `json:"fileName"` +} + +type HideFileResponse struct { + ID string `json:"fileId"` + Timestamp int64 `json:"uploadTimestamp"` + Action string `json:"action"` +} + +type GetFileInfoRequest struct { + ID string `json:"fileId"` +} + +type GetFileInfoResponse struct { + FileID string `json:"fileId"` + Name string `json:"fileName"` + SHA1 string `json:"contentSha1"` + Size int64 `json:"contentLength"` + ContentType string `json:"contentType"` + Info map[string]string `json:"fileInfo"` + Action string `json:"action"` + Timestamp int64 `json:"uploadTimestamp"` +} + +type GetDownloadAuthorizationRequest struct { + BucketID string `json:"bucketId"` + Prefix string `json:"fileNamePrefix"` + Valid int `json:"validDurationInSeconds"` +} + +type GetDownloadAuthorizationResponse struct { + BucketID string `json:"bucketId"` + Prefix string `json:"fileNamePrefix"` + Token string `json:"authorizationToken"` +} diff --git a/vendor/github.com/minio/blazer/internal/blog/blog.go b/vendor/github.com/minio/blazer/internal/blog/blog.go new file mode 100644 index 000000000..6ffe5cbf0 --- /dev/null +++ b/vendor/github.com/minio/blazer/internal/blog/blog.go @@ -0,0 +1,54 @@ +// Copyright 2017, Google +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package blog implements a private logger, in the manner of glog, without +// polluting the flag namespace or leaving files all over /tmp. +// +// It has almost no features, and a bunch of global state. +package blog + +import ( + "log" + "os" + "strconv" +) + +var level int32 + +type Verbose bool + +func init() { + lvl := os.Getenv("B2_LOG_LEVEL") + i, err := strconv.ParseInt(lvl, 10, 32) + if err != nil { + return + } + level = int32(i) +} + +func (v Verbose) Info(a ...interface{}) { + if v { + log.Print(a...) + } +} + +func (v Verbose) Infof(format string, a ...interface{}) { + if v { + log.Printf(format, a...) 
+ } +} + +func V(target int32) Verbose { + return Verbose(target <= level) +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 64bf1f1e4..c328e234e 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -297,12 +297,6 @@ "revision": "42c364ba490082e4815b5222728711b3440603eb", "revisionTime": "2017-01-13T15:16:12Z" }, - { - "checksumSHA1": "2UmMbNHc8FBr98mJFN1k8ISOIHk=", - "path": "github.com/garyburd/redigo/internal", - "revision": "0d253a66e6e1349f4581d6d2b300ee434ee2da9f", - "revisionTime": "2017-02-16T21:49:44Z" - }, { "checksumSHA1": "+IH9gXMht4fL/fxKRZ4sqGBps1g=", "path": "github.com/go-ini/ini", @@ -590,6 +584,24 @@ "revision": "db96a2b759cdef4f11a34506a42eb8d1290c598e", "revisionTime": "2016-07-26T03:20:27Z" }, + { + "checksumSHA1": "uTShVxdYNwW+3WI6SfJwOc/LQgo=", + "path": "github.com/minio/blazer/base", + "revision": "2081f5bf046503f576d8712253724fbf2950fffe", + "revisionTime": "2017-11-26T20:28:54Z" + }, + { + "checksumSHA1": "ucCxupZ1gyxvFsBg5igP13dySLI=", + "path": "github.com/minio/blazer/internal/b2types", + "revision": "2081f5bf046503f576d8712253724fbf2950fffe", + "revisionTime": "2017-11-26T20:28:54Z" + }, + { + "checksumSHA1": "zgBbPwwuUH2sxz8smOzOA9TrD5g=", + "path": "github.com/minio/blazer/internal/blog", + "revision": "2081f5bf046503f576d8712253724fbf2950fffe", + "revisionTime": "2017-11-26T20:28:54Z" + }, { "checksumSHA1": "fUWokilZyc1QDKnIgCDJE8n1S9U=", "path": "github.com/minio/cli",