From 789270af3cb7347c4afde9bcb7e649325f96ae78 Mon Sep 17 00:00:00 2001 From: Nitish Tiwari Date: Thu, 28 Sep 2017 20:40:38 +0530 Subject: [PATCH] Vendorize latest minio-go (#4989) As minio-go behavior is fixed to treat empty byte arrays and nil byte arrays in the same manner. These changes are needed in minio to address the PutObject failure for S3 Gateway. Fixes: https://github.com/minio/minio/issues/4974, https://github.com/minio/minio-java/issues/615 --- cmd/gateway-s3-anonymous.go | 2 +- cmd/gateway-s3.go | 14 +- .../github.com/minio/minio-go/MAINTAINERS.md | 28 +- vendor/github.com/minio/minio-go/README.md | 13 +- .../minio/minio-go/api-compose-object.go | 19 +- .../minio/minio-go/api-get-object-context.go | 24 + .../minio/minio-go/api-get-object-file.go | 14 +- .../minio/minio-go/api-get-object.go | 18 +- .../minio/minio-go/api-get-policy.go | 3 +- vendor/github.com/minio/minio-go/api-list.go | 11 +- .../minio/minio-go/api-notification.go | 5 +- .../minio/minio-go/api-put-bucket.go | 9 +- .../minio/minio-go/api-put-object-common.go | 5 +- .../minio/minio-go/api-put-object-context.go | 38 + .../minio-go/api-put-object-encrypted.go | 17 +- .../minio-go/api-put-object-file-context.go | 63 + .../minio/minio-go/api-put-object-file.go | 48 +- .../minio-go/api-put-object-multipart.go | 52 +- .../minio-go/api-put-object-streaming.go | 78 +- .../minio/minio-go/api-put-object.go | 228 +- .../github.com/minio/minio-go/api-remove.go | 13 +- vendor/github.com/minio/minio-go/api-stat.go | 5 +- vendor/github.com/minio/minio-go/api.go | 18 +- vendor/github.com/minio/minio-go/appveyor.yml | 1 + vendor/github.com/minio/minio-go/constants.go | 2 +- vendor/github.com/minio/minio-go/core.go | 27 +- .../minio/minio-go/functional_tests.go | 5889 +++++++++++++++++ .../minio/minio-go/transport_1_5.go | 39 - .../minio/minio-go/transport_1_6.go | 40 - vendor/github.com/minio/minio-go/utils.go | 38 + vendor/vendor.json | 6 +- 31 files changed, 6331 insertions(+), 436 deletions(-) create mode 100644 vendor/github.com/minio/minio-go/api-get-object-context.go create mode 100644 vendor/github.com/minio/minio-go/api-put-object-context.go create mode 100644 vendor/github.com/minio/minio-go/api-put-object-file-context.go create mode 100644 vendor/github.com/minio/minio-go/functional_tests.go delete mode 100644 vendor/github.com/minio/minio-go/transport_1_5.go delete mode 100644 vendor/github.com/minio/minio-go/transport_1_6.go diff --git a/cmd/gateway-s3-anonymous.go b/cmd/gateway-s3-anonymous.go index b5f45fbbc..6bdec31e4 100644 --- a/cmd/gateway-s3-anonymous.go +++ b/cmd/gateway-s3-anonymous.go @@ -45,7 +45,7 @@ func (l *s3Objects) AnonPutObject(bucket string, object string, size int64, data delete(metadata, "etag") } - oi, err := l.anonClient.PutObject(bucket, object, size, data, md5sumBytes, sha256sumBytes, toMinioClientMetadata(metadata)) + oi, err := l.anonClient.PutObject(bucket, object, data, size, md5sumBytes, sha256sumBytes, toMinioClientMetadata(metadata)) if err != nil { return objInfo, s3ToObjectError(traceError(err), bucket, object) } diff --git a/cmd/gateway-s3.go b/cmd/gateway-s3.go index c18a9adcf..3ebc5aa5f 100644 --- a/cmd/gateway-s3.go +++ b/cmd/gateway-s3.go @@ -341,7 +341,7 @@ func (l *s3Objects) PutObject(bucket string, object string, data *HashReader, me return objInfo, s3ToObjectError(traceError(err), bucket, object) } delete(metadata, "etag") - oi, err := l.Client.PutObject(bucket, object, data.Size(), data, md5sumBytes, sha256sumBytes, toMinioClientMetadata(metadata)) + oi, err := 
l.Client.PutObject(bucket, object, data, data.Size(), md5sumBytes, sha256sumBytes, toMinioClientMetadata(metadata)) if err != nil { return objInfo, s3ToObjectError(traceError(err), bucket, object) } @@ -448,17 +448,19 @@ func fromMinioClientMetadata(metadata map[string][]string) map[string]string { } // toMinioClientMetadata converts metadata to map[string][]string -func toMinioClientMetadata(metadata map[string]string) map[string][]string { - mm := map[string][]string{} +func toMinioClientMetadata(metadata map[string]string) map[string]string { + mm := map[string]string{} for k, v := range metadata { - mm[http.CanonicalHeaderKey(k)] = []string{v} + mm[http.CanonicalHeaderKey(k)] = v } return mm } // NewMultipartUpload upload object in multiple parts func (l *s3Objects) NewMultipartUpload(bucket string, object string, metadata map[string]string) (uploadID string, err error) { - return l.Client.NewMultipartUpload(bucket, object, toMinioClientMetadata(metadata)) + // Create PutObject options + opts := minio.PutObjectOptions{UserMetadata: metadata} + return l.Client.NewMultipartUpload(bucket, object, opts) } // CopyObjectPart copy part of object to other bucket and object @@ -489,7 +491,7 @@ func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, return pi, err } - info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data.Size(), data, md5HexBytes, sha256sumBytes) + info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data, data.Size(), md5HexBytes, sha256sumBytes) if err != nil { return pi, err } diff --git a/vendor/github.com/minio/minio-go/MAINTAINERS.md b/vendor/github.com/minio/minio-go/MAINTAINERS.md index 6dbef6265..17973078e 100644 --- a/vendor/github.com/minio/minio-go/MAINTAINERS.md +++ b/vendor/github.com/minio/minio-go/MAINTAINERS.md @@ -5,15 +5,31 @@ Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522) ### Making new releases +Tag and sign your release commit, additionally this step requires you to have access to Minio's trusted private key. +```sh +$ export GNUPGHOME=/media/${USER}/minio/trusted +$ git tag -s 4.0.0 +$ git push +$ git push --tags +``` -Edit `libraryVersion` constant in `api.go`. +### Update version +Once release has been made update `libraryVersion` constant in `api.go` to next to be released version. -``` +```sh $ grep libraryVersion api.go - libraryVersion = "0.3.0" + libraryVersion = "4.0.1" ``` +Commit your changes +``` +$ git commit -a -m "Update version for next release" --author "Minio Trusted " +``` + +### Announce +Announce new release by adding release notes at https://github.com/minio/minio-go/releases from `trusted@minio.io` account. Release notes requires two sections `highlights` and `changelog`. Highlights is a bulleted list of salient features in this release and Changelog contains list of all commits since the last release. + +To generate `changelog` +```sh +$ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' .. 
``` -$ git tag 0.3.0 -$ git push --tags -``` \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/README.md b/vendor/github.com/minio/minio-go/README.md index 5eb6656d5..b37fc9a7f 100644 --- a/vendor/github.com/minio/minio-go/README.md +++ b/vendor/github.com/minio/minio-go/README.md @@ -55,6 +55,7 @@ func main() { } log.Printf("%#v\n", minioClient) // minioClient is now setup +} ``` ## Quick Start Example - File Uploader @@ -105,7 +106,7 @@ func main() { contentType := "application/zip" // Upload the zip file with FPutObject - n, err := minioClient.FPutObject(bucketName, objectName, filePath, contentType) + n, err := minioClient.FPutObject(bucketName, objectName, filePath, minio.PutObjectOptions{ContentType:contentType}) if err != nil { log.Fatalln(err) } @@ -152,10 +153,13 @@ The full API Reference is available here. ### API Reference : File Object Operations * [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject) * [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject) - +* [`FPutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FPutObjectWithContext) +* [`FGetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FGetObjectWithContext) ### API Reference : Object Operations * [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject) * [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject) +* [`GetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#GetObjectWithContext) +* [`PutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectWithContext) * [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming) * [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject) * [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject) @@ -204,10 +208,13 @@ The full API Reference is available here. 
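The README hunks above move FPutObject to the consolidated PutObjectOptions signature and list the new *WithContext entry points. As a quick orientation for callers migrating to this vendored version, here is a minimal sketch; the endpoint, credentials, bucket and file names are placeholders and not part of the patch:

```go
package main

import (
	"context"
	"log"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Old: FPutObject(bucket, object, filePath, contentType)
	// New: the content type (and any other per-object settings) travel in PutObjectOptions.
	opts := minio.PutObjectOptions{ContentType: "application/zip"}
	n, err := client.FPutObject("mybucket", "release.zip", "/tmp/release.zip", opts)
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("uploaded %d bytes", n)

	// The *WithContext variants accept a context so the upload can be
	// cancelled or bounded by a deadline.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()
	if _, err = client.FPutObjectWithContext(ctx, "mybucket", "release.zip", "/tmp/release.zip", opts); err != nil {
		log.Fatalln(err)
	}
}
```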
### Full Examples : File Object Operations * [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go) * [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go) - +* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go) +* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go) ### Full Examples : Object Operations * [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go) * [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go) +* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go) +* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go) * [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go) * [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go) * [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go) diff --git a/vendor/github.com/minio/minio-go/api-compose-object.go b/vendor/github.com/minio/minio-go/api-compose-object.go index 4fa88b818..2a11cb4c1 100644 --- a/vendor/github.com/minio/minio-go/api-compose-object.go +++ b/vendor/github.com/minio/minio-go/api-compose-object.go @@ -17,6 +17,7 @@ package minio import ( + "context" "encoding/base64" "fmt" "net/http" @@ -268,7 +269,7 @@ func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[s // uploadPartCopy - helper function to create a part in a multipart // upload via an upload-part-copy request // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html -func (c Client) uploadPartCopy(bucket, object, uploadID string, partNumber int, +func (c Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int, headers http.Header) (p CompletePart, err error) { // Build query parameters @@ -277,7 +278,7 @@ func (c Client) uploadPartCopy(bucket, object, uploadID string, partNumber int, urlValues.Set("uploadId", uploadID) // Send upload-part-copy request - resp, err := c.executeMethod("PUT", requestMetadata{ + resp, err := c.executeMethod(ctx, "PUT", requestMetadata{ bucketName: bucket, objectName: object, customHeader: headers, @@ -311,7 +312,7 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { if len(srcs) < 1 || len(srcs) > maxPartsCount { return ErrInvalidArgument("There must be as least one and up to 10000 source objects.") } - + ctx := context.Background() srcSizes := make([]int64, len(srcs)) var totalSize, size, totalParts int64 var srcUserMeta map[string]string @@ -396,7 +397,7 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { } // Send copy request - resp, err := c.executeMethod("PUT", requestMetadata{ + resp, err := c.executeMethod(ctx, "PUT", requestMetadata{ bucketName: dst.bucket, objectName: dst.object, customHeader: h, @@ -426,11 +427,11 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { if len(userMeta) == 0 && len(srcs) == 1 { metaMap = srcUserMeta } - metaHeaders := make(map[string][]string) + metaHeaders := make(map[string]string) for k, v := range metaMap { - metaHeaders[k] = append(metaHeaders[k], v) + metaHeaders[k] = v } - uploadID, err := c.newUploadID(dst.bucket, dst.object, metaHeaders) + uploadID, err := c.newUploadID(ctx, 
dst.bucket, dst.object, PutObjectOptions{UserMetadata: metaHeaders}) if err != nil { return fmt.Errorf("Error creating new upload: %v", err) } @@ -457,7 +458,7 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { fmt.Sprintf("bytes=%d-%d", start, end)) // make upload-part-copy request - complPart, err := c.uploadPartCopy(dst.bucket, + complPart, err := c.uploadPartCopy(ctx, dst.bucket, dst.object, uploadID, partIndex, h) if err != nil { return fmt.Errorf("Error in upload-part-copy - %v", err) @@ -468,7 +469,7 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { } // 3. Make final complete-multipart request. - _, err = c.completeMultipartUpload(dst.bucket, dst.object, uploadID, + _, err = c.completeMultipartUpload(ctx, dst.bucket, dst.object, uploadID, completeMultipartUpload{Parts: objParts}) if err != nil { err = fmt.Errorf("Error in complete-multipart request - %v", err) diff --git a/vendor/github.com/minio/minio-go/api-get-object-context.go b/vendor/github.com/minio/minio-go/api-get-object-context.go new file mode 100644 index 000000000..1ff6e06df --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-get-object-context.go @@ -0,0 +1,24 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import "context" + +// GetObjectWithContext - returns an seekable, readable object. +func (c Client) GetObjectWithContext(ctx context.Context, bucketName, objectName string) (*Object, error) { + return c.getObjectWithContext(ctx, bucketName, objectName) +} diff --git a/vendor/github.com/minio/minio-go/api-get-object-file.go b/vendor/github.com/minio/minio-go/api-get-object-file.go index c4193e934..fff6d1f99 100644 --- a/vendor/github.com/minio/minio-go/api-get-object-file.go +++ b/vendor/github.com/minio/minio-go/api-get-object-file.go @@ -21,11 +21,23 @@ import ( "os" "path/filepath" + "context" + "github.com/minio/minio-go/pkg/s3utils" ) +// FGetObjectWithContext - download contents of an object to a local file. +func (c Client) FGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string) error { + return c.fGetObjectWithContext(ctx, bucketName, objectName, filePath) +} + // FGetObject - download contents of an object to a local file. func (c Client) FGetObject(bucketName, objectName, filePath string) error { + return c.fGetObjectWithContext(context.Background(), bucketName, objectName, filePath) +} + +// fGetObjectWithContext - fgetObject wrapper function with context +func (c Client) fGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -88,7 +100,7 @@ func (c Client) FGetObject(bucketName, objectName, filePath string) error { } // Seek to current position for incoming reader. 
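The new api-get-object-context.go and the FGetObjectWithContext wrapper above only add context plumbing; download behaviour is otherwise unchanged. A small usage sketch, assuming a configured *minio.Client and placeholder bucket, object and target path:

```go
import (
	"context"
	"io"
	"io/ioutil"
	"time"

	minio "github.com/minio/minio-go"
)

// downloadWithDeadline exercises the context-aware download calls added above.
func downloadWithDeadline(client *minio.Client) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Seekable, readable object handle; the context governs the underlying GET requests.
	obj, err := client.GetObjectWithContext(ctx, "mybucket", "myobject")
	if err != nil {
		return err
	}
	defer obj.Close()
	if _, err = io.Copy(ioutil.Discard, obj); err != nil {
		return err
	}

	// Or write the object straight to a local file under the same deadline.
	return client.FGetObjectWithContext(ctx, "mybucket", "myobject", "/tmp/myobject")
}
```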
- objectReader, objectStat, err := c.getObject(bucketName, objectName, reqHeaders) + objectReader, objectStat, err := c.getObject(ctx, bucketName, objectName, reqHeaders) if err != nil { return err } diff --git a/vendor/github.com/minio/minio-go/api-get-object.go b/vendor/github.com/minio/minio-go/api-get-object.go index 9bd784ffa..02b3c3ff6 100644 --- a/vendor/github.com/minio/minio-go/api-get-object.go +++ b/vendor/github.com/minio/minio-go/api-get-object.go @@ -17,6 +17,7 @@ package minio import ( + "context" "errors" "fmt" "io" @@ -57,6 +58,11 @@ func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMateria // GetObject - returns an seekable, readable object. func (c Client) GetObject(bucketName, objectName string) (*Object, error) { + return c.getObjectWithContext(context.Background(), bucketName, objectName) +} + +// GetObject wrapper function that accepts a request context +func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName string) (*Object, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, err @@ -110,14 +116,14 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) { // Do not set objectInfo from the first readAt request because it will not get // the whole object. reqHeaders.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) - httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders) + httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, reqHeaders) } else { if req.Offset > 0 { reqHeaders.SetRange(req.Offset, 0) } // First request is a Read request. - httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders) + httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, reqHeaders) } if err != nil { resCh <- getResponse{ @@ -195,14 +201,14 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) { if req.isReadAt { // Range is set with respect to the offset and length of the buffer requested. reqHeaders.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) - httpReader, _, err = c.getObject(bucketName, objectName, reqHeaders) + httpReader, _, err = c.getObject(ctx, bucketName, objectName, reqHeaders) } else { // Range is set with respect to the offset. if req.Offset > 0 { reqHeaders.SetRange(req.Offset, 0) } - httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders) + httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, reqHeaders) } if err != nil { resCh <- getResponse{ @@ -626,7 +632,7 @@ func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- // // For more information about the HTTP Range header. // go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. -func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeaders) (io.ReadCloser, ObjectInfo, error) { +func (c Client) getObject(ctx context.Context, bucketName, objectName string, reqHeaders RequestHeaders) (io.ReadCloser, ObjectInfo, error) { // Validate input arguments. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, ObjectInfo{}, err @@ -642,7 +648,7 @@ func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeade } // Execute GET on objectName. 
- resp, err := c.executeMethod("GET", requestMetadata{ + resp, err := c.executeMethod(ctx, "GET", requestMetadata{ bucketName: bucketName, objectName: objectName, customHeader: customHeader, diff --git a/vendor/github.com/minio/minio-go/api-get-policy.go b/vendor/github.com/minio/minio-go/api-get-policy.go index 10ccdc66b..740871d73 100644 --- a/vendor/github.com/minio/minio-go/api-get-policy.go +++ b/vendor/github.com/minio/minio-go/api-get-policy.go @@ -17,6 +17,7 @@ package minio import ( + "context" "encoding/json" "io/ioutil" "net/http" @@ -79,7 +80,7 @@ func (c Client) getBucketPolicy(bucketName string) (policy.BucketAccessPolicy, e urlValues.Set("policy", "") // Execute GET on bucket to list objects. - resp, err := c.executeMethod("GET", requestMetadata{ + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ bucketName: bucketName, queryValues: urlValues, contentSHA256Bytes: emptySHA256, diff --git a/vendor/github.com/minio/minio-go/api-list.go b/vendor/github.com/minio/minio-go/api-list.go index 6de1fe9b3..40fca3e65 100644 --- a/vendor/github.com/minio/minio-go/api-list.go +++ b/vendor/github.com/minio/minio-go/api-list.go @@ -17,6 +17,7 @@ package minio import ( + "context" "errors" "fmt" "net/http" @@ -38,7 +39,7 @@ import ( // func (c Client) ListBuckets() ([]BucketInfo, error) { // Execute GET on service. - resp, err := c.executeMethod("GET", requestMetadata{contentSHA256Bytes: emptySHA256}) + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{contentSHA256Bytes: emptySHA256}) defer closeResponse(resp) if err != nil { return nil, err @@ -215,7 +216,7 @@ func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken s urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) // Execute GET on bucket to list objects. - resp, err := c.executeMethod("GET", requestMetadata{ + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ bucketName: bucketName, queryValues: urlValues, contentSHA256Bytes: emptySHA256, @@ -393,7 +394,7 @@ func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimit urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) // Execute GET on bucket to list objects. - resp, err := c.executeMethod("GET", requestMetadata{ + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ bucketName: bucketName, queryValues: urlValues, contentSHA256Bytes: emptySHA256, @@ -572,7 +573,7 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads)) // Execute GET on bucketName to list multipart uploads. - resp, err := c.executeMethod("GET", requestMetadata{ + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ bucketName: bucketName, queryValues: urlValues, contentSHA256Bytes: emptySHA256, @@ -690,7 +691,7 @@ func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, pa urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts)) // Execute GET on objectName to get list of parts. 
- resp, err := c.executeMethod("GET", requestMetadata{ + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ bucketName: bucketName, objectName: objectName, queryValues: urlValues, diff --git a/vendor/github.com/minio/minio-go/api-notification.go b/vendor/github.com/minio/minio-go/api-notification.go index 25a283af5..7671ce69f 100644 --- a/vendor/github.com/minio/minio-go/api-notification.go +++ b/vendor/github.com/minio/minio-go/api-notification.go @@ -18,6 +18,7 @@ package minio import ( "bufio" + "context" "encoding/json" "io" "net/http" @@ -46,7 +47,7 @@ func (c Client) getBucketNotification(bucketName string) (BucketNotification, er urlValues.Set("notification", "") // Execute GET on bucket to list objects. - resp, err := c.executeMethod("GET", requestMetadata{ + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ bucketName: bucketName, queryValues: urlValues, contentSHA256Bytes: emptySHA256, @@ -170,7 +171,7 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even urlValues["events"] = events // Execute GET on bucket to list objects. - resp, err := c.executeMethod("GET", requestMetadata{ + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ bucketName: bucketName, queryValues: urlValues, contentSHA256Bytes: emptySHA256, diff --git a/vendor/github.com/minio/minio-go/api-put-bucket.go b/vendor/github.com/minio/minio-go/api-put-bucket.go index fd37dc192..d4a702aeb 100644 --- a/vendor/github.com/minio/minio-go/api-put-bucket.go +++ b/vendor/github.com/minio/minio-go/api-put-bucket.go @@ -19,6 +19,7 @@ package minio import ( "bytes" + "context" "encoding/json" "encoding/xml" "fmt" @@ -82,7 +83,7 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) { } // Execute PUT to create a new bucket. - resp, err := c.executeMethod("PUT", reqMetadata) + resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) defer closeResponse(resp) if err != nil { return err @@ -170,7 +171,7 @@ func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAcces } // Execute PUT to upload a new bucket policy. - resp, err := c.executeMethod("PUT", reqMetadata) + resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) defer closeResponse(resp) if err != nil { return err @@ -195,7 +196,7 @@ func (c Client) removeBucketPolicy(bucketName string) error { urlValues.Set("policy", "") // Execute DELETE on objectName. - resp, err := c.executeMethod("DELETE", requestMetadata{ + resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{ bucketName: bucketName, queryValues: urlValues, contentSHA256Bytes: emptySHA256, @@ -235,7 +236,7 @@ func (c Client) SetBucketNotification(bucketName string, bucketNotification Buck } // Execute PUT to upload a new bucket notification. 
- resp, err := c.executeMethod("PUT", reqMetadata) + resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) defer closeResponse(resp) if err != nil { return err diff --git a/vendor/github.com/minio/minio-go/api-put-object-common.go b/vendor/github.com/minio/minio-go/api-put-object-common.go index 0158bc1d8..850a4567a 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-common.go +++ b/vendor/github.com/minio/minio-go/api-put-object-common.go @@ -17,6 +17,7 @@ package minio import ( + "context" "io" "math" "os" @@ -77,7 +78,7 @@ func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, las // getUploadID - fetch upload id if already present for an object name // or initiate a new request to fetch a new upload id. -func (c Client) newUploadID(bucketName, objectName string, metaData map[string][]string) (uploadID string, err error) { +func (c Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return "", err @@ -87,7 +88,7 @@ func (c Client) newUploadID(bucketName, objectName string, metaData map[string][ } // Initiate multipart upload for an object. - initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, metaData) + initMultipartUploadResult, err := c.initiateMultipartUpload(ctx, bucketName, objectName, opts) if err != nil { return "", err } diff --git a/vendor/github.com/minio/minio-go/api-put-object-context.go b/vendor/github.com/minio/minio-go/api-put-object-context.go new file mode 100644 index 000000000..7896dd3dc --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-put-object-context.go @@ -0,0 +1,38 @@ +/* +* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. + */ + +package minio + +import ( + "context" + "io" +) + +// PutObjectWithContext - Identical to PutObject call, but accepts context to facilitate request cancellation. 
+func (c Client) PutObjectWithContext(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, + opts PutObjectOptions) (n int64, err error) { + err = opts.validate() + if err != nil { + return 0, err + } + if opts.EncryptMaterials != nil { + if err = opts.EncryptMaterials.SetupEncryptMode(reader); err != nil { + return 0, err + } + return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, opts.EncryptMaterials, opts) + } + return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts) +} diff --git a/vendor/github.com/minio/minio-go/api-put-object-encrypted.go b/vendor/github.com/minio/minio-go/api-put-object-encrypted.go index 534a21ecf..4ed865735 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-encrypted.go +++ b/vendor/github.com/minio/minio-go/api-put-object-encrypted.go @@ -17,13 +17,14 @@ package minio import ( + "context" "io" "github.com/minio/minio-go/pkg/encrypt" ) // PutEncryptedObject - Encrypt and store object. -func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials, metadata map[string][]string, progress io.Reader) (n int64, err error) { +func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials) (n int64, err error) { if encryptMaterials == nil { return 0, ErrInvalidArgument("Unable to recognize empty encryption properties") @@ -33,14 +34,10 @@ func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Read return 0, err } - if metadata == nil { - metadata = make(map[string][]string) - } - - // Set the necessary encryption headers, for future decryption. - metadata[amzHeaderIV] = []string{encryptMaterials.GetIV()} - metadata[amzHeaderKey] = []string{encryptMaterials.GetKey()} - metadata[amzHeaderMatDesc] = []string{encryptMaterials.GetDesc()} + return c.PutObjectWithContext(context.Background(), bucketName, objectName, reader, -1, PutObjectOptions{EncryptMaterials: encryptMaterials}) +} - return c.putObjectMultipartStreamNoLength(bucketName, objectName, encryptMaterials, metadata, progress) +// FPutEncryptedObject - Encrypt and store an object with contents from file at filePath. +func (c Client) FPutEncryptedObject(bucketName, objectName, filePath string, encryptMaterials encrypt.Materials) (n int64, err error) { + return c.FPutObjectWithContext(context.Background(), bucketName, objectName, filePath, PutObjectOptions{EncryptMaterials: encryptMaterials}) } diff --git a/vendor/github.com/minio/minio-go/api-put-object-file-context.go b/vendor/github.com/minio/minio-go/api-put-object-file-context.go new file mode 100644 index 000000000..4e4620bf7 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-put-object-file-context.go @@ -0,0 +1,63 @@ +/* +* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
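The encrypted-upload helpers above now delegate to the options-based path, so callers no longer assemble the IV, key and material-description metadata by hand. A sketch of the new call shape; the symmetric key is a placeholder, and the encrypt.NewSymmetricKey / encrypt.NewCBCSecureMaterials constructors are assumed to match the vendored pkg/encrypt API rather than shown in this hunk:

```go
import (
	"io"

	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

// uploadEncrypted stores the reader client-side encrypted; the key bytes are a placeholder.
func uploadEncrypted(client *minio.Client, reader io.Reader) error {
	key := encrypt.NewSymmetricKey([]byte("a-32-byte-secret-key-placeholder"))
	materials, err := encrypt.NewCBCSecureMaterials(key)
	if err != nil {
		return err
	}
	// The IV/key/material-description headers are now derived from
	// PutObjectOptions.EncryptMaterials inside the library.
	_, err = client.PutEncryptedObject("mybucket", "myobject", reader, materials)
	return err
}
```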
+ */ + +package minio + +import ( + "context" + "mime" + "os" + "path/filepath" + + "github.com/minio/minio-go/pkg/s3utils" +) + +// FPutObjectWithContext - Create an object in a bucket, with contents from file at filePath. Allows request cancellation. +func (c Client) FPutObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return 0, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return 0, err + } + + // Open the referenced file. + fileReader, err := os.Open(filePath) + // If any error fail quickly here. + if err != nil { + return 0, err + } + defer fileReader.Close() + + // Save the file stat. + fileStat, err := fileReader.Stat() + if err != nil { + return 0, err + } + + // Save the file size. + fileSize := fileStat.Size() + + // Set contentType based on filepath extension if not given or default + // value of "application/octet-stream" if the extension has no associated type. + if opts.ContentType == "" { + if opts.ContentType = mime.TypeByExtension(filepath.Ext(filePath)); opts.ContentType == "" { + opts.ContentType = "application/octet-stream" + } + } + return c.PutObjectWithContext(ctx, bucketName, objectName, fileReader, fileSize, opts) +} diff --git a/vendor/github.com/minio/minio-go/api-put-object-file.go b/vendor/github.com/minio/minio-go/api-put-object-file.go index 81cdf5c2c..7804fd3cd 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-file.go +++ b/vendor/github.com/minio/minio-go/api-put-object-file.go @@ -17,50 +17,10 @@ package minio import ( - "mime" - "os" - "path/filepath" - - "github.com/minio/minio-go/pkg/s3utils" + "context" ) -// FPutObject - Create an object in a bucket, with contents from file at filePath. -func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err - } - - // Open the referenced file. - fileReader, err := os.Open(filePath) - // If any error fail quickly here. - if err != nil { - return 0, err - } - defer fileReader.Close() - - // Save the file stat. - fileStat, err := fileReader.Stat() - if err != nil { - return 0, err - } - - // Save the file size. - fileSize := fileStat.Size() - - objMetadata := make(map[string][]string) - - // Set contentType based on filepath extension if not given or default - // value of "binary/octet-stream" if the extension has no associated type. 
- if contentType == "" { - if contentType = mime.TypeByExtension(filepath.Ext(filePath)); contentType == "" { - contentType = "application/octet-stream" - } - } - - objMetadata["Content-Type"] = []string{contentType} - return c.putObjectCommon(bucketName, objectName, fileReader, fileSize, objMetadata, nil) +// FPutObject - Create an object in a bucket, with contents from file at filePath +func (c Client) FPutObject(bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) { + return c.FPutObjectWithContext(context.Background(), bucketName, objectName, filePath, opts) } diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go index aefeb5f26..6401de37e 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-multipart.go +++ b/vendor/github.com/minio/minio-go/api-put-object-multipart.go @@ -18,6 +18,7 @@ package minio import ( "bytes" + "context" "encoding/xml" "fmt" "io" @@ -32,9 +33,9 @@ import ( "github.com/minio/minio-go/pkg/s3utils" ) -func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, - metadata map[string][]string, progress io.Reader) (n int64, err error) { - n, err = c.putObjectMultipartNoStream(bucketName, objectName, reader, metadata, progress) +func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, + opts PutObjectOptions) (n int64, err error) { + n, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts) if err != nil { errResp := ToErrorResponse(err) // Verify if multipart functionality is not available, if not @@ -45,13 +46,13 @@ func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Read return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) } // Fall back to uploading as single PutObject operation. - return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) + return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) } } return n, err } -func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) { +func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err @@ -74,14 +75,14 @@ func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader } // Initiate a new multipart upload. - uploadID, err := c.newUploadID(bucketName, objectName, metadata) + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { return 0, err } defer func() { if err != nil { - c.abortMultipartUpload(bucketName, objectName, uploadID) + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) } }() @@ -117,12 +118,12 @@ func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader // Update progress reader appropriately to the latest offset // as we read from the source. - rd := newHook(bytes.NewReader(buf[:length]), progress) + rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) // Proceed to upload the part. 
var objPart ObjectPart - objPart, err = c.uploadPart(bucketName, objectName, uploadID, rd, partNumber, - hashSums["md5"], hashSums["sha256"], int64(length), metadata) + objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, + hashSums["md5"], hashSums["sha256"], int64(length), opts.UserMetadata) if err != nil { return totalUploadedSize, err } @@ -158,7 +159,7 @@ func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) - if _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload); err != nil { + if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil { return totalUploadedSize, err } @@ -167,7 +168,7 @@ func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader } // initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. -func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata map[string][]string) (initiateMultipartUploadResult, error) { +func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return initiateMultipartUploadResult{}, err @@ -181,17 +182,7 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata urlValues.Set("uploads", "") // Set ContentType header. - customHeader := make(http.Header) - for k, v := range metadata { - if len(v) > 0 { - customHeader.Set(k, v[0]) - } - } - - // Set a default content-type header if the latter is not provided - if v, ok := metadata["Content-Type"]; !ok || len(v) == 0 { - customHeader.Set("Content-Type", "application/octet-stream") - } + customHeader := opts.Header() reqMetadata := requestMetadata{ bucketName: bucketName, @@ -201,7 +192,7 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata } // Execute POST on an objectName to initiate multipart upload. - resp, err := c.executeMethod("POST", reqMetadata) + resp, err := c.executeMethod(ctx, "POST", reqMetadata) defer closeResponse(resp) if err != nil { return initiateMultipartUploadResult{}, err @@ -223,8 +214,8 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata const serverEncryptionKeyPrefix = "x-amz-server-side-encryption" // uploadPart - Uploads a part in a multipart upload. -func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, - partNumber int, md5Sum, sha256Sum []byte, size int64, metadata map[string][]string) (ObjectPart, error) { +func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader, + partNumber int, md5Sum, sha256Sum []byte, size int64, metadata map[string]string) (ObjectPart, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ObjectPart{}, err @@ -257,7 +248,7 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re for k, v := range metadata { if len(v) > 0 { if strings.HasPrefix(strings.ToLower(k), serverEncryptionKeyPrefix) { - customHeader.Set(k, v[0]) + customHeader.Set(k, v) } } } @@ -274,7 +265,7 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re } // Execute PUT on each part. 
- resp, err := c.executeMethod("PUT", reqMetadata) + resp, err := c.executeMethod(ctx, "PUT", reqMetadata) defer closeResponse(resp) if err != nil { return ObjectPart{}, err @@ -295,7 +286,7 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re } // completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. -func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, +func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { @@ -308,7 +299,6 @@ func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, // Initialize url queries. urlValues := make(url.Values) urlValues.Set("uploadId", uploadID) - // Marshal complete multipart body. completeMultipartUploadBytes, err := xml.Marshal(complete) if err != nil { @@ -327,7 +317,7 @@ func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, } // Execute POST to complete multipart upload for an objectName. - resp, err := c.executeMethod("POST", reqMetadata) + resp, err := c.executeMethod(ctx, "POST", reqMetadata) defer closeResponse(resp) if err != nil { return completeMultipartUploadResult{}, err diff --git a/vendor/github.com/minio/minio-go/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/api-put-object-streaming.go index 40cd5c252..994158bb9 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-streaming.go +++ b/vendor/github.com/minio/minio-go/api-put-object-streaming.go @@ -17,6 +17,7 @@ package minio import ( + "context" "fmt" "io" "net/http" @@ -26,11 +27,6 @@ import ( "github.com/minio/minio-go/pkg/s3utils" ) -// PutObjectStreaming using AWS streaming signature V4 -func (c Client) PutObjectStreaming(bucketName, objectName string, reader io.Reader) (n int64, err error) { - return c.PutObjectWithProgress(bucketName, objectName, reader, nil, nil) -} - // putObjectMultipartStream - upload a large object using // multipart upload and streaming signature for signing payload. // Comprehensive put object operation involving multipart uploads. @@ -41,8 +37,8 @@ func (c Client) PutObjectStreaming(bucketName, objectName string, reader io.Read // - *minio.Object // - Any reader which has a method 'ReadAt()' // -func (c Client) putObjectMultipartStream(bucketName, objectName string, - reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) { +func (c Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string, + reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { // Verify if reader is *minio.Object, *os.File or io.ReaderAt. // NOTE: Verification of object is kept for a specific purpose @@ -50,9 +46,9 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, // It is to indicate that *minio.Object implements io.ReaderAt. // and such a functionality is used in the subsequent code path. 
if isFile(reader) || !isObject(reader) && isReadAt(reader) { - n, err = c.putObjectMultipartStreamFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, metadata, progress) + n, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts) } else { - n, err = c.putObjectMultipartStreamNoChecksum(bucketName, objectName, reader, size, metadata, progress) + n, err = c.putObjectMultipartStreamNoChecksum(ctx, bucketName, objectName, reader, size, opts) } if err != nil { errResp := ToErrorResponse(err) @@ -64,7 +60,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) } // Fall back to uploading as single PutObject operation. - return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) + return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) } } return n, err @@ -94,8 +90,8 @@ type uploadPartReq struct { // temporary files for staging all the data, these temporary files are // cleaned automatically when the caller i.e http client closes the // stream after uploading all the contents successfully. -func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string, - reader io.ReaderAt, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) { +func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string, + reader io.ReaderAt, size int64, opts PutObjectOptions) (n int64, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err @@ -111,7 +107,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string } // Initiate a new multipart upload. - uploadID, err := c.newUploadID(bucketName, objectName, metadata) + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { return 0, err } @@ -122,7 +118,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string // to relinquish storage space. defer func() { if err != nil { - c.abortMultipartUpload(bucketName, objectName, uploadID) + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) } }() @@ -150,9 +146,8 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil} } close(uploadPartsCh) - // Receive each part number from the channel allowing three parallel uploads. - for w := 1; w <= totalWorkers; w++ { + for w := 1; w <= opts.getNumThreads(); w++ { go func(partSize int64) { // Each worker will draw from the part channel and upload in parallel. for uploadReq := range uploadPartsCh { @@ -170,13 +165,13 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string } // Get a section reader on a particular offset. - sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), progress) + sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress) // Proceed to upload the part. 
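With the worker loop above now sized by opts.getNumThreads(), callers can tune part-upload parallelism on the ReaderAt path through PutObjectOptions.NumThreads. A sketch under placeholder names; when NumThreads is zero the library falls back to its default worker count:

```go
import (
	"os"

	minio "github.com/minio/minio-go"
)

// uploadLargeFile uploads a big file with eight concurrent part uploads.
func uploadLargeFile(client *minio.Client) (int64, error) {
	f, err := os.Open("/tmp/large.bin")
	if err != nil {
		return 0, err
	}
	defer f.Close()

	st, err := f.Stat()
	if err != nil {
		return 0, err
	}
	// *os.File satisfies io.ReaderAt, so this takes the section-reader
	// multipart path shown above, with NumThreads parallel part uploads.
	return client.PutObject("mybucket", "large.bin", f, st.Size(),
		minio.PutObjectOptions{NumThreads: 8, ContentType: "application/octet-stream"})
}
```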
var objPart ObjectPart - objPart, err = c.uploadPart(bucketName, objectName, uploadID, + objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum, - nil, nil, partSize, metadata) + nil, nil, partSize, opts.UserMetadata) if err != nil { uploadedPartsCh <- uploadedPartRes{ Size: 0, @@ -229,7 +224,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) - _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload) + _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) if err != nil { return totalUploadedSize, err } @@ -238,8 +233,8 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string return totalUploadedSize, nil } -func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string, - reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) { +func (c Client) putObjectMultipartStreamNoChecksum(ctx context.Context, bucketName, objectName string, + reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err @@ -253,9 +248,8 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string if err != nil { return 0, err } - // Initiates a new multipart request - uploadID, err := c.newUploadID(bucketName, objectName, metadata) + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { return 0, err } @@ -266,7 +260,7 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string // storage space. defer func() { if err != nil { - c.abortMultipartUpload(bucketName, objectName, uploadID) + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) } }() @@ -281,17 +275,16 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string for partNumber = 1; partNumber <= totalPartsCount; partNumber++ { // Update progress reader appropriately to the latest offset // as we read from the source. - hookReader := newHook(reader, progress) + hookReader := newHook(reader, opts.Progress) // Proceed to upload the part. if partNumber == totalPartsCount { partSize = lastPartSize } - var objPart ObjectPart - objPart, err = c.uploadPart(bucketName, objectName, uploadID, + objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, io.LimitReader(hookReader, partSize), - partNumber, nil, nil, partSize, metadata) + partNumber, nil, nil, partSize, opts.UserMetadata) if err != nil { return totalUploadedSize, err } @@ -328,7 +321,7 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) - _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload) + _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) if err != nil { return totalUploadedSize, err } @@ -339,7 +332,7 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string // putObjectNoChecksum special function used Google Cloud Storage. This special function // is used for Google Cloud Storage since Google's multipart API is not S3 compatible. 
-func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { +func (c Client) putObjectNoChecksum(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err @@ -361,11 +354,11 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea // Update progress reader appropriately to the latest offset as we // read from the source. - readSeeker := newHook(reader, progress) + readSeeker := newHook(reader, opts.Progress) // This function does not calculate sha256 and md5sum for payload. // Execute put object. - st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, metaData) + st, err := c.putObjectDo(ctx, bucketName, objectName, readSeeker, nil, nil, size, opts) if err != nil { return 0, err } @@ -377,7 +370,7 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea // putObjectDo - executes the put object http operation. // NOTE: You must have WRITE permissions on a bucket to add an object to it. -func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) { +func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, opts PutObjectOptions) (ObjectInfo, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ObjectInfo{}, err @@ -385,21 +378,8 @@ func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5 if err := s3utils.CheckValidObjectName(objectName); err != nil { return ObjectInfo{}, err } - // Set headers. - customHeader := make(http.Header) - - // Set metadata to headers - for k, v := range metaData { - if len(v) > 0 { - customHeader.Set(k, v[0]) - } - } - - // If Content-Type is not provided, set the default application/octet-stream one - if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 { - customHeader.Set("Content-Type", "application/octet-stream") - } + customHeader := opts.Header() // Populate request metadata. reqMetadata := requestMetadata{ @@ -413,7 +393,7 @@ func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5 } // Execute PUT an objectName. - resp, err := c.executeMethod("PUT", reqMetadata) + resp, err := c.executeMethod(ctx, "PUT", reqMetadata) defer closeResponse(resp) if err != nil { return ObjectInfo{}, err diff --git a/vendor/github.com/minio/minio-go/api-put-object.go b/vendor/github.com/minio/minio-go/api-put-object.go index 94db82593..e7607ff35 100644 --- a/vendor/github.com/minio/minio-go/api-put-object.go +++ b/vendor/github.com/minio/minio-go/api-put-object.go @@ -18,119 +18,84 @@ package minio import ( "bytes" + "context" "fmt" "io" - "os" - "reflect" - "runtime" + "net/http" "runtime/debug" "sort" "strings" + "github.com/minio/minio-go/pkg/encrypt" "github.com/minio/minio-go/pkg/s3utils" ) -// toInt - converts go value to its integer representation based -// on the value kind if it is an integer. 
-func toInt(value reflect.Value) (size int64) { - size = -1 - if value.IsValid() { - switch value.Kind() { - case reflect.Int: - fallthrough - case reflect.Int8: - fallthrough - case reflect.Int16: - fallthrough - case reflect.Int32: - fallthrough - case reflect.Int64: - size = value.Int() - } +// PutObjectOptions represents options specified by user for PutObject call +type PutObjectOptions struct { + UserMetadata map[string]string + Progress io.Reader + ContentType string + ContentEncoding string + ContentDisposition string + CacheControl string + EncryptMaterials encrypt.Materials + NumThreads uint +} + +// getNumThreads - gets the number of threads to be used in the multipart +// put object operation +func (opts PutObjectOptions) getNumThreads() (numThreads int) { + if opts.NumThreads > 0 { + numThreads = int(opts.NumThreads) + } else { + numThreads = totalWorkers } - return size + return } -// getReaderSize - Determine the size of Reader if available. -func getReaderSize(reader io.Reader) (size int64, err error) { - size = -1 - if reader == nil { - return -1, nil +// Header - constructs the headers from metadata entered by user in +// PutObjectOptions struct +func (opts PutObjectOptions) Header() (header http.Header) { + header = make(http.Header) + + if opts.ContentType != "" { + header["Content-Type"] = []string{opts.ContentType} + } else { + header["Content-Type"] = []string{"application/octet-stream"} } - // Verify if there is a method by name 'Size'. - sizeFn := reflect.ValueOf(reader).MethodByName("Size") - // Verify if there is a method by name 'Len'. - lenFn := reflect.ValueOf(reader).MethodByName("Len") - if sizeFn.IsValid() { - if sizeFn.Kind() == reflect.Func { - // Call the 'Size' function and save its return value. - result := sizeFn.Call([]reflect.Value{}) - if len(result) == 1 { - size = toInt(result[0]) - } - } - } else if lenFn.IsValid() { - if lenFn.Kind() == reflect.Func { - // Call the 'Len' function and save its return value. - result := lenFn.Call([]reflect.Value{}) - if len(result) == 1 { - size = toInt(result[0]) - } + if opts.ContentEncoding != "" { + header["Content-Encoding"] = []string{opts.ContentEncoding} + } + if opts.ContentDisposition != "" { + header["Content-Disposition"] = []string{opts.ContentDisposition} + } + if opts.CacheControl != "" { + header["Cache-Control"] = []string{opts.CacheControl} + } + if opts.EncryptMaterials != nil { + header[amzHeaderIV] = []string{opts.EncryptMaterials.GetIV()} + header[amzHeaderKey] = []string{opts.EncryptMaterials.GetKey()} + header[amzHeaderMatDesc] = []string{opts.EncryptMaterials.GetDesc()} + } + for k, v := range opts.UserMetadata { + if !strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") && !isStandardHeader(k) { + header["X-Amz-Meta-"+k] = []string{v} + } else { + header[k] = []string{v} } - } else { - // Fallback to Stat() method, two possible Stat() structs exist. - switch v := reader.(type) { - case *os.File: - var st os.FileInfo - st, err = v.Stat() - if err != nil { - // Handle this case specially for "windows", - // certain files for example 'Stdin', 'Stdout' and - // 'Stderr' it is not allowed to fetch file information. - if runtime.GOOS == "windows" { - if strings.Contains(err.Error(), "GetFileInformationByHandle") { - return -1, nil - } - } - return - } - // Ignore if input is a directory, throw an error. 
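PutObjectOptions.Header() above maps the typed fields onto request headers and prefixes free-form user metadata with "X-Amz-Meta-", while validate() rejects keys that collide with standard or client-side-encryption headers. A short sketch of the intended usage; the exact key set matched by isStandardHeader lives in utils.go, which this hunk does not show, so the rejected example is an assumption:

```go
import minio "github.com/minio/minio-go"

// Typed fields cover the reserved headers; free-form metadata goes in
// UserMetadata and is sent prefixed with "X-Amz-Meta-".
var opts = minio.PutObjectOptions{
	ContentType:  "text/plain",
	CacheControl: "max-age=600",
	UserMetadata: map[string]string{"Reviewed-By": "alice"},
}

// Putting a reserved header into UserMetadata is rejected: validate() returns
// ErrInvalidArgument before any request is made (assuming Content-Type is among
// the keys matched by isStandardHeader).
var rejected = minio.PutObjectOptions{
	UserMetadata: map[string]string{"Content-Type": "text/plain"},
}
```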
- if st.Mode().IsDir() { - return -1, ErrInvalidArgument("Input file cannot be a directory.") - } - // Ignore 'Stdin', 'Stdout' and 'Stderr', since they - // represent *os.File type but internally do not - // implement Seekable calls. Ignore them and treat - // them like a stream with unknown length. - switch st.Name() { - case "stdin", "stdout", "stderr": - return - // Ignore read/write stream of os.Pipe() which have unknown length too. - case "|0", "|1": - return - } - var pos int64 - pos, err = v.Seek(0, 1) // SeekCurrent. - if err != nil { - return -1, err - } - size = st.Size() - pos - case *Object: - var st ObjectInfo - st, err = v.Stat() - if err != nil { - return - } - var pos int64 - pos, err = v.Seek(0, 1) // SeekCurrent. - if err != nil { - return -1, err - } - size = st.Size - pos + } + return +} + +// validate() checks if the UserMetadata map has standard headers or client side +// encryption headers and raises an error if so. +func (opts PutObjectOptions) validate() (err error) { + for k := range opts.UserMetadata { + if isStandardHeader(k) || isCSEHeader(k) { + return ErrInvalidArgument(k + " unsupported request parameter for user defined metadata") } } - // Returns the size here. - return size, err + return nil } // completedParts is a collection of parts sortable by their part numbers. @@ -152,40 +117,12 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part // - For size input as -1 PutObject does a multipart Put operation // until input stream reaches EOF. Maximum object size that can // be uploaded through this operation will be 5TiB. -func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) { - return c.PutObjectWithMetadata(bucketName, objectName, reader, map[string][]string{ - "Content-Type": []string{contentType}, - }, nil) +func (c Client) PutObject(bucketName, objectName string, reader io.Reader, objectSize int64, + opts PutObjectOptions) (n int64, err error) { + return c.PutObjectWithContext(context.Background(), bucketName, objectName, reader, objectSize, opts) } -// PutObjectWithSize - is a helper PutObject similar in behavior to PutObject() -// but takes the size argument explicitly, this function avoids doing reflection -// internally to figure out the size of input stream. Also if the input size is -// lesser than 0 this function returns an error. -func (c Client) PutObjectWithSize(bucketName, objectName string, reader io.Reader, readerSize int64, metadata map[string][]string, progress io.Reader) (n int64, err error) { - return c.putObjectCommon(bucketName, objectName, reader, readerSize, metadata, progress) -} - -// PutObjectWithMetadata using AWS streaming signature V4 -func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) { - return c.PutObjectWithProgress(bucketName, objectName, reader, metadata, progress) -} - -// PutObjectWithProgress using AWS streaming signature V4 -func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) { - // Size of the object. - var size int64 - - // Get reader size. 
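With `PutObjectWithSize`, `PutObjectWithMetadata` and `PutObjectWithProgress` removed, every upload now goes through the single `PutObject(bucket, object, reader, objectSize, opts)` entry point, which delegates to `PutObjectWithContext` with a background context. A hedged migration sketch (endpoint, credentials and names are placeholders):

```go
package main

import (
	"bytes"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	payload := []byte("hello, world")

	// minio-go 3.x:
	//   c.PutObjectWithMetadata(bucket, object, reader,
	//       map[string][]string{"Content-Type": {"application/octet-stream"}}, nil)
	// minio-go 4.0.0 (this patch): the size is passed explicitly and metadata moves into options.
	n, err := c.PutObject("my-bucket", "my-object", bytes.NewReader(payload), int64(len(payload)),
		minio.PutObjectOptions{ContentType: "application/octet-stream"})
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("uploaded", n, "bytes")
}
```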
- size, err = getReaderSize(reader) - if err != nil { - return 0, err - } - - return c.putObjectCommon(bucketName, objectName, reader, size, metadata, progress) -} - -func (c Client) putObjectCommon(bucketName, objectName string, reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) { +func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { // Check for largest object size allowed. if size > int64(maxMultipartPutObjectSize) { return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) @@ -194,30 +131,27 @@ func (c Client) putObjectCommon(bucketName, objectName string, reader io.Reader, // NOTE: Streaming signature is not supported by GCS. if s3utils.IsGoogleEndpoint(c.endpointURL) { // Do not compute MD5 for Google Cloud Storage. - return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) + return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) } if c.overrideSignerType.IsV2() { if size >= 0 && size < minPartSize { - return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) + return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) } - return c.putObjectMultipart(bucketName, objectName, reader, size, metadata, progress) + return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts) } - if size < 0 { - return c.putObjectMultipartStreamNoLength(bucketName, objectName, reader, metadata, progress) + return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts) } if size < minPartSize { - return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) + return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) } - // For all sizes greater than 64MiB do multipart. - return c.putObjectMultipartStream(bucketName, objectName, reader, size, metadata, progress) + return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts) } -func (c Client) putObjectMultipartStreamNoLength(bucketName, objectName string, reader io.Reader, metadata map[string][]string, - progress io.Reader) (n int64, err error) { +func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err @@ -238,16 +172,15 @@ func (c Client) putObjectMultipartStreamNoLength(bucketName, objectName string, if err != nil { return 0, err } - // Initiate a new multipart upload. - uploadID, err := c.newUploadID(bucketName, objectName, metadata) + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { return 0, err } defer func() { if err != nil { - c.abortMultipartUpload(bucketName, objectName, uploadID) + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) } }() @@ -263,21 +196,20 @@ func (c Client) putObjectMultipartStreamNoLength(bucketName, objectName string, for partNumber <= totalPartsCount { length, rErr := io.ReadFull(reader, buf) - if rErr == io.EOF { + if rErr == io.EOF && partNumber > 1 { break } if rErr != nil && rErr != io.ErrUnexpectedEOF { return 0, rErr } - // Update progress reader appropriately to the latest offset // as we read from the source. 
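`putObjectCommon` above now dispatches purely on the declared size: negative sizes go to the streaming multipart path that reads parts until EOF, payloads under `minPartSize` take the single-PUT no-checksum path, and everything else uses the sized multipart stream. A sketch of driving the unknown-length path from an `io.Pipe`, which exposes no usable `Size`, `Len` or `Stat` (names and endpoint are placeholders):

```go
package main

import (
	"io"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// An io.Pipe is a genuinely unknown-length stream.
	pr, pw := io.Pipe()
	go func() {
		defer pw.Close()
		for i := 0; i < 1000; i++ {
			if _, werr := pw.Write([]byte("streamed line of unknown total length\n")); werr != nil {
				return
			}
		}
	}()

	// Passing -1 as objectSize selects putObjectMultipartStreamNoLength:
	// fixed-size parts are buffered and uploaded until the reader hits io.EOF.
	n, err := c.PutObject("my-bucket", "my-object", pr, -1,
		minio.PutObjectOptions{ContentType: "text/plain"})
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("uploaded", n, "bytes")
}
```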
- rd := newHook(bytes.NewReader(buf[:length]), progress) + rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) // Proceed to upload the part. var objPart ObjectPart - objPart, err = c.uploadPart(bucketName, objectName, uploadID, rd, partNumber, - nil, nil, int64(length), metadata) + objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, + nil, nil, int64(length), opts.UserMetadata) if err != nil { return totalUploadedSize, err } @@ -313,7 +245,7 @@ func (c Client) putObjectMultipartStreamNoLength(bucketName, objectName string, // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) - if _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload); err != nil { + if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil { return totalUploadedSize, err } diff --git a/vendor/github.com/minio/minio-go/api-remove.go b/vendor/github.com/minio/minio-go/api-remove.go index 3574cbc1a..1c55abfeb 100644 --- a/vendor/github.com/minio/minio-go/api-remove.go +++ b/vendor/github.com/minio/minio-go/api-remove.go @@ -18,6 +18,7 @@ package minio import ( "bytes" + "context" "encoding/xml" "io" "net/http" @@ -36,7 +37,7 @@ func (c Client) RemoveBucket(bucketName string) error { return err } // Execute DELETE on bucket. - resp, err := c.executeMethod("DELETE", requestMetadata{ + resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{ bucketName: bucketName, contentSHA256Bytes: emptySHA256, }) @@ -66,7 +67,7 @@ func (c Client) RemoveObject(bucketName, objectName string) error { return err } // Execute DELETE on objectName. - resp, err := c.executeMethod("DELETE", requestMetadata{ + resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{ bucketName: bucketName, objectName: objectName, contentSHA256Bytes: emptySHA256, @@ -187,7 +188,7 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan // Generate remove multi objects XML request removeBytes := generateRemoveMultiObjectsRequest(batch) // Execute GET on bucket to list objects. - resp, err := c.executeMethod("POST", requestMetadata{ + resp, err := c.executeMethod(context.Background(), "POST", requestMetadata{ bucketName: bucketName, queryValues: urlValues, contentBody: bytes.NewReader(removeBytes), @@ -227,7 +228,7 @@ func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error { } if uploadID != "" { // Upload id found, abort the incomplete multipart upload. - err := c.abortMultipartUpload(bucketName, objectName, uploadID) + err := c.abortMultipartUpload(context.Background(), bucketName, objectName, uploadID) if err != nil { return err } @@ -237,7 +238,7 @@ func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error { // abortMultipartUpload aborts a multipart upload for the given // uploadID, all previously uploaded parts are deleted. -func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error { +func (c Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -251,7 +252,7 @@ func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) er urlValues.Set("uploadId", uploadID) // Execute DELETE on multipart upload. 
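The remove APIs above only gain context plumbing internally (the exported calls still pass `context.Background()`), but `RemoveObjects` deserves a usage note: object names are streamed in on a channel, batched into multi-object DELETE requests, and failures come back on the returned error channel. A short sketch, with bucket and object names as placeholders:

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Stream the object names to delete; RemoveObjects batches them into
	// multi-object DELETE requests on the wire.
	objectsCh := make(chan string)
	go func() {
		defer close(objectsCh)
		for _, name := range []string{"a.txt", "b.txt", "c.txt"} {
			objectsCh <- name
		}
	}()

	// Any object that could not be removed is reported on the error channel.
	for rErr := range c.RemoveObjects("my-bucket", objectsCh) {
		log.Println("remove failed:", rErr.Err)
	}
}
```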
- resp, err := c.executeMethod("DELETE", requestMetadata{ + resp, err := c.executeMethod(ctx, "DELETE", requestMetadata{ bucketName: bucketName, objectName: objectName, queryValues: urlValues, diff --git a/vendor/github.com/minio/minio-go/api-stat.go b/vendor/github.com/minio/minio-go/api-stat.go index 5f06bfc9e..cb3017f1d 100644 --- a/vendor/github.com/minio/minio-go/api-stat.go +++ b/vendor/github.com/minio/minio-go/api-stat.go @@ -17,6 +17,7 @@ package minio import ( + "context" "net/http" "strconv" "strings" @@ -33,7 +34,7 @@ func (c Client) BucketExists(bucketName string) (bool, error) { } // Execute HEAD on bucketName. - resp, err := c.executeMethod("HEAD", requestMetadata{ + resp, err := c.executeMethod(context.Background(), "HEAD", requestMetadata{ bucketName: bucketName, contentSHA256Bytes: emptySHA256, }) @@ -108,7 +109,7 @@ func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHead } // Execute HEAD on objectName. - resp, err := c.executeMethod("HEAD", requestMetadata{ + resp, err := c.executeMethod(context.Background(), "HEAD", requestMetadata{ bucketName: bucketName, objectName: objectName, contentSHA256Bytes: emptySHA256, diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go index 4630f95f6..47e005306 100644 --- a/vendor/github.com/minio/minio-go/api.go +++ b/vendor/github.com/minio/minio-go/api.go @@ -19,6 +19,7 @@ package minio import ( "bytes" + "context" "crypto/md5" "crypto/sha256" "encoding/base64" @@ -87,7 +88,7 @@ type Client struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "3.0.2" + libraryVersion = "4.0.0" ) // User Agent should always following the below style. @@ -494,9 +495,11 @@ var successStatus = []int{ // executeMethod - instantiates a given method, and retries the // request upon any error up to maxRetries attempts in a binomially // delayed manner using a standard back off algorithm. -func (c Client) executeMethod(method string, metadata requestMetadata) (res *http.Response, err error) { +func (c Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) { var isRetryable bool // Indicates if request can be retried. var bodySeeker io.Seeker // Extracted seeker from io.Reader. + var reqRetry = MaxRetry // Indicates how many times we can retry the request + if metadata.contentBody != nil { // Check if body is seekable then it is retryable. bodySeeker, isRetryable = metadata.contentBody.(io.Seeker) @@ -504,6 +507,11 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt case os.Stdin, os.Stdout, os.Stderr: isRetryable = false } + // Retry only when reader is seekable + if !isRetryable { + reqRetry = 1 + } + // Figure out if the body can be closed - if yes // we will definitely close it upon the function // return. @@ -522,7 +530,7 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt // Blank indentifier is kept here on purpose since 'range' without // blank identifiers is only supported since go1.4 // https://golang.org/doc/go1.4#forrange. 
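One behavioural change buried in `executeMethod` above: when the request body is not an `io.Seeker`, the retry budget drops from `MaxRetry` to a single attempt, because the body cannot be rewound and replayed. The check the client performs is essentially the type assertion below (a stand-alone illustration, not code from this patch):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// canRetry mirrors the seekability check in executeMethod: a request body can
// only be replayed on retry when it also implements io.Seeker (and is not one
// of os.Stdin/Stdout/Stderr, which the client special-cases).
func canRetry(body io.Reader) bool {
	_, ok := body.(io.Seeker)
	return ok
}

func main() {
	fmt.Println(canRetry(bytes.NewReader([]byte("payload")))) // true  -> up to MaxRetry attempts
	pr, _ := io.Pipe()
	fmt.Println(canRetry(pr)) // false -> reqRetry is capped at 1
}
```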
- for range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) { + for range c.newRetryTimer(reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) { // Retry executes the following function body if request has an // error until maxRetries have been exhausted, retry attempts are // performed after waiting for a given period of time in a @@ -545,6 +553,8 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt } return nil, err } + // Add context to request + req = req.WithContext(ctx) // Initiate the request. res, err = c.do(req) @@ -720,7 +730,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R } // set md5Sum for content protection. - if metadata.contentMD5Bytes != nil { + if len(metadata.contentMD5Bytes) > 0 { req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes)) } diff --git a/vendor/github.com/minio/minio-go/appveyor.yml b/vendor/github.com/minio/minio-go/appveyor.yml index 4f5c1b390..0f623d3d4 100644 --- a/vendor/github.com/minio/minio-go/appveyor.yml +++ b/vendor/github.com/minio/minio-go/appveyor.yml @@ -21,6 +21,7 @@ install: - go get -u github.com/minio/go-homedir - go get -u github.com/remyoudompheng/go-misc/deadcode - go get -u github.com/gordonklaus/ineffassign + - go get -u github.com/dustin/go-humanize # to run your custom scripts instead of automatic MSBuild build_script: diff --git a/vendor/github.com/minio/minio-go/constants.go b/vendor/github.com/minio/minio-go/constants.go index 9771d2f92..5b00e1e39 100644 --- a/vendor/github.com/minio/minio-go/constants.go +++ b/vendor/github.com/minio/minio-go/constants.go @@ -50,7 +50,7 @@ const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 const unsignedPayload = "UNSIGNED-PAYLOAD" // Total number of parallel workers used for multipart operation. -var totalWorkers = 3 +const totalWorkers = 4 // Signature related constants. const ( diff --git a/vendor/github.com/minio/minio-go/core.go b/vendor/github.com/minio/minio-go/core.go index 4b1054a69..d94cf3214 100644 --- a/vendor/github.com/minio/minio-go/core.go +++ b/vendor/github.com/minio/minio-go/core.go @@ -17,6 +17,7 @@ package minio import ( + "context" "io" "github.com/minio/minio-go/pkg/policy" @@ -53,13 +54,13 @@ func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, } // PutObject - Upload object. Uploads using single PUT call. -func (c Core) PutObject(bucket, object string, size int64, data io.Reader, md5Sum, sha256Sum []byte, metadata map[string][]string) (ObjectInfo, error) { - return c.putObjectDo(bucket, object, data, md5Sum, sha256Sum, size, metadata) +func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Sum, sha256Sum []byte, metadata map[string]string) (ObjectInfo, error) { + return c.putObjectDo(context.Background(), bucket, object, data, md5Sum, sha256Sum, size, PutObjectOptions{UserMetadata: metadata}) } -// NewMultipartUpload - Initiates new multipart upload and returns the new uploaID. -func (c Core) NewMultipartUpload(bucket, object string, metadata map[string][]string) (uploadID string, err error) { - result, err := c.initiateMultipartUpload(bucket, object, metadata) +// NewMultipartUpload - Initiates new multipart upload and returns the new uploadID. 
+func (c Core) NewMultipartUpload(bucket, object string, opts PutObjectOptions) (uploadID string, err error) { + result, err := c.initiateMultipartUpload(context.Background(), bucket, object, opts) return result.UploadID, err } @@ -69,14 +70,14 @@ func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, de } // PutObjectPart - Upload an object part. -func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Sum, sha256Sum []byte) (ObjectPart, error) { - return c.PutObjectPartWithMetadata(bucket, object, uploadID, partID, size, data, md5Sum, sha256Sum, nil) +func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Sum, sha256Sum []byte) (ObjectPart, error) { + return c.PutObjectPartWithMetadata(bucket, object, uploadID, partID, data, size, md5Sum, sha256Sum, nil) } // PutObjectPartWithMetadata - upload an object part with additional request metadata. -func (c Core) PutObjectPartWithMetadata(bucket, object, uploadID string, partID int, - size int64, data io.Reader, md5Sum, sha256Sum []byte, metadata map[string][]string) (ObjectPart, error) { - return c.uploadPart(bucket, object, uploadID, data, partID, md5Sum, sha256Sum, size, metadata) +func (c Core) PutObjectPartWithMetadata(bucket, object, uploadID string, partID int, data io.Reader, + size int64, md5Sum, sha256Sum []byte, metadata map[string]string) (ObjectPart, error) { + return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Sum, sha256Sum, size, metadata) } // ListObjectParts - List uploaded parts of an incomplete upload.x @@ -86,7 +87,7 @@ func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker // CompleteMultipartUpload - Concatenate uploaded parts and commit to an object. func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) error { - _, err := c.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload{ + _, err := c.completeMultipartUpload(context.Background(), bucket, object, uploadID, completeMultipartUpload{ Parts: parts, }) return err @@ -94,7 +95,7 @@ func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []C // AbortMultipartUpload - Abort an incomplete upload. func (c Core) AbortMultipartUpload(bucket, object, uploadID string) error { - return c.abortMultipartUpload(bucket, object, uploadID) + return c.abortMultipartUpload(context.Background(), bucket, object, uploadID) } // GetBucketPolicy - fetches bucket access policy for a given bucket. @@ -111,7 +112,7 @@ func (c Core) PutBucketPolicy(bucket string, bucketPolicy policy.BucketAccessPol // partial objects and also downloading objects with special conditions // matching etag, modtime etc. func (c Core) GetObject(bucketName, objectName string, reqHeaders RequestHeaders) (io.ReadCloser, ObjectInfo, error) { - return c.getObject(bucketName, objectName, reqHeaders) + return c.getObject(context.Background(), bucketName, objectName, reqHeaders) } // StatObject is a lower level API implemented to support special diff --git a/vendor/github.com/minio/minio-go/functional_tests.go b/vendor/github.com/minio/minio-go/functional_tests.go new file mode 100644 index 000000000..d7852252f --- /dev/null +++ b/vendor/github.com/minio/minio-go/functional_tests.go @@ -0,0 +1,5889 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. 
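The `Core` changes above reorder the data/size arguments and switch part metadata to `map[string]string`, but the low-level multipart workflow itself is unchanged. A hedged sketch of that workflow with the new signatures, assuming the `NewCore` constructor and the `CompletePart{PartNumber, ETag}` shape from upstream minio-go (names, sizes and credentials are placeholders):

```go
package main

import (
	"bytes"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	core, err := minio.NewCore("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	bucket, object := "my-bucket", "my-object"

	// 1. Initiate: options now travel in PutObjectOptions.
	uploadID, err := core.NewMultipartUpload(bucket, object,
		minio.PutObjectOptions{ContentType: "application/octet-stream"})
	if err != nil {
		log.Fatalln(err)
	}

	// 2. Upload parts: note the new (data, size) argument order.
	part := bytes.Repeat([]byte("a"), 5*1024*1024) // 5 MiB of demo data
	objPart, err := core.PutObjectPart(bucket, object, uploadID, 1,
		bytes.NewReader(part), int64(len(part)), nil, nil)
	if err != nil {
		core.AbortMultipartUpload(bucket, object, uploadID)
		log.Fatalln(err)
	}

	// 3. Complete with the collected part numbers and ETags.
	err = core.CompleteMultipartUpload(bucket, object, uploadID, []minio.CompletePart{
		{PartNumber: objPart.PartNumber, ETag: objPart.ETag},
	})
	if err != nil {
		log.Fatalln(err)
	}
}
```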
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "bytes" + "context" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "math/rand" + "net/http" + "net/url" + "os" + "reflect" + "strconv" + "strings" + "time" + + humanize "github.com/dustin/go-humanize" + minio "github.com/minio/minio-go" + log "github.com/sirupsen/logrus" + + "github.com/minio/minio-go/pkg/encrypt" + "github.com/minio/minio-go/pkg/policy" +) + +const ( + sixtyFiveMiB = 65 * humanize.MiByte // 65MiB + thirtyThreeKiB = 33 * humanize.KiByte // 33KiB + oneMiB = 1 * humanize.MiByte // 1MiB +) + +const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569" +const ( + letterIdxBits = 6 // 6 bits to represent a letter index + letterIdxMask = 1<= 0; { + if remain == 0 { + cache, remain = src.Int63(), letterIdxMax + } + if idx := int(cache & letterIdxMask); idx < len(letterBytes) { + b[i] = letterBytes[idx] + i-- + } + cache >>= letterIdxBits + remain-- + } + return prefix + string(b[0:30-len(prefix)]) +} + +func isQuickMode() bool { + return os.Getenv("MODE") == "quick" +} + +// Tests bucket re-create errors. +func testMakeBucketError() { + region := "eu-central-1" + + // initialize logging params + startTime := time.Now() + function := "MakeBucket(bucketName, region)" + // initialize logging params + args := map[string]interface{}{ + "bucketName": "", + "region": region, + } + + // skipping region functional tests for non s3 runs + if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { + ignoredLog(function, args, startTime, "Skipped region functional tests for non s3 runs").Info() + return + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket in 'eu-central-1'. + if err = c.MakeBucket(bucketName, region); err != nil { + failureLog(function, args, startTime, "", "MakeBucket Failed", err).Fatal() + } + if err = c.MakeBucket(bucketName, region); err == nil { + failureLog(function, args, startTime, "", "Bucket already exists", err).Fatal() + } + // Verify valid error response from server. 
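The `randString` helper in the hunk above is truncated in this copy of the patch (the text between `1<` and `>= 0;` is missing). For reference, the letter-index technique it is built on looks like the sketch below; this is a reconstruction under that assumption, not the vendored code verbatim:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"

const (
	letterIdxBits = 6                    // 6 bits to represent a letter index
	letterIdxMask = 1<<letterIdxBits - 1 // all 1-bits, as many as letterIdxBits
	letterIdxMax  = 63 / letterIdxBits   // number of letter indices fitting in 63 bits
)

// randString draws random letters from letterBytes, consuming 6 bits of each
// 63-bit random value per letter, and prepends the given prefix.
func randString(n int, src rand.Source, prefix string) string {
	b := make([]byte, n)
	for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
		if remain == 0 {
			cache, remain = src.Int63(), letterIdxMax
		}
		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
			b[i] = letterBytes[idx]
			i--
		}
		cache >>= letterIdxBits
		remain--
	}
	return prefix + string(b[0:30-len(prefix)])
}

func main() {
	fmt.Println(randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test"))
}
```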
+ if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && + minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { + failureLog(function, args, startTime, "", "Invalid error returned by server", err).Fatal() + } + if err = c.RemoveBucket(bucketName); err != nil { + failureLog(function, args, startTime, "", "Remove bucket failed", err).Fatal() + } + + successLogger(function, args, startTime).Info() +} + +func testMetadataSizeLimit() { + startTime := time.Now() + function := "PutObject(bucketName, objectName, reader, objectSize, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts.UserMetadata": "", + } + rand.Seed(startTime.Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client creation failed", err).Fatal() + } + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "Make bucket failed", err).Fatal() + } + + const HeaderSizeLimit = 8 * 1024 + const UserMetadataLimit = 2 * 1024 + + // Meta-data greater than the 2 KB limit of AWS - PUT calls with this meta-data should fail + metadata := make(map[string]string) + metadata["X-Amz-Meta-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+UserMetadataLimit-len("X-Amz-Meta-Mint-Test"))) + args["metadata"] = fmt.Sprint(metadata) + + _, err = c.PutObject(bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) + if err == nil { + failureLog(function, args, startTime, "", "Created object with user-defined metadata exceeding metadata size limits", nil).Fatal() + } + + // Meta-data (headers) greater than the 8 KB limit of AWS - PUT calls with this meta-data should fail + metadata = make(map[string]string) + metadata["X-Amz-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+HeaderSizeLimit-len("X-Amz-Mint-Test"))) + args["metadata"] = fmt.Sprint(metadata) + _, err = c.PutObject(bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) + if err == nil { + failureLog(function, args, startTime, "", "Created object with headers exceeding header size limits", nil).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Tests various bucket supported formats. +func testMakeBucketRegions() { + region := "eu-central-1" + // initialize logging params + startTime := time.Now() + function := "MakeBucket(bucketName, region)" + // initialize logging params + args := map[string]interface{}{ + "bucketName": "", + "region": region, + } + + // skipping region functional tests for non s3 runs + if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { + ignoredLog(function, args, startTime, "Skipped region functional tests for non s3 runs").Info() + return + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
+ c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket in 'eu-central-1'. + if err = c.MakeBucket(bucketName, region); err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + if err = c.RemoveBucket(bucketName); err != nil { + failureLog(function, args, startTime, "", "Remove bucket failed", err).Fatal() + } + + // Make a new bucket with '.' in its name, in 'us-west-2'. This + // request is internally staged into a path style instead of + // virtual host style. + region = "us-west-2" + args["region"] = region + if err = c.MakeBucket(bucketName+".withperiod", region); err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Remove the newly created bucket. + if err = c.RemoveBucket(bucketName + ".withperiod"); err != nil { + failureLog(function, args, startTime, "", "Remove bucket failed", err).Fatal() + } + + successLogger(function, args, startTime).Info() +} + +// Test PutObject using a large data to trigger multipart readat +func testPutObjectReadAt() { + // initialize logging params + startTime := time.Now() + function := "PutObject(bucketName, objectName, reader, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "objectContentType", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "Make bucket failed", err).Fatal() + } + + // Generate data using 4 parts so that all 3 'workers' are utilized and a part is leftover. + // Use different data for each part for multipart tests to ensure part order at the end. 
+ var reader = getDataReader("datafile-65-MB", sixtyFiveMiB) + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Object content type + objectContentType := "binary/octet-stream" + args["objectContentType"] = objectContentType + + n, err := c.PutObject(bucketName, objectName, reader, int64(sixtyFiveMiB), minio.PutObjectOptions{ContentType: objectContentType}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + } + + if n != int64(sixtyFiveMiB) { + failureLog(function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(sixtyFiveMiB)+" got "+string(n), err).Fatal() + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "Get Object failed", err).Fatal() + } + + st, err := r.Stat() + if err != nil { + failureLog(function, args, startTime, "", "Stat Object failed", err).Fatal() + } + if st.Size != int64(sixtyFiveMiB) { + failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(sixtyFiveMiB)+" got "+string(st.Size), err).Fatal() + } + if st.ContentType != objectContentType { + failureLog(function, args, startTime, "", "Content types don't match", err).Fatal() + } + if err := r.Close(); err != nil { + failureLog(function, args, startTime, "", "Object Close failed", err).Fatal() + } + if err := r.Close(); err == nil { + failureLog(function, args, startTime, "", "Object is already closed, didn't return error on Close", err).Fatal() + } + + err = c.RemoveObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + err = c.RemoveBucket(bucketName) + + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + + successLogger(function, args, startTime).Info() +} + +// Test PutObject using a large data to trigger multipart readat +func testPutObjectWithMetadata() { + // initialize logging params + startTime := time.Now() + function := "PutObject(bucketName, objectName, reader,size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", + } + + if isQuickMode() { + ignoredLog(function, args, startTime, "Skipping functional tests for short runs").Info() + return + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "Make bucket failed", err).Fatal() + } + + // Generate data using 2 parts + // Use different data in each part for multipart tests to ensure part order at the end. 
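The options string logged by the test above mentions a `Progress` reader. `PutObjectOptions.Progress` is an `io.Reader` hook: as the upload advances, the client forwards the transferred byte counts through its `Read` method (that is how the `newHook(...)` wrapper earlier in this patch uses it). A minimal counter that could be plugged in, assuming only that forwarding behaviour:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	minio "github.com/minio/minio-go"
)

// byteCounter is a trivial progress hook: each Read call reports how many
// bytes the client just transferred, so we simply accumulate len(p).
type byteCounter struct {
	total int64
}

func (c *byteCounter) Read(p []byte) (int, error) {
	c.total += int64(len(p))
	return len(p), nil
}

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	payload := bytes.Repeat([]byte("x"), 1<<20) // 1 MiB of demo data
	counter := &byteCounter{}

	_, err = client.PutObject("my-bucket", "my-object", bytes.NewReader(payload), int64(len(payload)),
		minio.PutObjectOptions{ContentType: "application/octet-stream", Progress: counter})
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println("progress hook saw", counter.total, "bytes")
}
```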
+ var reader = getDataReader("datafile-65-MB", sixtyFiveMiB) + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Object custom metadata + customContentType := "custom/contenttype" + + args["metadata"] = map[string][]string{ + "Content-Type": {customContentType}, + } + + n, err := c.PutObject(bucketName, objectName, reader, int64(sixtyFiveMiB), minio.PutObjectOptions{ + ContentType: customContentType}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + } + + if n != int64(sixtyFiveMiB) { + failureLog(function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(sixtyFiveMiB)+" got "+string(n), err).Fatal() + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + } + + st, err := r.Stat() + if err != nil { + failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + } + if st.Size != int64(sixtyFiveMiB) { + failureLog(function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(sixtyFiveMiB)+" got "+string(st.Size), err).Fatal() + } + if st.ContentType != customContentType { + failureLog(function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err).Fatal() + } + if err := r.Close(); err != nil { + failureLog(function, args, startTime, "", "Object Close failed", err).Fatal() + } + if err := r.Close(); err == nil { + failureLog(function, args, startTime, "", "Object already closed, should respond with error", err).Fatal() + } + + if err = c.RemoveObject(bucketName, objectName); err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + + if err = c.RemoveBucket(bucketName); err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + + successLogger(function, args, startTime).Info() +} + +// Test put object with streaming signature. +func testPutObjectStreaming() { + // initialize logging params + objectName := "test-object" + startTime := time.Now() + function := "PutObject(bucketName, objectName, reader,size,opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": objectName, + "size": -1, + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), + "minio-go-test") + args["bucketName"] = bucketName + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Upload an object. 
+ sizes := []int64{0, 64*1024 - 1, 64 * 1024} + + for _, size := range sizes { + data := bytes.Repeat([]byte("a"), int(size)) + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), int64(size), minio.PutObjectOptions{}) + if err != nil { + failureLog(function, args, startTime, "", "PutObjectStreaming failed", err).Fatal() + } + + if n != size { + failureLog(function, args, startTime, "", "Expected upload object size doesn't match with PutObjectStreaming return value", err).Fatal() + } + } + + // Remove the object. + err = c.RemoveObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + + // Remove the bucket. + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Test listing partially uploaded objects. +func testListPartiallyUploaded() { + // initialize logging params + startTime := time.Now() + function := "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "isRecursive": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Enable tracing, write to stdout. + // c.TraceOn(os.Stderr) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + r := bytes.NewReader(bytes.Repeat([]byte("0"), sixtyFiveMiB*2)) + + reader, writer := io.Pipe() + go func() { + i := 0 + for i < 25 { + _, cerr := io.CopyN(writer, r, (sixtyFiveMiB*2)/25) + if cerr != nil { + failureLog(function, args, startTime, "", "Copy failed", err).Fatal() + } + i++ + r.Seek(0, 0) + } + writer.CloseWithError(errors.New("proactively closed to be verified later")) + }() + + objectName := bucketName + "-resumable" + args["objectName"] = objectName + + _, err = c.PutObject(bucketName, objectName, reader, int64(sixtyFiveMiB*2), minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err == nil { + failureLog(function, args, startTime, "", "PutObject should fail", err).Fatal() + } + if !strings.Contains(err.Error(), "proactively closed to be verified later") { + failureLog(function, args, startTime, "", "String not found in PutObject output", err).Fatal() + } + + doneCh := make(chan struct{}) + defer close(doneCh) + isRecursive := true + args["isRecursive"] = isRecursive + + multiPartObjectCh := c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) + for multiPartObject := range multiPartObjectCh { + if multiPartObject.Err != nil { + failureLog(function, args, startTime, "", "Multipart object error", multiPartObject.Err).Fatal() + } + } + + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Test get object seeker from the end, using whence set to '2'. +func testGetObjectSeekEnd() { + // initialize logging params + startTime := time.Now() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Generate 33K of data. 
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB) + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + } + + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + } + + if n != int64(thirtyThreeKiB) { + failureLog(function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal() + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + } + + st, err := r.Stat() + if err != nil { + failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + } + + if st.Size != int64(thirtyThreeKiB) { + failureLog(function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(st.Size), err).Fatal() + } + + pos, err := r.Seek(-100, 2) + if err != nil { + failureLog(function, args, startTime, "", "Object Seek failed", err).Fatal() + } + if pos != st.Size-100 { + failureLog(function, args, startTime, "", "Incorrect position", err).Fatal() + } + buf2 := make([]byte, 100) + m, err := io.ReadFull(r, buf2) + if err != nil { + failureLog(function, args, startTime, "", "Error reading through io.ReadFull", err).Fatal() + } + if m != len(buf2) { + failureLog(function, args, startTime, "", "Number of bytes dont match, expected "+string(len(buf2))+" got "+string(m), err).Fatal() + } + hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:]) + hexBuf2 := fmt.Sprintf("%02x", buf2[:m]) + if hexBuf1 != hexBuf2 { + failureLog(function, args, startTime, "", "Values at same index dont match", err).Fatal() + } + pos, err = r.Seek(-100, 2) + if err != nil { + failureLog(function, args, startTime, "", "Object Seek failed", err).Fatal() + } + if pos != st.Size-100 { + failureLog(function, args, startTime, "", "Incorrect position", err).Fatal() + } + if err = r.Close(); err != nil { + failureLog(function, args, startTime, "", "ObjectClose failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Test get object reader to not throw error on being closed twice. +func testGetObjectClosedTwice() { + // initialize logging params + startTime := time.Now() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Generate 33K of data. + var reader = getDataReader("datafile-33-kB", thirtyThreeKiB) + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + n, err := c.PutObject(bucketName, objectName, reader, int64(thirtyThreeKiB), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + } + + if n != int64(thirtyThreeKiB) { + failureLog(function, args, startTime, "", "PutObject response doesn't match sent bytes, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal() + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + } + + st, err := r.Stat() + if err != nil { + failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + } + if st.Size != int64(thirtyThreeKiB) { + failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(st.Size), err).Fatal() + } + if err := r.Close(); err != nil { + failureLog(function, args, startTime, "", "Object Close failed", err).Fatal() + } + if err := r.Close(); err == nil { + failureLog(function, args, startTime, "", "Already closed object. No error returned", err).Fatal() + } + + err = c.RemoveObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Test removing multiple objects with Remove API +func testRemoveMultipleObjects() { + // initialize logging params + startTime := time.Now() + function := "RemoveObjects(bucketName, objectsCh)" + args := map[string]interface{}{ + "bucketName": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Enable tracing, write to stdout. + // c.TraceOn(os.Stderr) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) + + // Multi remove of 1100 objects + nrObjects := 1100 + + objectsCh := make(chan string) + + go func() { + defer close(objectsCh) + // Upload objects and send them to objectsCh + for i := 0; i < nrObjects; i++ { + objectName := "sample" + strconv.Itoa(i) + ".txt" + _, err = c.PutObject(bucketName, objectName, r, 8, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + continue + } + objectsCh <- objectName + } + }() + + // Call RemoveObjects API + errorCh := c.RemoveObjects(bucketName, objectsCh) + + // Check if errorCh doesn't receive any error + select { + case r, more := <-errorCh: + if more { + failureLog(function, args, startTime, "", "Unexpected error", r.Err).Fatal() + } + } + + // Clean the bucket created by the test + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Tests removing partially uploaded objects. +func testRemovePartiallyUploaded() { + // initialize logging params + startTime := time.Now() + function := "RemoveIncompleteUpload(bucketName, objectName)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Enable tracing, write to stdout. + // c.TraceOn(os.Stderr) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024)) + + reader, writer := io.Pipe() + go func() { + i := 0 + for i < 25 { + _, cerr := io.CopyN(writer, r, 128*1024) + if cerr != nil { + failureLog(function, args, startTime, "", "Copy failed", err).Fatal() + } + i++ + r.Seek(0, 0) + } + writer.CloseWithError(errors.New("proactively closed to be verified later")) + }() + + objectName := bucketName + "-resumable" + args["objectName"] = objectName + + _, err = c.PutObject(bucketName, objectName, reader, 128*1024, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err == nil { + failureLog(function, args, startTime, "", "PutObject should fail", err).Fatal() + } + if !strings.Contains(err.Error(), "proactively closed to be verified later") { + failureLog(function, args, startTime, "", "String not found", err).Fatal() + } + err = c.RemoveIncompleteUpload(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveIncompleteUpload failed", err).Fatal() + } + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Tests FPutObject of a big file to trigger multipart +func testFPutObjectMultipart() { + // initialize logging params + startTime := time.Now() + function := "FPutObject(bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileName": "", + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload. + var fileName = getFilePath("datafile-65-MB") + if os.Getenv("MINT_DATA_DIR") == "" { + // Make a temp file with minPartSize bytes of data. + file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") + if err != nil { + failureLog(function, args, startTime, "", "TempFile creation failed", err).Fatal() + } + // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload. 
+ _, err = io.Copy(file, getDataReader("non-existent", sixtyFiveMiB)) + if err != nil { + failureLog(function, args, startTime, "", "Copy failed", err).Fatal() + } + err = file.Close() + if err != nil { + failureLog(function, args, startTime, "", "File Close failed", err).Fatal() + } + fileName = file.Name() + args["fileName"] = fileName + } + totalSize := sixtyFiveMiB * 1 + // Set base object name + objectName := bucketName + "FPutObject" + "-standard" + args["objectName"] = objectName + + objectContentType := "testapplication/octet-stream" + args["objectContentType"] = objectContentType + + // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) + n, err := c.FPutObject(bucketName, objectName, fileName, minio.PutObjectOptions{ContentType: objectContentType}) + if err != nil { + failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal() + } + if n != int64(totalSize) { + failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal() + } + + r, err := c.GetObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + } + objInfo, err := r.Stat() + if err != nil { + failureLog(function, args, startTime, "", "Unexpected error", err).Fatal() + } + if objInfo.Size != int64(totalSize) { + failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(totalSize))+" got "+string(objInfo.Size), err).Fatal() + } + if objInfo.ContentType != objectContentType { + failureLog(function, args, startTime, "", "ContentType doesn't match", err).Fatal() + } + + // Remove all objects and bucket and temp file + err = c.RemoveObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Tests FPutObject with null contentType (default = application/octet-stream) +func testFPutObject() { + // initialize logging params + startTime := time.Now() + function := "FPutObject(bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Upload 3 parts worth of data to use all 3 of multiparts 'workers' and have an extra part. + // Use different data in part for multipart tests to check parts are uploaded in correct order. + var fName = getFilePath("datafile-65-MB") + if os.Getenv("MINT_DATA_DIR") == "" { + // Make a temp file with minPartSize bytes of data. 
+ file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") + if err != nil { + failureLog(function, args, startTime, "", "TempFile creation failed", err).Fatal() + } + + // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload. + var buffer = bytes.Repeat([]byte(string('a')), sixtyFiveMiB) + if _, err = file.Write(buffer); err != nil { + failureLog(function, args, startTime, "", "File write failed", err).Fatal() + } + // Close the file pro-actively for windows. + err = file.Close() + if err != nil { + failureLog(function, args, startTime, "", "File close failed", err).Fatal() + } + fName = file.Name() + } + var totalSize = sixtyFiveMiB * 1 + + // Set base object name + objectName := bucketName + "FPutObject" + args["objectName"] = objectName + + // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) + n, err := c.FPutObject(bucketName, objectName+"-standard", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal() + } + if n != int64(totalSize) { + failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err).Fatal() + } + + // Perform FPutObject with no contentType provided (Expecting application/octet-stream) + n, err = c.FPutObject(bucketName, objectName+"-Octet", fName, minio.PutObjectOptions{}) + if err != nil { + failureLog(function, args, startTime, "", "File close failed", err).Fatal() + } + if n != int64(totalSize) { + failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err).Fatal() + } + srcFile, err := os.Open(fName) + if err != nil { + failureLog(function, args, startTime, "", "File open failed", err).Fatal() + } + defer srcFile.Close() + // Add extension to temp file name + tmpFile, err := os.Create(fName + ".gtar") + if err != nil { + failureLog(function, args, startTime, "", "File create failed", err).Fatal() + } + defer tmpFile.Close() + _, err = io.Copy(tmpFile, srcFile) + if err != nil { + failureLog(function, args, startTime, "", "File copy failed", err).Fatal() + } + + // Perform FPutObject with no contentType provided (Expecting application/x-gtar) + n, err = c.FPutObject(bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{}) + if err != nil { + failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal() + } + if n != int64(totalSize) { + failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err).Fatal() + } + + // Check headers + rStandard, err := c.StatObject(bucketName, objectName+"-standard") + if err != nil { + failureLog(function, args, startTime, "", "StatObject failed", err).Fatal() + } + if rStandard.ContentType != "application/octet-stream" { + failureLog(function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err).Fatal() + } + + rOctet, err := c.StatObject(bucketName, objectName+"-Octet") + if err != nil { + failureLog(function, args, startTime, "", "StatObject failed", err).Fatal() + } + if rOctet.ContentType != "application/octet-stream" { + failureLog(function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err).Fatal() + } + + rGTar, err := c.StatObject(bucketName, objectName+"-GTar") + if err != 
nil { + failureLog(function, args, startTime, "", "StatObject failed", err).Fatal() + } + if rGTar.ContentType != "application/x-gtar" { + failureLog(function, args, startTime, "", "ContentType does not match, expected application/x-gtar, got "+rStandard.ContentType, err).Fatal() + } + + // Remove all objects and bucket and temp file + err = c.RemoveObject(bucketName, objectName+"-standard") + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + + err = c.RemoveObject(bucketName, objectName+"-Octet") + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + + err = c.RemoveObject(bucketName, objectName+"-GTar") + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + + err = os.Remove(fName + ".gtar") + if err != nil { + failureLog(function, args, startTime, "", "File remove failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Tests FPutObjectWithContext request context cancels after timeout +func testFPutObjectWithContext() { + // initialize logging params + startTime := time.Now() + function := "FPutObject(bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Upload 1 parts worth of data to use multipart upload. + // Use different data in part for multipart tests to check parts are uploaded in correct order. + var fName = getFilePath("datafile-1-MB") + if os.Getenv("MINT_DATA_DIR") == "" { + // Make a temp file with 1 MiB bytes of data. + file, err := ioutil.TempFile(os.TempDir(), "FPutObjectWithContextTest") + if err != nil { + failureLog(function, args, startTime, "", "TempFile creation failed", err).Fatal() + } + + // Upload 1 parts to trigger multipart upload + var buffer = bytes.Repeat([]byte(string('a')), 1024*1024*1) + if _, err = file.Write(buffer); err != nil { + failureLog(function, args, startTime, "", "File buffer write failed", err).Fatal() + } + // Close the file pro-actively for windows. 
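The test above relies on FPutObject detecting the Content-Type from the file extension when PutObjectOptions is left empty. A minimal sketch of that behaviour, assuming an initialized client, an existing bucket, and placeholder paths (one without a recognizable extension, one ending in .gtar):

```go
package example

import (
	"log"

	minio "github.com/minio/minio-go"
)

// fPutDefaultContentType uploads the same content twice with empty
// PutObjectOptions, once from a path without a known extension and once
// from a ".gtar" copy, then prints the Content-Type the server stored.
func fPutDefaultContentType(c *minio.Client, bucket, plainPath, gtarPath string) {
	if _, err := c.FPutObject(bucket, "obj-plain", plainPath, minio.PutObjectOptions{}); err != nil {
		log.Fatalln("FPutObject failed:", err)
	}
	if _, err := c.FPutObject(bucket, "obj-gtar", gtarPath, minio.PutObjectOptions{}); err != nil {
		log.Fatalln("FPutObject failed:", err)
	}
	for _, name := range []string{"obj-plain", "obj-gtar"} {
		st, err := c.StatObject(bucket, name)
		if err != nil {
			log.Fatalln("StatObject failed:", err)
		}
		// Expected: application/octet-stream for obj-plain,
		// application/x-gtar for obj-gtar (detected from the extension).
		log.Printf("%s: %s", name, st.ContentType)
	}
}
```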
+ err = file.Close() + if err != nil { + failureLog(function, args, startTime, "", "File close failed", err).Fatal() + + } + fName = file.Name() + } + var totalSize = 1024 * 1024 * 1 + + // Set base object name + objectName := bucketName + "FPutObjectWithContext" + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + defer cancel() + + // Perform standard FPutObjectWithContext with contentType provided (Expecting application/octet-stream) + _, err = c.FPutObjectWithContext(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err == nil { + failureLog(function, args, startTime, "", "Request context cancellation failed", err).Fatal() + } + ctx, cancel = context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() + // Perform FPutObjectWithContext with a long timeout. Expect the put object to succeed + n, err := c.FPutObjectWithContext(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) + if err != nil { + failureLog(function, args, startTime, "", "FPutObjectWithContext failed", err).Fatal() + } + if n != int64(totalSize) { + failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err).Fatal() + } + + _, err = c.StatObject(bucketName, objectName+"-Longtimeout") + if err != nil { + failureLog(function, args, startTime, "", "StatObject failed", err).Fatal() + } + err = c.RemoveObject(bucketName, objectName+"-Shorttimeout") + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + // Remove all objects and bucket and temp file + err = c.RemoveObject(bucketName, objectName+"-Longtimeout") + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + + err = os.Remove(fName) + if err != nil { + failureLog(function, args, startTime, "", "Remove file failed", err).Fatal() + } + successLogger(function, args, startTime).Info() + +} + +// Tests FPutObjectWithContext request context cancels after timeout +func testFPutObjectWithContextV2() { + // initialize logging params + startTime := time.Now() + function := "FPutObjectWithContext(ctx, bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{ContentType:objectContentType}", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Upload 1 parts worth of data to use multipart upload. 
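A condensed sketch of the context-cancellation pattern used above: FPutObjectWithContext fails when its context expires before the upload finishes and succeeds under a generous timeout. Client, bucket and file path are assumed to exist; names are placeholders.

```go
package example

import (
	"context"
	"log"
	"time"

	minio "github.com/minio/minio-go"
)

// fPutWithContext uploads the same file twice: first with a context that
// expires almost immediately (the upload is expected to be aborted), then
// with a long timeout (the upload is expected to complete).
func fPutWithContext(c *minio.Client, bucket, filePath string) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	if _, err := c.FPutObjectWithContext(ctx, bucket, "obj-short", filePath, minio.PutObjectOptions{}); err == nil {
		log.Println("unexpected: upload finished before the context expired")
	}

	ctx, cancel = context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()
	n, err := c.FPutObjectWithContext(ctx, bucket, "obj-long", filePath, minio.PutObjectOptions{})
	if err != nil {
		log.Fatalln("FPutObjectWithContext failed:", err)
	}
	log.Printf("uploaded %d bytes", n)
}
```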
+ // Use different data in part for multipart tests to check parts are uploaded in correct order. + var fName = getFilePath("datafile-1-MB") + if os.Getenv("MINT_DATA_DIR") == "" { + // Make a temp file with 1 MiB bytes of data. + file, err := ioutil.TempFile(os.TempDir(), "FPutObjectWithContextTest") + if err != nil { + failureLog(function, args, startTime, "", "Temp file creation failed", err).Fatal() + + } + + // Upload 1 parts to trigger multipart upload + var buffer = bytes.Repeat([]byte(string('a')), 1024*1024*1) + if _, err = file.Write(buffer); err != nil { + failureLog(function, args, startTime, "", "Write buffer to file failed", err).Fatal() + + } + // Close the file pro-actively for windows. + err = file.Close() + if err != nil { + failureLog(function, args, startTime, "", "File close failed", err).Fatal() + + } + fName = file.Name() + } + var totalSize = 1024 * 1024 * 1 + + // Set base object name + objectName := bucketName + "FPutObjectWithContext" + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Millisecond) + defer cancel() + + // Perform standard FPutObjectWithContext with contentType provided (Expecting application/octet-stream) + _, err = c.FPutObjectWithContext(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err == nil { + failureLog(function, args, startTime, "", "FPutObjectWithContext with short timeout failed", err).Fatal() + } + ctx, cancel = context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() + // Perform FPutObjectWithContext with a long timeout. Expect the put object to succeed + n, err := c.FPutObjectWithContext(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) + if err != nil { + failureLog(function, args, startTime, "", "FPutObjectWithContext with long timeout failed", err).Fatal() + } + if n != int64(totalSize) { + failureLog(function, args, startTime, "", "Number of bytes does not match:wanted"+string(totalSize)+" got "+string(n), err).Fatal() + } + + _, err = c.StatObject(bucketName, objectName+"-Longtimeout") + if err != nil { + failureLog(function, args, startTime, "", "StatObject failed", err).Fatal() + + } + + err = c.RemoveObject(bucketName, objectName+"-Shorttimeout") + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + err = c.RemoveObject(bucketName, objectName+"-Longtimeout") + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + + } + + err = os.Remove(fName) + if err != nil { + failureLog(function, args, startTime, "", "Remove file failed", err).Fatal() + + } + successLogger(function, args, startTime).Info() + +} + +// Test validates putObject with context to see if request cancellation is honored. +func testPutObjectWithContext() { + // initialize logging params + startTime := time.Now() + function := "PutObjectWithContext(ctx, bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{ContentType:objectContentType}", + } + // Instantiate new minio client object. 
+ c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Make a new bucket. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket call failed", err).Fatal() + } + defer c.RemoveBucket(bucketName) + bufSize := 1<<20 + 32*1024 + var reader = getDataReader("datafile-33-kB", bufSize) + defer reader.Close() + objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "PutObjectWithContext with short timeout failed", err).Fatal() + } + + ctx, cancel = context.WithTimeout(context.Background(), 3*time.Minute) + defer cancel() + reader = getDataReader("datafile-33-kB", bufSize) + defer reader.Close() + _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "PutObjectWithContext with long timeout failed", err).Fatal() + } + + if err = c.RemoveObject(bucketName, objectName); err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + successLogger(function, args, startTime).Info() + +} + +// Tests get object ReaderSeeker interface methods. +func testGetObjectReadSeekFunctional() { + // initialize logging params + startTime := time.Now() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Generate 33K of data. 
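The same idea applies to streaming uploads. A minimal sketch of PutObjectWithContext with an in-memory payload and a request deadline, assuming an initialized client and an existing bucket:

```go
package example

import (
	"bytes"
	"context"
	"log"
	"time"

	minio "github.com/minio/minio-go"
)

// putWithContext uploads an in-memory payload with a deadline attached to
// the request, mirroring the PutObjectWithContext call in the test above.
func putWithContext(c *minio.Client, bucket, object string, payload []byte) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	n, err := c.PutObjectWithContext(ctx, bucket, object,
		bytes.NewReader(payload), int64(len(payload)),
		minio.PutObjectOptions{ContentType: "binary/octet-stream"})
	if err != nil {
		log.Fatalln("PutObjectWithContext failed:", err)
	}
	log.Printf("uploaded %d bytes", n)
}
```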
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB) + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + } + + // Save the data + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + } + + if n != int64(thirtyThreeKiB) { + failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(n), err).Fatal() + } + + defer func() { + err = c.RemoveObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + }() + + // Read the data back + r, err := c.GetObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + } + + st, err := r.Stat() + if err != nil { + failureLog(function, args, startTime, "", "Stat object failed", err).Fatal() + } + + if st.Size != int64(thirtyThreeKiB) { + failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(st.Size), err).Fatal() + } + + // This following function helps us to compare data from the reader after seek + // with the data from the original buffer + cmpData := func(r io.Reader, start, end int) { + if end-start == 0 { + return + } + buffer := bytes.NewBuffer([]byte{}) + if _, err := io.CopyN(buffer, r, int64(thirtyThreeKiB)); err != nil { + if err != io.EOF { + failureLog(function, args, startTime, "", "CopyN failed", err).Fatal() + } + } + if !bytes.Equal(buf[start:end], buffer.Bytes()) { + failureLog(function, args, startTime, "", "Incorrect read bytes v/s original buffer", err).Fatal() + } + } + + // Generic seek error for errors other than io.EOF + seekErr := errors.New("seek error") + + testCases := []struct { + offset int64 + whence int + pos int64 + err error + shouldCmp bool + start int + end int + }{ + // Start from offset 0, fetch data and compare + {0, 0, 0, nil, true, 0, 0}, + // Start from offset 2048, fetch data and compare + {2048, 0, 2048, nil, true, 2048, thirtyThreeKiB}, + // Start from offset larger than possible + {int64(thirtyThreeKiB) + 1024, 0, 0, seekErr, false, 0, 0}, + // Move to offset 0 without comparing + {0, 0, 0, nil, false, 0, 0}, + // Move one step forward and compare + {1, 1, 1, nil, true, 1, thirtyThreeKiB}, + // Move larger than possible + {int64(thirtyThreeKiB), 1, 0, seekErr, false, 0, 0}, + // Provide negative offset with CUR_SEEK + {int64(-1), 1, 0, seekErr, false, 0, 0}, + // Test with whence SEEK_END and with positive offset + {1024, 2, int64(thirtyThreeKiB) - 1024, io.EOF, true, 0, 0}, + // Test with whence SEEK_END and with negative offset + {-1024, 2, int64(thirtyThreeKiB) - 1024, nil, true, thirtyThreeKiB - 1024, thirtyThreeKiB}, + // Test with whence SEEK_END and with large negative offset + {-int64(thirtyThreeKiB) * 2, 2, 0, seekErr, true, 0, 0}, + } + + for i, testCase := range testCases { + // Perform seek operation + n, err := r.Seek(testCase.offset, testCase.whence) + // We expect an 
error + if testCase.err == seekErr && err == nil { + failureLog(function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err).Fatal() + } + // We expect a specific error + if testCase.err != seekErr && testCase.err != err { + failureLog(function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err).Fatal() + } + // If we expect an error go to the next loop + if testCase.err != nil { + continue + } + // Check the returned seek pos + if n != testCase.pos { + failureLog(function, args, startTime, "", "Test "+string(i+1)+", number of bytes seeked does not match, expected "+string(testCase.pos)+", got "+string(n), err).Fatal() + } + // Compare only if shouldCmp is activated + if testCase.shouldCmp { + cmpData(r, testCase.start, testCase.end) + } + } + successLogger(function, args, startTime).Info() +} + +// Tests get object ReaderAt interface methods. +func testGetObjectReadAtFunctional() { + // initialize logging params + startTime := time.Now() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Generate 33K of data. + var reader = getDataReader("datafile-33-kB", thirtyThreeKiB) + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + } + + // Save the data + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + } + + if n != int64(thirtyThreeKiB) { + failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(n), err).Fatal() + } + + // read the data back + r, err := c.GetObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + } + offset := int64(2048) + + // read directly + buf1 := make([]byte, 512) + buf2 := make([]byte, 512) + buf3 := make([]byte, 512) + buf4 := make([]byte, 512) + + // Test readAt before stat is called. 
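The Seek table above drives home that the *minio.Object returned by GetObject behaves like an io.ReadSeeker. A small sketch that uses this to read only the last n bytes of an object (whence 2, i.e. io.SeekEnd); the client, bucket and object are assumed to exist:

```go
package example

import (
	"io"
	"io/ioutil"
	"log"

	minio "github.com/minio/minio-go"
)

// readTail seeks n bytes back from the end of an object and reads the
// remainder, without downloading the whole object first.
func readTail(c *minio.Client, bucket, object string, n int64) []byte {
	r, err := c.GetObject(bucket, object)
	if err != nil {
		log.Fatalln("GetObject failed:", err)
	}
	defer r.Close()

	// Position the reader n bytes before the end of the object.
	if _, err := r.Seek(-n, io.SeekEnd); err != nil {
		log.Fatalln("Seek failed:", err)
	}
	tail, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatalln("ReadAll failed:", err)
	}
	return tail
}
```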
+ m, err := r.ReadAt(buf1, offset) + if err != nil { + failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal() + } + if m != len(buf1) { + failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err).Fatal() + } + if !bytes.Equal(buf1, buf[offset:offset+512]) { + failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal() + } + offset += 512 + + st, err := r.Stat() + if err != nil { + failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + } + + if st.Size != int64(thirtyThreeKiB) { + failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(st.Size), err).Fatal() + } + + m, err = r.ReadAt(buf2, offset) + if err != nil { + failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal() + } + if m != len(buf2) { + failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err).Fatal() + } + if !bytes.Equal(buf2, buf[offset:offset+512]) { + failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal() + } + offset += 512 + m, err = r.ReadAt(buf3, offset) + if err != nil { + failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal() + } + if m != len(buf3) { + failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err).Fatal() + } + if !bytes.Equal(buf3, buf[offset:offset+512]) { + failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal() + } + offset += 512 + m, err = r.ReadAt(buf4, offset) + if err != nil { + failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal() + } + if m != len(buf4) { + failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err).Fatal() + } + if !bytes.Equal(buf4, buf[offset:offset+512]) { + failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal() + } + + buf5 := make([]byte, n) + // Read the whole object. + m, err = r.ReadAt(buf5, 0) + if err != nil { + if err != io.EOF { + failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal() + } + } + if m != len(buf5) { + failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err).Fatal() + } + if !bytes.Equal(buf, buf5) { + failureLog(function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err).Fatal() + } + + buf6 := make([]byte, n+1) + // Read the whole object and beyond. 
+ _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal() + } + } + err = c.RemoveObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Test Presigned Post Policy +func testPresignedPostPolicy() { + // initialize logging params + startTime := time.Now() + function := "PresignedPostPolicy(policy)" + args := map[string]interface{}{ + "policy": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Generate 33K of data. + var reader = getDataReader("datafile-33-kB", thirtyThreeKiB) + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + + buf, err := ioutil.ReadAll(reader) + if err != nil { + failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + } + + // Save the data + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + } + + if n != int64(thirtyThreeKiB) { + failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal() + } + + policy := minio.NewPostPolicy() + + if err := policy.SetBucket(""); err == nil { + failureLog(function, args, startTime, "", "SetBucket did not fail for invalid conditions", err).Fatal() + } + if err := policy.SetKey(""); err == nil { + failureLog(function, args, startTime, "", "SetKey did not fail for invalid conditions", err).Fatal() + } + if err := policy.SetKeyStartsWith(""); err == nil { + failureLog(function, args, startTime, "", "SetKeyStartsWith did not fail for invalid conditions", err).Fatal() + } + if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil { + failureLog(function, args, startTime, "", "SetExpires did not fail for invalid conditions", err).Fatal() + } + if err := policy.SetContentType(""); err == nil { + failureLog(function, args, startTime, "", "SetContentType did not fail for invalid conditions", err).Fatal() + } + if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil { + failureLog(function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err).Fatal() + } + + policy.SetBucket(bucketName) + policy.SetKey(objectName) + policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // 
expires in 10 days + policy.SetContentType("image/png") + policy.SetContentLengthRange(1024, 1024*1024) + args["policy"] = policy + + _, _, err = c.PresignedPostPolicy(policy) + if err != nil { + failureLog(function, args, startTime, "", "PresignedPostPolicy failed", err).Fatal() + } + + policy = minio.NewPostPolicy() + + // Remove all objects and buckets + err = c.RemoveObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Tests copy object +func testCopyObject() { + // initialize logging params + startTime := time.Now() + function := "CopyObject(dst, src)" + args := map[string]interface{}{ + "dst": "", + "src": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Make a new bucket in 'us-east-1' (destination bucket). + err = c.MakeBucket(bucketName+"-copy", "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Generate 33K of data. + var reader = getDataReader("datafile-33-kB", thirtyThreeKiB) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + n, err := c.PutObject(bucketName, objectName, reader, int64(thirtyThreeKiB), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + } + + if n != int64(thirtyThreeKiB) { + failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(n), err).Fatal() + } + + r, err := c.GetObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + } + // Check the various fields of source object against destination object. + objInfo, err := r.Stat() + if err != nil { + failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + } + + // Copy Source + src := minio.NewSourceInfo(bucketName, objectName, nil) + + // Set copy conditions. + + // All invalid conditions first. 
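A compact sketch of the POST-policy flow above, assuming an initialized client and an existing bucket; per the minio-go API the call is expected to return the upload URL together with the form fields a browser form would submit (the test discards both):

```go
package example

import (
	"log"
	"time"

	minio "github.com/minio/minio-go"
)

// presignedPost builds a POST policy like the one above (bucket, key,
// expiry, content-type and size range) and asks for the upload URL plus
// the accompanying form fields.
func presignedPost(c *minio.Client, bucket, object string) {
	pp := minio.NewPostPolicy()
	pp.SetBucket(bucket)
	pp.SetKey(object)
	pp.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
	pp.SetContentType("image/png")
	pp.SetContentLengthRange(1024, 1024*1024)

	u, formData, err := c.PresignedPostPolicy(pp)
	if err != nil {
		log.Fatalln("PresignedPostPolicy failed:", err)
	}
	log.Println("POST to:", u)
	for k, v := range formData {
		log.Printf("form field %s=%s", k, v)
	}
}
```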
+ err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)) + if err == nil { + failureLog(function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err).Fatal() + } + err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)) + if err == nil { + failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err).Fatal() + } + err = src.SetMatchETagCond("") + if err == nil { + failureLog(function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err).Fatal() + } + err = src.SetMatchETagExceptCond("") + if err == nil { + failureLog(function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err).Fatal() + } + + err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) + if err != nil { + failureLog(function, args, startTime, "", "SetModifiedSinceCond failed", err).Fatal() + } + err = src.SetMatchETagCond(objInfo.ETag) + if err != nil { + failureLog(function, args, startTime, "", "SetMatchETagCond failed", err).Fatal() + } + args["src"] = src + + dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil) + args["dst"] = dst + if err != nil { + failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal() + } + + // Perform the Copy + err = c.CopyObject(dst, src) + if err != nil { + failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal() + } + + // Source object + r, err = c.GetObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + } + + // Destination object + readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy") + if err != nil { + failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + } + // Check the various fields of source object against destination object. 
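A minimal sketch of the server-side copy with conditions shown above, assuming an initialized client, existing buckets, and a known source ETag; all names are placeholders:

```go
package example

import (
	"log"
	"time"

	minio "github.com/minio/minio-go"
)

// conditionalCopy copies srcBucket/srcObject into dstBucket/dstObject on
// the server side, but only if the source was modified after the given
// date and its ETag still matches.
func conditionalCopy(c *minio.Client, srcBucket, srcObject, dstBucket, dstObject, etag string) {
	src := minio.NewSourceInfo(srcBucket, srcObject, nil)
	if err := src.SetModifiedSinceCond(time.Date(2014, time.April, 1, 0, 0, 0, 0, time.UTC)); err != nil {
		log.Fatalln("SetModifiedSinceCond failed:", err)
	}
	if err := src.SetMatchETagCond(etag); err != nil {
		log.Fatalln("SetMatchETagCond failed:", err)
	}

	dst, err := minio.NewDestinationInfo(dstBucket, dstObject, nil, nil)
	if err != nil {
		log.Fatalln("NewDestinationInfo failed:", err)
	}
	if err := c.CopyObject(dst, src); err != nil {
		log.Fatalln("CopyObject failed:", err)
	}
}
```

If any condition is not met, the copy fails, which is exactly what the "wrong conditions" case later in the test checks for.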
+ objInfo, err = r.Stat() + if err != nil { + failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + } + objInfoCopy, err := readerCopy.Stat() + if err != nil { + failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + } + if objInfo.Size != objInfoCopy.Size { + failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+", got "+string(objInfo.Size), err).Fatal() + } + + // CopyObject again but with wrong conditions + src = minio.NewSourceInfo(bucketName, objectName, nil) + err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) + if err != nil { + failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond failed", err).Fatal() + } + err = src.SetMatchETagExceptCond(objInfo.ETag) + if err != nil { + failureLog(function, args, startTime, "", "SetMatchETagExceptCond failed", err).Fatal() + } + + // Perform the Copy which should fail + err = c.CopyObject(dst, src) + if err == nil { + failureLog(function, args, startTime, "", "CopyObject did not fail for invalid conditions", err).Fatal() + } + + // Remove all objects and buckets + err = c.RemoveObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + + err = c.RemoveObject(bucketName+"-copy", objectName+"-copy") + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + + err = c.RemoveBucket(bucketName + "-copy") + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// TestEncryptionPutGet tests client side encryption +func testEncryptionPutGet() { + // initialize logging params + startTime := time.Now() + function := "PutEncryptedObject(bucketName, objectName, reader, cbcMaterials, metadata, progress)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "cbcMaterials": "", + "metadata": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Generate a symmetric key + symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00")) + + // Generate an assymmetric key from predefine public and private certificates + privateKey, err := hex.DecodeString( + "30820277020100300d06092a864886f70d0101010500048202613082025d" + + "0201000281810087b42ea73243a3576dc4c0b6fa245d339582dfdbddc20c" + + "bb8ab666385034d997210c54ba79275c51162a1221c3fb1a4c7c61131ca6" + + "5563b319d83474ef5e803fbfa7e52b889e1893b02586b724250de7ac6351" + + "cc0b7c638c980acec0a07020a78eed7eaa471eca4b92071394e061346c06" + + "15ccce2f465dee2080a89e43f29b5702030100010281801dd5770c3af8b3" + + "c85cd18cacad81a11bde1acfac3eac92b00866e142301fee565365aa9af4" + + "57baebf8bb7711054d071319a51dd6869aef3848ce477a0dc5f0dbc0c336" + + "5814b24c820491ae2bb3c707229a654427e03307fec683e6b27856688f08" + + "bdaa88054c5eeeb773793ff7543ee0fb0e2ad716856f2777f809ef7e6fa4" + + "41024100ca6b1edf89e8a8f93cce4b98c76c6990a09eb0d32ad9d3d04fbf" + + "0b026fa935c44f0a1c05dd96df192143b7bda8b110ec8ace28927181fd8c" + + "d2f17330b9b63535024100aba0260afb41489451baaeba423bee39bcbd1e" + + "f63dd44ee2d466d2453e683bf46d019a8baead3a2c7fca987988eb4d565e" + + "27d6be34605953f5034e4faeec9bdb0241009db2cb00b8be8c36710aff96" + + "6d77a6dec86419baca9d9e09a2b761ea69f7d82db2ae5b9aae4246599bb2" + + "d849684d5ab40e8802cfe4a2b358ad56f2b939561d2902404e0ead9ecafd" + + "bb33f22414fa13cbcc22a86bdf9c212ce1a01af894e3f76952f36d6c904c" + + "bd6a7e0de52550c9ddf31f1e8bfe5495f79e66a25fca5c20b3af5b870241" + + "0083456232aa58a8c45e5b110494599bda8dbe6a094683a0539ddd24e19d" + + "47684263bbe285ad953d725942d670b8f290d50c0bca3d1dc9688569f1d5" + + "9945cb5c7d") + + if err != nil { + failureLog(function, args, startTime, "", "DecodeString for symmetric Key generation failed", err).Fatal() + } + + publicKey, err := hex.DecodeString("30819f300d06092a864886f70d010101050003818d003081890281810087" + + "b42ea73243a3576dc4c0b6fa245d339582dfdbddc20cbb8ab666385034d9" + + "97210c54ba79275c51162a1221c3fb1a4c7c61131ca65563b319d83474ef" + + "5e803fbfa7e52b889e1893b02586b724250de7ac6351cc0b7c638c980ace" + + "c0a07020a78eed7eaa471eca4b92071394e061346c0615ccce2f465dee20" + + "80a89e43f29b570203010001") + if err != nil { + failureLog(function, args, startTime, "", "DecodeString for symmetric Key generation failed", err).Fatal() + } + + // Generate an asymmetric key + asymKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey) + if err != nil { + failureLog(function, args, startTime, "", "NewAsymmetricKey for symmetric Key generation failed", err).Fatal() + } + + testCases := []struct { + buf []byte + encKey encrypt.Key + }{ + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 0)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 15)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 16)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 17)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 31)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 32)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 33)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*2)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*1024)}, + + {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 0)}, + {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1)}, + {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 16)}, + 
{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 32)}, + {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024)}, + {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + for i, testCase := range testCases { + // Generate a random object name + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Secured object + cbcMaterials, err := encrypt.NewCBCSecureMaterials(testCase.encKey) + args["cbcMaterials"] = cbcMaterials + + if err != nil { + failureLog(function, args, startTime, "", "NewCBCSecureMaterials failed", err).Fatal() + } + + // Put encrypted data + _, err = c.PutEncryptedObject(bucketName, objectName, bytes.NewReader(testCase.buf), cbcMaterials) + if err != nil { + failureLog(function, args, startTime, "", "PutEncryptedObject failed", err).Fatal() + } + + // Read the data back + r, err := c.GetEncryptedObject(bucketName, objectName, cbcMaterials) + if err != nil { + failureLog(function, args, startTime, "", "GetEncryptedObject failed", err).Fatal() + } + defer r.Close() + + // Compare the sent object with the received one + recvBuffer := bytes.NewBuffer([]byte{}) + if _, err = io.Copy(recvBuffer, r); err != nil { + failureLog(function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err).Fatal() + } + if recvBuffer.Len() != len(testCase.buf) { + failureLog(function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err).Fatal() + } + if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { + failureLog(function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err).Fatal() + } + + // Remove test object + err = c.RemoveObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "Test "+string(i+1)+", RemoveObject failed with: "+err.Error(), err).Fatal() + } + successLogger(function, args, startTime).Info() + + } + + // Remove test bucket + err = c.RemoveBucket(bucketName) + if err != nil { + err = c.RemoveBucket(bucketName) + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// TestEncryptionFPut tests client side encryption +func testEncryptionFPut() { + // initialize logging params + startTime := time.Now() + function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, cbcMaterials)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "filePath": "", + "contentType": "", + "cbcMaterials": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. 
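A condensed sketch of the client-side encryption round trip exercised above, using a 16-byte symmetric key. It assumes an initialized client, an existing bucket, and the pkg/encrypt import path these tests use:

```go
package example

import (
	"bytes"
	"io/ioutil"
	"log"

	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

// encryptedRoundTrip uploads a payload with client-side CBC encryption and
// reads it back, verifying the decrypted bytes match the original.
func encryptedRoundTrip(c *minio.Client, bucket, object string, payload []byte) {
	symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00")) // 16-byte key
	materials, err := encrypt.NewCBCSecureMaterials(symKey)
	if err != nil {
		log.Fatalln("NewCBCSecureMaterials failed:", err)
	}

	if _, err = c.PutEncryptedObject(bucket, object, bytes.NewReader(payload), materials); err != nil {
		log.Fatalln("PutEncryptedObject failed:", err)
	}

	r, err := c.GetEncryptedObject(bucket, object, materials)
	if err != nil {
		log.Fatalln("GetEncryptedObject failed:", err)
	}
	defer r.Close()

	plain, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatalln("ReadAll failed:", err)
	}
	if !bytes.Equal(plain, payload) {
		log.Fatalln("decrypted data does not match the original payload")
	}
}
```

Because the CBC materials do the encryption and decryption on the client, the server only ever stores ciphertext; the same materials must be supplied on both the put and the get.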
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Generate a symmetric key + symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00")) + + // Generate an assymmetric key from predefine public and private certificates + privateKey, err := hex.DecodeString( + "30820277020100300d06092a864886f70d0101010500048202613082025d" + + "0201000281810087b42ea73243a3576dc4c0b6fa245d339582dfdbddc20c" + + "bb8ab666385034d997210c54ba79275c51162a1221c3fb1a4c7c61131ca6" + + "5563b319d83474ef5e803fbfa7e52b889e1893b02586b724250de7ac6351" + + "cc0b7c638c980acec0a07020a78eed7eaa471eca4b92071394e061346c06" + + "15ccce2f465dee2080a89e43f29b5702030100010281801dd5770c3af8b3" + + "c85cd18cacad81a11bde1acfac3eac92b00866e142301fee565365aa9af4" + + "57baebf8bb7711054d071319a51dd6869aef3848ce477a0dc5f0dbc0c336" + + "5814b24c820491ae2bb3c707229a654427e03307fec683e6b27856688f08" + + "bdaa88054c5eeeb773793ff7543ee0fb0e2ad716856f2777f809ef7e6fa4" + + "41024100ca6b1edf89e8a8f93cce4b98c76c6990a09eb0d32ad9d3d04fbf" + + "0b026fa935c44f0a1c05dd96df192143b7bda8b110ec8ace28927181fd8c" + + "d2f17330b9b63535024100aba0260afb41489451baaeba423bee39bcbd1e" + + "f63dd44ee2d466d2453e683bf46d019a8baead3a2c7fca987988eb4d565e" + + "27d6be34605953f5034e4faeec9bdb0241009db2cb00b8be8c36710aff96" + + "6d77a6dec86419baca9d9e09a2b761ea69f7d82db2ae5b9aae4246599bb2" + + "d849684d5ab40e8802cfe4a2b358ad56f2b939561d2902404e0ead9ecafd" + + "bb33f22414fa13cbcc22a86bdf9c212ce1a01af894e3f76952f36d6c904c" + + "bd6a7e0de52550c9ddf31f1e8bfe5495f79e66a25fca5c20b3af5b870241" + + "0083456232aa58a8c45e5b110494599bda8dbe6a094683a0539ddd24e19d" + + "47684263bbe285ad953d725942d670b8f290d50c0bca3d1dc9688569f1d5" + + "9945cb5c7d") + + if err != nil { + failureLog(function, args, startTime, "", "DecodeString for symmetric Key generation failed", err).Fatal() + } + + publicKey, err := hex.DecodeString("30819f300d06092a864886f70d010101050003818d003081890281810087" + + "b42ea73243a3576dc4c0b6fa245d339582dfdbddc20cbb8ab666385034d9" + + "97210c54ba79275c51162a1221c3fb1a4c7c61131ca65563b319d83474ef" + + "5e803fbfa7e52b889e1893b02586b724250de7ac6351cc0b7c638c980ace" + + "c0a07020a78eed7eaa471eca4b92071394e061346c0615ccce2f465dee20" + + "80a89e43f29b570203010001") + if err != nil { + failureLog(function, args, startTime, "", "DecodeString for symmetric Key generation failed", err).Fatal() + } + + // Generate an asymmetric key + asymKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey) + if err != nil { + failureLog(function, args, startTime, "", "NewAsymmetricKey for symmetric Key generation failed", err).Fatal() + } + + // Object custom metadata + customContentType := "custom/contenttype" + args["metadata"] = customContentType + + testCases := []struct { + buf []byte + encKey encrypt.Key + }{ + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 0)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 15)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 16)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 17)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 31)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 32)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 33)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*2)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*1024)}, + + {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 0)}, + 
{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1)}, + {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 16)}, + {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 32)}, + {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024)}, + {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + for i, testCase := range testCases { + // Generate a random object name + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Secured object + cbcMaterials, err := encrypt.NewCBCSecureMaterials(testCase.encKey) + args["cbcMaterials"] = cbcMaterials + + if err != nil { + failureLog(function, args, startTime, "", "NewCBCSecureMaterials failed", err).Fatal() + } + // Generate a random file name. + fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + file, err := os.Create(fileName) + if err != nil { + failureLog(function, args, startTime, "", "file create failed", err).Fatal() + } + _, err = file.Write(testCase.buf) + if err != nil { + failureLog(function, args, startTime, "", "file write failed", err).Fatal() + } + file.Close() + // Put encrypted data + if _, err = c.FPutEncryptedObject(bucketName, objectName, fileName, cbcMaterials); err != nil { + failureLog(function, args, startTime, "", "FPutEncryptedObject failed", err).Fatal() + } + + // Read the data back + r, err := c.GetEncryptedObject(bucketName, objectName, cbcMaterials) + if err != nil { + failureLog(function, args, startTime, "", "GetEncryptedObject failed", err).Fatal() + } + defer r.Close() + + // Compare the sent object with the received one + recvBuffer := bytes.NewBuffer([]byte{}) + if _, err = io.Copy(recvBuffer, r); err != nil { + failureLog(function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err).Fatal() + } + if recvBuffer.Len() != len(testCase.buf) { + failureLog(function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err).Fatal() + } + if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { + failureLog(function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err).Fatal() + } + + // Remove test object + err = c.RemoveObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "Test "+string(i+1)+", RemoveObject failed with: "+err.Error(), err).Fatal() + } + if err = os.Remove(fileName); err != nil { + failureLog(function, args, startTime, "", "File remove failed", err).Fatal() + } + } + + // Remove test bucket + err = c.RemoveBucket(bucketName) + if err != nil { + err = c.RemoveBucket(bucketName) + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} +func testBucketNotification() { + // initialize logging params + startTime := time.Now() + function := "SetBucketNotification(bucketName)" + args := map[string]interface{}{ + "bucketName": "", + } + + if os.Getenv("NOTIFY_BUCKET") == "" || + os.Getenv("NOTIFY_SERVICE") == "" || + os.Getenv("NOTIFY_REGION") == "" || + os.Getenv("NOTIFY_ACCOUNTID") == "" || + os.Getenv("NOTIFY_RESOURCE") == "" { + ignoredLog(function, args, startTime, "Skipped notification test as it is not configured").Info() + return + } + + // Seed random based on current time. 
+ rand.Seed(time.Now().Unix()) + + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // Enable to debug + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + bucketName := os.Getenv("NOTIFY_BUCKET") + args["bucketName"] = bucketName + + topicArn := minio.NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE")) + queueArn := minio.NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource") + + topicConfig := minio.NewNotificationConfig(topicArn) + + topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll) + topicConfig.AddFilterSuffix("jpg") + + queueConfig := minio.NewNotificationConfig(queueArn) + queueConfig.AddEvents(minio.ObjectCreatedAll) + queueConfig.AddFilterPrefix("photos/") + + bNotification := minio.BucketNotification{} + bNotification.AddTopic(topicConfig) + + // Add the same topicConfig again, should have no effect + // because it is duplicated + bNotification.AddTopic(topicConfig) + if len(bNotification.TopicConfigs) != 1 { + failureLog(function, args, startTime, "", "Duplicate entry added", err).Fatal() + } + + // Add and remove a queue config + bNotification.AddQueue(queueConfig) + bNotification.RemoveQueueByArn(queueArn) + + err = c.SetBucketNotification(bucketName, bNotification) + if err != nil { + failureLog(function, args, startTime, "", "SetBucketNotification failed", err).Fatal() + } + + bNotification, err = c.GetBucketNotification(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "GetBucketNotification failed", err).Fatal() + } + + if len(bNotification.TopicConfigs) != 1 { + failureLog(function, args, startTime, "", "Topic config is empty", err).Fatal() + } + + if bNotification.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" { + failureLog(function, args, startTime, "", "Couldn't get the suffix", err).Fatal() + } + + err = c.RemoveAllBucketNotification(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveAllBucketNotification failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Tests comprehensive list of all methods. +func testFunctional() { + // initialize logging params + startTime := time.Now() + function := "testFunctional()" + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, nil, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // Enable to debug + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + function = "MakeBucket(bucketName, region)" + args := map[string]interface{}{ + "bucketName": bucketName, + } + + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Generate a random file name. 
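A minimal sketch of the notification setup exercised above, assuming an initialized client and a bucket for which notifications are supported; the ARN fields below are placeholders standing in for the NOTIFY_* environment variables the test reads:

```go
package example

import (
	"log"

	minio "github.com/minio/minio-go"
)

// configureNotifications attaches a topic notification for object create
// and remove events on ".jpg" keys, reads the configuration back, and then
// clears it again.
func configureNotifications(c *minio.Client, bucket string) {
	topicArn := minio.NewArn("aws", "sns", "us-east-1", "000000000000", "my-topic")

	topicConfig := minio.NewNotificationConfig(topicArn)
	topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
	topicConfig.AddFilterSuffix("jpg")

	bNotification := minio.BucketNotification{}
	bNotification.AddTopic(topicConfig)

	if err := c.SetBucketNotification(bucket, bNotification); err != nil {
		log.Fatalln("SetBucketNotification failed:", err)
	}

	current, err := c.GetBucketNotification(bucket)
	if err != nil {
		log.Fatalln("GetBucketNotification failed:", err)
	}
	log.Printf("topic configs: %d", len(current.TopicConfigs))

	if err := c.RemoveAllBucketNotification(bucket); err != nil {
		log.Fatalln("RemoveAllBucketNotification failed:", err)
	}
}
```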
+ fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + file, err := os.Create(fileName) + if err != nil { + failureLog(function, args, startTime, "", "File creation failed", err).Fatal() + } + for i := 0; i < 3; i++ { + buf := make([]byte, rand.Intn(1<<19)) + _, err = file.Write(buf) + if err != nil { + failureLog(function, args, startTime, "", "File write failed", err).Fatal() + } + } + file.Close() + + // Verify if bucket exits and you have access. + var exists bool + exists, err = c.BucketExists(bucketName) + function = "BucketExists(bucketName)" + args = map[string]interface{}{ + "bucketName": bucketName, + } + + if err != nil { + failureLog(function, args, startTime, "", "BucketExists failed", err).Fatal() + } + if !exists { + failureLog(function, args, startTime, "", "Could not find the bucket", err).Fatal() + } + + // Asserting the default bucket policy. + policyAccess, err := c.GetBucketPolicy(bucketName, "") + function = "GetBucketPolicy(bucketName, objectPrefix)" + args = map[string]interface{}{ + "bucketName": bucketName, + "objectPrefix": "", + } + + if err != nil { + failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal() + } + if policyAccess != "none" { + failureLog(function, args, startTime, "", "policy should be set to none", err).Fatal() + } + // Set the bucket policy to 'public readonly'. + err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadOnly) + function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)" + args = map[string]interface{}{ + "bucketName": bucketName, + "objectPrefix": "", + "bucketPolicy": policy.BucketPolicyReadOnly, + } + + if err != nil { + failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal() + } + // should return policy `readonly`. + policyAccess, err = c.GetBucketPolicy(bucketName, "") + function = "GetBucketPolicy(bucketName, objectPrefix)" + args = map[string]interface{}{ + "bucketName": bucketName, + "objectPrefix": "", + } + + if err != nil { + failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal() + } + if policyAccess != "readonly" { + failureLog(function, args, startTime, "", "policy should be set to readonly", err).Fatal() + } + + // Make the bucket 'public writeonly'. + err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyWriteOnly) + function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)" + args = map[string]interface{}{ + "bucketName": bucketName, + "objectPrefix": "", + "bucketPolicy": policy.BucketPolicyWriteOnly, + } + + if err != nil { + failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal() + } + // should return policy `writeonly`. + policyAccess, err = c.GetBucketPolicy(bucketName, "") + function = "GetBucketPolicy(bucketName, objectPrefix)" + args = map[string]interface{}{ + "bucketName": bucketName, + "objectPrefix": "", + } + + if err != nil { + failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal() + } + if policyAccess != "writeonly" { + failureLog(function, args, startTime, "", "policy should be set to writeonly", err).Fatal() + } + // Make the bucket 'public read/write'. 
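A small sketch of the canned bucket-policy round trip above, assuming an initialized client, an existing bucket, and the pkg/policy import path these tests use; a freshly created bucket is expected to report "none":

```go
package example

import (
	"log"

	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/policy"
)

// makeBucketReadOnly sets a canned "public read-only" policy on the bucket
// prefix and reads it back to confirm the change.
func makeBucketReadOnly(c *minio.Client, bucket string) {
	current, err := c.GetBucketPolicy(bucket, "")
	if err != nil {
		log.Fatalln("GetBucketPolicy failed:", err)
	}
	log.Printf("current policy: %v", current)

	if err := c.SetBucketPolicy(bucket, "", policy.BucketPolicyReadOnly); err != nil {
		log.Fatalln("SetBucketPolicy failed:", err)
	}

	current, err = c.GetBucketPolicy(bucket, "")
	if err != nil {
		log.Fatalln("GetBucketPolicy failed:", err)
	}
	if current != "readonly" {
		log.Fatalln("expected the bucket policy to be readonly, got", current)
	}
}
```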
+ err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite) + function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)" + args = map[string]interface{}{ + "bucketName": bucketName, + "objectPrefix": "", + "bucketPolicy": policy.BucketPolicyReadWrite, + } + + if err != nil { + failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal() + } + // should return policy `readwrite`. + policyAccess, err = c.GetBucketPolicy(bucketName, "") + function = "GetBucketPolicy(bucketName, objectPrefix)" + args = map[string]interface{}{ + "bucketName": bucketName, + "objectPrefix": "", + } + + if err != nil { + failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal() + } + if policyAccess != "readwrite" { + failureLog(function, args, startTime, "", "policy should be set to readwrite", err).Fatal() + } + // List all buckets. + buckets, err := c.ListBuckets() + function = "ListBuckets()" + args = nil + + if len(buckets) == 0 { + failureLog(function, args, startTime, "", "Found bucket list to be empty", err).Fatal() + } + if err != nil { + failureLog(function, args, startTime, "", "ListBuckets failed", err).Fatal() + } + + // Verify if previously created bucket is listed in list buckets. + bucketFound := false + for _, bucket := range buckets { + if bucket.Name == bucketName { + bucketFound = true + } + } + + // If bucket not found error out. + if !bucketFound { + failureLog(function, args, startTime, "", "Bucket: "+bucketName+" not found", err).Fatal() + } + + objectName := bucketName + "unique" + + // Generate data + buf := bytes.Repeat([]byte("f"), 1<<19) + + function = "PutObject(bucketName, objectName, reader, contentType)" + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "contentType": "", + } + + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + } + + if n != int64(len(buf)) { + failureLog(function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err).Fatal() + } + + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-nolength", + "contentType": "binary/octet-stream", + } + + n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + } + + if n != int64(len(buf)) { + failureLog(function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err).Fatal() + } + + // Instantiate a done channel to close all listing. + doneCh := make(chan struct{}) + defer close(doneCh) + + objFound := false + isRecursive := true // Recursive is true. + + function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + + for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + failureLog(function, args, startTime, "", "Object "+objectName+" not found", err).Fatal() + } + + objFound = false + isRecursive = true // Recursive is true. 
+ function = "ListObjectsV2(bucketName, objectName, isRecursive, doneCh)" + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + + for obj := range c.ListObjectsV2(bucketName, objectName, isRecursive, doneCh) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + failureLog(function, args, startTime, "", "Object "+objectName+" not found", err).Fatal() + } + + incompObjNotFound := true + + function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + + for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) { + if objIncompl.Key != "" { + incompObjNotFound = false + break + } + } + if !incompObjNotFound { + failureLog(function, args, startTime, "", "Unexpected dangling incomplete upload found", err).Fatal() + } + + newReader, err := c.GetObject(bucketName, objectName) + function = "GetObject(bucketName, objectName)" + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + } + + if err != nil { + failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + } + + newReadBytes, err := ioutil.ReadAll(newReader) + if err != nil { + failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + } + + if !bytes.Equal(newReadBytes, buf) { + failureLog(function, args, startTime, "", "GetObject bytes mismatch", err).Fatal() + } + + err = c.FGetObject(bucketName, objectName, fileName+"-f") + function = "FGetObject(bucketName, objectName, fileName)" + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "fileName": fileName + "-f", + } + + if err != nil { + failureLog(function, args, startTime, "", "FGetObject failed", err).Fatal() + } + + // Generate presigned HEAD object url. + presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil) + function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + } + + if err != nil { + failureLog(function, args, startTime, "", "PresignedHeadObject failed", err).Fatal() + } + // Verify if presigned url works. + resp, err := http.Head(presignedHeadURL.String()) + if err != nil { + failureLog(function, args, startTime, "", "PresignedHeadObject response incorrect", err).Fatal() + } + if resp.StatusCode != http.StatusOK { + failureLog(function, args, startTime, "", "PresignedHeadObject response incorrect, status "+string(resp.StatusCode), err).Fatal() + } + if resp.Header.Get("ETag") == "" { + failureLog(function, args, startTime, "", "PresignedHeadObject response incorrect", err).Fatal() + } + resp.Body.Close() + + // Generate presigned GET object url. + presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil) + function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + } + + if err != nil { + failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal() + } + + // Verify if presigned url works. 
+ resp, err = http.Get(presignedGetURL.String()) + if err != nil { + failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal() + } + if resp.StatusCode != http.StatusOK { + failureLog(function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err).Fatal() + } + newPresignedBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal() + } + resp.Body.Close() + if !bytes.Equal(newPresignedBytes, buf) { + failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal() + } + + // Set request parameters. + reqParams := make(url.Values) + reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"") + presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams) + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + "reqParams": reqParams, + } + + if err != nil { + failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal() + } + // Verify if presigned url works. + resp, err = http.Get(presignedGetURL.String()) + if err != nil { + failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal() + } + if resp.StatusCode != http.StatusOK { + failureLog(function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err).Fatal() + } + newPresignedBytes, err = ioutil.ReadAll(resp.Body) + if err != nil { + failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal() + } + if !bytes.Equal(newPresignedBytes, buf) { + failureLog(function, args, startTime, "", "Bytes mismatch for presigned GET URL", err).Fatal() + } + if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" { + failureLog(function, args, startTime, "", "wrong Content-Disposition received "+string(resp.Header.Get("Content-Disposition")), err).Fatal() + } + + presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second) + + function = "PresignedPutObject(bucketName, objectName, expires)" + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + } + + if err != nil { + failureLog(function, args, startTime, "", "PresignedPutObject failed", err).Fatal() + } + + buf = bytes.Repeat([]byte("g"), 1<<19) + + req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf)) + if err != nil { + failureLog(function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err).Fatal() + } + httpClient := &http.Client{ + // Setting a sensible time out of 30secs to wait for response + // headers. Request is pro-actively cancelled after 30secs + // with no response. 
+ Timeout: 30 * time.Second, + Transport: http.DefaultTransport, + } + resp, err = httpClient.Do(req) + if err != nil { + failureLog(function, args, startTime, "", "PresignedPutObject failed", err).Fatal() + } + + newReader, err = c.GetObject(bucketName, objectName+"-presigned") + if err != nil { + failureLog(function, args, startTime, "", "GetObject after PresignedPutObject failed", err).Fatal() + } + + newReadBytes, err = ioutil.ReadAll(newReader) + if err != nil { + failureLog(function, args, startTime, "", "ReadAll after GetObject failed", err).Fatal() + } + + if !bytes.Equal(newReadBytes, buf) { + failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal() + } + + err = c.RemoveObject(bucketName, objectName) + function = "RemoveObject(bucketName, objectName)" + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + } + + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + err = c.RemoveObject(bucketName, objectName+"-f") + args["objectName"] = objectName + "-f" + + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + + err = c.RemoveObject(bucketName, objectName+"-nolength") + args["objectName"] = objectName + "-nolength" + + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + + err = c.RemoveObject(bucketName, objectName+"-presigned") + args["objectName"] = objectName + "-presigned" + + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + + err = c.RemoveBucket(bucketName) + function = "RemoveBucket(bucketName)" + args = map[string]interface{}{ + "bucketName": bucketName, + } + + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + err = c.RemoveBucket(bucketName) + if err == nil { + failureLog(function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err).Fatal() + } + if err.Error() != "The specified bucket does not exist" { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + if err = os.Remove(fileName); err != nil { + failureLog(function, args, startTime, "", "File Remove failed", err).Fatal() + } + if err = os.Remove(fileName + "-f"); err != nil { + failureLog(function, args, startTime, "", "File Remove failed", err).Fatal() + } + function = "testFunctional()" + successLogger(function, args, startTime).Info() +} + +// Test for validating GetObject Reader* methods functioning when the +// object is modified in the object store. +func testGetObjectObjectModified() { + // initialize logging params + startTime := time.Now() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + } + + // Instantiate new minio client object. + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Make a new bucket. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + defer c.RemoveBucket(bucketName) + + // Upload an object. + objectName := "myobject" + content := "helloworld" + _, err = c.PutObject(bucketName, objectName, strings.NewReader(content), int64(len(content)), minio.PutObjectOptions{ContentType: "application/text"}) + if err != nil { + failureLog(function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err).Fatal() + } + + defer c.RemoveObject(bucketName, objectName) + + reader, err := c.GetObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err).Fatal() + } + defer reader.Close() + + // Read a few bytes of the object. + b := make([]byte, 5) + n, err := reader.ReadAt(b, 0) + if err != nil { + failureLog(function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err).Fatal() + } + + // Upload different contents to the same object while object is being read. + newContent := "goodbyeworld" + _, err = c.PutObject(bucketName, objectName, strings.NewReader(newContent), int64(len(newContent)), minio.PutObjectOptions{ContentType: "application/text"}) + if err != nil { + failureLog(function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err).Fatal() + } + + // Confirm that a Stat() call in between doesn't change the Object's cached etag. + _, err = reader.Stat() + expectedError := "At least one of the pre-conditions you specified did not hold" + if err.Error() != expectedError { + failureLog(function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received "+err.Error(), err).Fatal() + } + + // Read again only to find object contents have been modified since last read. + _, err = reader.ReadAt(b, int64(n)) + if err.Error() != expectedError { + failureLog(function, args, startTime, "", "Expected ReadAt to fail with error "+expectedError+", but received "+err.Error(), err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Test validates putObject to upload a file seeked at a given offset. +func testPutObjectUploadSeekedObject() { + // initialize logging params + startTime := time.Now() + function := "PutObject(bucketName, objectName, fileToUpload, contentType)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileToUpload": "", + "contentType": "binary/octet-stream", + } + + // Instantiate new minio client object. + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Make a new bucket. 
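Reviewer note: `GetObject` in this client hands back a `*minio.Object` reader, and the test above expects that once the remote object is overwritten, `Stat` and further `ReadAt` calls fail with a precondition error against the cached ETag rather than returning mixed data. A rough usage sketch under those assumptions (all names are placeholders):

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	c, err := minio.NewV4("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// GetObject returns a reader; data is fetched as it is read.
	obj, err := c.GetObject("my-bucket", "my-object")
	if err != nil {
		log.Fatalln(err)
	}
	defer obj.Close()

	buf := make([]byte, 5)
	if _, err = obj.ReadAt(buf, 0); err != nil {
		log.Fatalln(err)
	}

	// If the object was overwritten since the first read, Stat (and further
	// reads) are expected to fail with a precondition error.
	if _, err = obj.Stat(); err != nil {
		log.Println("object changed underneath the reader:", err)
	}
}
```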
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + defer c.RemoveBucket(bucketName) + + tempfile, err := ioutil.TempFile("", "minio-go-upload-test-") + args["fileToUpload"] = tempfile + + if err != nil { + failureLog(function, args, startTime, "", "TempFile create failed", err).Fatal() + } + + var data []byte + if fileName := getFilePath("datafile-100-kB"); fileName != "" { + data, _ = ioutil.ReadFile(fileName) + } else { + // Generate 100kB data + data = bytes.Repeat([]byte("1"), 120000) + } + var length = len(data) + if _, err = tempfile.Write(data); err != nil { + failureLog(function, args, startTime, "", "TempFile write failed", err).Fatal() + } + + objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) + args["objectName"] = objectName + + offset := length / 2 + if _, err := tempfile.Seek(int64(offset), 0); err != nil { + failureLog(function, args, startTime, "", "TempFile seek failed", err).Fatal() + } + + n, err := c.PutObject(bucketName, objectName, tempfile, int64(length), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + } + if n != int64(length-offset) { + failureLog(function, args, startTime, "", "Invalid length returned, expected "+string(int64(length-offset))+" got "+string(n), err).Fatal() + } + tempfile.Close() + if err = os.Remove(tempfile.Name()); err != nil { + failureLog(function, args, startTime, "", "File remove failed", err).Fatal() + } + + length = int(n) + + obj, err := c.GetObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + } + + n, err = obj.Seek(int64(offset), 0) + if err != nil { + failureLog(function, args, startTime, "", "Seek failed", err).Fatal() + } + if n != int64(offset) { + failureLog(function, args, startTime, "", "Invalid offset returned, expected "+string(int64(offset))+" got "+string(n), err).Fatal() + } + + n, err = c.PutObject(bucketName, objectName+"getobject", obj, int64(length), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + } + if n != int64(length-offset) { + failureLog(function, args, startTime, "", "Invalid offset returned, expected "+string(int64(length-offset))+" got "+string(n), err).Fatal() + } + + if err = c.RemoveObject(bucketName, objectName); err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + + if err = c.RemoveObject(bucketName, objectName+"getobject"); err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + + if err = c.RemoveBucket(bucketName); err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Tests bucket re-create errors. 
+func testMakeBucketErrorV2() { + // initialize logging params + startTime := time.Now() + function := "MakeBucket(bucketName, region)" + args := map[string]interface{}{ + "bucketName": "", + "region": "eu-west-1", + } + + if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { + ignoredLog(function, args, startTime, "Skipped region functional tests for non s3 runs").Info() + return + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket in 'eu-west-1'. + if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + if err = c.MakeBucket(bucketName, "eu-west-1"); err == nil { + failureLog(function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err).Fatal() + } + // Verify valid error response from server. + if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && + minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { + failureLog(function, args, startTime, "", "Invalid error returned by server", err).Fatal() + } + if err = c.RemoveBucket(bucketName); err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Test get object reader to not throw error on being closed twice. +func testGetObjectClosedTwiceV2() { + // initialize logging params + startTime := time.Now() + function := "MakeBucket(bucketName, region)" + args := map[string]interface{}{ + "bucketName": "", + "region": "eu-west-1", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Generate 33K of data. 
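Reviewer note: the re-create check above branches on the server's error code through `minio.ToErrorResponse`. A small sketch of that pattern (endpoint, keys, and bucket name are placeholders):

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	c, err := minio.NewV2("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	if err = c.MakeBucket("my-bucket", "us-east-1"); err != nil {
		// Unwrap the S3 error response to branch on the server-side code.
		switch minio.ToErrorResponse(err).Code {
		case "BucketAlreadyExists", "BucketAlreadyOwnedByYou":
			log.Println("bucket is already there, carrying on")
		default:
			log.Fatalln(err)
		}
	}
}
```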
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB) + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + n, err := c.PutObject(bucketName, objectName, reader, int64(thirtyThreeKiB), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + } + + if n != int64(thirtyThreeKiB) { + failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(thirtyThreeKiB)+" got "+string(n), err).Fatal() + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + } + + st, err := r.Stat() + if err != nil { + failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + } + + if st.Size != int64(thirtyThreeKiB) { + failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(thirtyThreeKiB)+" got "+string(st.Size), err).Fatal() + } + if err := r.Close(); err != nil { + failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + } + if err := r.Close(); err == nil { + failureLog(function, args, startTime, "", "Object is already closed, should return error", err).Fatal() + } + + err = c.RemoveObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Tests removing partially uploaded objects. +func testRemovePartiallyUploadedV2() { + // initialize logging params + startTime := time.Now() + function := "RemoveIncompleteUpload(bucketName, objectName)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + } + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Enable tracing, write to stdout. + // c.TraceOn(os.Stderr) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // make a new bucket. 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024)) + + reader, writer := io.Pipe() + go func() { + i := 0 + for i < 25 { + _, cerr := io.CopyN(writer, r, 128*1024) + if cerr != nil { + failureLog(function, args, startTime, "", "Copy failed", cerr).Fatal() + } + i++ + r.Seek(0, 0) + } + writer.CloseWithError(errors.New("proactively closed to be verified later")) + }() + + objectName := bucketName + "-resumable" + args["objectName"] = objectName + + _, err = c.PutObject(bucketName, objectName, reader, -1, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err == nil { + failureLog(function, args, startTime, "", "PutObject should fail", err).Fatal() + } + if err.Error() != "proactively closed to be verified later" { + failureLog(function, args, startTime, "", "Unexpected error, expected : proactively closed to be verified later", err).Fatal() + } + err = c.RemoveIncompleteUpload(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveIncompleteUpload failed", err).Fatal() + } + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Tests FPutObject hidden contentType setting +func testFPutObjectV2() { + // initialize logging params + startTime := time.Now() + function := "FPutObject(bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileName": "", + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Make a temp file with 11*1024*1024 bytes of data. + file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") + if err != nil { + failureLog(function, args, startTime, "", "TempFile creation failed", err).Fatal() + } + + r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024)) + n, err := io.CopyN(file, r, 11*1024*1024) + if err != nil { + failureLog(function, args, startTime, "", "Copy failed", err).Fatal() + } + if n != int64(11*1024*1024) { + failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err).Fatal() + } + + // Close the file pro-actively for windows. 
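Reviewer note: passing a size of -1 asks the client to stream the reader in parts until EOF; the test above aborts the stream on purpose and then cleans up with `RemoveIncompleteUpload`. A compact sketch of the same calls on the happy path (placeholder names again):

```go
package main

import (
	"bytes"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	c, err := minio.NewV4("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	payload := bytes.Repeat([]byte("a"), 128*1024)

	// Size -1: the client streams the reader until EOF.
	n, err := c.PutObject("my-bucket", "streamed-object",
		bytes.NewReader(payload), -1,
		minio.PutObjectOptions{ContentType: "application/octet-stream"})
	if err != nil {
		// If a streamed upload is interrupted, drop the dangling parts.
		_ = c.RemoveIncompleteUpload("my-bucket", "streamed-object")
		log.Fatalln(err)
	}
	log.Printf("streamed %d bytes", n)
}
```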
+ err = file.Close() + if err != nil { + failureLog(function, args, startTime, "", "File close failed", err).Fatal() + } + + // Set base object name + objectName := bucketName + "FPutObject" + args["objectName"] = objectName + args["fileName"] = file.Name() + + // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) + n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal() + } + if n != int64(11*1024*1024) { + failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err).Fatal() + } + + // Perform FPutObject with no contentType provided (Expecting application/octet-stream) + args["objectName"] = objectName + "-Octet" + args["contentType"] = "" + + n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), minio.PutObjectOptions{}) + if err != nil { + failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal() + } + if n != int64(11*1024*1024) { + failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err).Fatal() + } + + // Add extension to temp file name + fileName := file.Name() + err = os.Rename(file.Name(), fileName+".gtar") + if err != nil { + failureLog(function, args, startTime, "", "Rename failed", err).Fatal() + } + + // Perform FPutObject with no contentType provided (Expecting application/x-gtar) + args["objectName"] = objectName + "-Octet" + args["contentType"] = "" + args["fileName"] = fileName + ".gtar" + + n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", minio.PutObjectOptions{}) + if err != nil { + failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal() + } + if n != int64(11*1024*1024) { + failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err).Fatal() + } + + // Check headers + rStandard, err := c.StatObject(bucketName, objectName+"-standard") + if err != nil { + failureLog(function, args, startTime, "", "StatObject failed", err).Fatal() + } + if rStandard.ContentType != "application/octet-stream" { + failureLog(function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err).Fatal() + } + + rOctet, err := c.StatObject(bucketName, objectName+"-Octet") + if err != nil { + failureLog(function, args, startTime, "", "StatObject failed", err).Fatal() + } + if rOctet.ContentType != "application/octet-stream" { + failureLog(function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err).Fatal() + } + + rGTar, err := c.StatObject(bucketName, objectName+"-GTar") + if err != nil { + failureLog(function, args, startTime, "", "StatObject failed", err).Fatal() + } + if rGTar.ContentType != "application/x-gtar" { + failureLog(function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got "+rGTar.ContentType, err).Fatal() + } + + // Remove all objects and bucket and temp file + err = c.RemoveObject(bucketName, objectName+"-standard") + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + + err = c.RemoveObject(bucketName, objectName+"-Octet") + if err != 
nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + + err = c.RemoveObject(bucketName, objectName+"-GTar") + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + + err = os.Remove(fileName + ".gtar") + if err != nil { + failureLog(function, args, startTime, "", "File remove failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Tests various bucket supported formats. +func testMakeBucketRegionsV2() { + // initialize logging params + startTime := time.Now() + function := "MakeBucket(bucketName, region)" + args := map[string]interface{}{ + "bucketName": "", + "region": "eu-west-1", + } + + if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { + ignoredLog(function, args, startTime, "Skipped region functional tests for non s3 runs").Info() + return + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket in 'eu-central-1'. + if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + if err = c.RemoveBucket(bucketName); err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + + // Make a new bucket with '.' in its name, in 'us-west-2'. This + // request is internally staged into a path style instead of + // virtual host style. + if err = c.MakeBucket(bucketName+".withperiod", "us-west-2"); err != nil { + args["bucketName"] = bucketName + ".withperiod" + args["region"] = "us-west-2" + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Remove the newly created bucket. + if err = c.RemoveBucket(bucketName + ".withperiod"); err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Tests get object ReaderSeeker interface methods. +func testGetObjectReadSeekFunctionalV2() { + // initialize logging params + startTime := time.Now() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. 
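Reviewer note: `FPutObject` now also takes `minio.PutObjectOptions`; when no `ContentType` is given, the checks above expect the type to be derived from the file extension (for example `.gtar` becomes `application/x-gtar`) and to fall back to `application/octet-stream` otherwise. A minimal sketch; the file path, bucket, and object names are placeholders:

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	c, err := minio.NewV2("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Explicit content type.
	if _, err = c.FPutObject("my-bucket", "archive-explicit", "/tmp/archive.gtar",
		minio.PutObjectOptions{ContentType: "application/octet-stream"}); err != nil {
		log.Fatalln(err)
	}

	// No content type set: derived from the ".gtar" extension per the tests above.
	if _, err = c.FPutObject("my-bucket", "archive-sniffed", "/tmp/archive.gtar",
		minio.PutObjectOptions{}); err != nil {
		log.Fatalln(err)
	}

	info, err := c.StatObject("my-bucket", "archive-sniffed")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("stored content type:", info.ContentType)
}
```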
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Generate 33K of data. + var reader = getDataReader("datafile-33-kB", thirtyThreeKiB) + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + } + + // Save the data. + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(thirtyThreeKiB), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + } + + if n != int64(thirtyThreeKiB) { + failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal() + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + } + + st, err := r.Stat() + if err != nil { + failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + } + + if st.Size != int64(thirtyThreeKiB) { + failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(st.Size), err).Fatal() + } + + offset := int64(2048) + n, err = r.Seek(offset, 0) + if err != nil { + failureLog(function, args, startTime, "", "Seek failed", err).Fatal() + } + if n != offset { + failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err).Fatal() + } + n, err = r.Seek(0, 1) + if err != nil { + failureLog(function, args, startTime, "", "Seek failed", err).Fatal() + } + if n != offset { + failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err).Fatal() + } + _, err = r.Seek(offset, 2) + if err == nil { + failureLog(function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err).Fatal() + } + n, err = r.Seek(-offset, 2) + if err != nil { + failureLog(function, args, startTime, "", "Seek failed", err).Fatal() + } + if n != st.Size-offset { + failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(st.Size-offset)+" got "+string(n), err).Fatal() + } + + var buffer1 bytes.Buffer + if _, err = io.CopyN(&buffer1, r, st.Size); err != nil { + if err != io.EOF { + failureLog(function, args, startTime, "", "Copy failed", err).Fatal() + } + } + if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) { + failureLog(function, args, startTime, "", "Incorrect read bytes v/s original buffer", err).Fatal() + } + + // Seek again and read again. 
+ n, err = r.Seek(offset-1, 0) + if err != nil { + failureLog(function, args, startTime, "", "Seek failed", err).Fatal() + } + if n != (offset - 1) { + failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset-1)+" got "+string(n), err).Fatal() + } + + var buffer2 bytes.Buffer + if _, err = io.CopyN(&buffer2, r, st.Size); err != nil { + if err != io.EOF { + failureLog(function, args, startTime, "", "Copy failed", err).Fatal() + } + } + // Verify now lesser bytes. + if !bytes.Equal(buf[2047:], buffer2.Bytes()) { + failureLog(function, args, startTime, "", "Incorrect read bytes v/s original buffer", err).Fatal() + } + + err = c.RemoveObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Tests get object ReaderAt interface methods. +func testGetObjectReadAtFunctionalV2() { + // initialize logging params + startTime := time.Now() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Generate 33K of data. 
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB) + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := ioutil.ReadAll(reader) + if err != nil { + failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + } + + // Save the data + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(thirtyThreeKiB), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + } + + if n != int64(thirtyThreeKiB) { + failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(thirtyThreeKiB)+" got "+string(n), err).Fatal() + } + + // Read the data back + r, err := c.GetObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + } + + st, err := r.Stat() + if err != nil { + failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + } + + if st.Size != int64(thirtyThreeKiB) { + failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(thirtyThreeKiB)+" got "+string(st.Size), err).Fatal() + } + + offset := int64(2048) + + // Read directly + buf2 := make([]byte, 512) + buf3 := make([]byte, 512) + buf4 := make([]byte, 512) + + m, err := r.ReadAt(buf2, offset) + if err != nil { + failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal() + } + if m != len(buf2) { + failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+" got "+string(m), err).Fatal() + } + if !bytes.Equal(buf2, buf[offset:offset+512]) { + failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal() + } + offset += 512 + m, err = r.ReadAt(buf3, offset) + if err != nil { + failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal() + } + if m != len(buf3) { + failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+" got "+string(m), err).Fatal() + } + if !bytes.Equal(buf3, buf[offset:offset+512]) { + failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal() + } + offset += 512 + m, err = r.ReadAt(buf4, offset) + if err != nil { + failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal() + } + if m != len(buf4) { + failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+" got "+string(m), err).Fatal() + } + if !bytes.Equal(buf4, buf[offset:offset+512]) { + failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal() + } + + buf5 := make([]byte, n) + // Read the whole object. + m, err = r.ReadAt(buf5, 0) + if err != nil { + if err != io.EOF { + failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal() + } + } + if m != len(buf5) { + failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+" got "+string(m), err).Fatal() + } + if !bytes.Equal(buf, buf5) { + failureLog(function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err).Fatal() + } + + buf6 := make([]byte, n+1) + // Read the whole object and beyond. 
+ _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal() + } + } + err = c.RemoveObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Tests copy object +func testCopyObjectV2() { + // initialize logging params + startTime := time.Now() + function := "CopyObject(destination, source)" + args := map[string]interface{}{ + "destination": "", + "source": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Make a new bucket in 'us-east-1' (destination bucket). + err = c.MakeBucket(bucketName+"-copy", "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Generate 33K of data. + var reader = getDataReader("datafile-33-kB", thirtyThreeKiB) + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + n, err := c.PutObject(bucketName, objectName, reader, int64(thirtyThreeKiB), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + } + + if n != int64(thirtyThreeKiB) { + failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal() + } + + r, err := c.GetObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + } + // Check the various fields of source object against destination object. + objInfo, err := r.Stat() + if err != nil { + failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + } + + // Copy Source + src := minio.NewSourceInfo(bucketName, objectName, nil) + + // Set copy conditions. + + // All invalid conditions first. 
+ err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)) + if err == nil { + failureLog(function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err).Fatal() + } + err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)) + if err == nil { + failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err).Fatal() + } + err = src.SetMatchETagCond("") + if err == nil { + failureLog(function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err).Fatal() + } + err = src.SetMatchETagExceptCond("") + if err == nil { + failureLog(function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err).Fatal() + } + + err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) + if err != nil { + failureLog(function, args, startTime, "", "SetModifiedSinceCond failed", err).Fatal() + } + err = src.SetMatchETagCond(objInfo.ETag) + if err != nil { + failureLog(function, args, startTime, "", "SetMatchETagCond failed", err).Fatal() + } + args["source"] = src + + dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil) + if err != nil { + failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal() + } + args["destination"] = dst + + // Perform the Copy + err = c.CopyObject(dst, src) + if err != nil { + failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal() + } + + // Source object + r, err = c.GetObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + } + // Destination object + readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy") + if err != nil { + failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + } + // Check the various fields of source object against destination object. 
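Reviewer note: the copy path above goes through `minio.NewSourceInfo` / `minio.NewDestinationInfo` with optional copy conditions before calling `CopyObject`. A trimmed-down sketch of the same flow (all bucket and object names are placeholders):

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	c, err := minio.NewV2("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Source with an ETag match condition, so the copy only proceeds if the
	// object has not changed since we looked at it.
	src := minio.NewSourceInfo("src-bucket", "src-object", nil)
	srcStat, err := c.StatObject("src-bucket", "src-object")
	if err != nil {
		log.Fatalln(err)
	}
	if err = src.SetMatchETagCond(srcStat.ETag); err != nil {
		log.Fatalln(err)
	}

	dst, err := minio.NewDestinationInfo("dst-bucket", "dst-object", nil, nil)
	if err != nil {
		log.Fatalln(err)
	}

	// Server-side copy; no object data flows through the client.
	if err = c.CopyObject(dst, src); err != nil {
		log.Fatalln(err)
	}
}
```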
+ objInfo, err = r.Stat() + if err != nil { + failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + } + objInfoCopy, err := readerCopy.Stat() + if err != nil { + failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + } + if objInfo.Size != objInfoCopy.Size { + failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+" got "+string(objInfo.Size), err).Fatal() + } + + // CopyObject again but with wrong conditions + src = minio.NewSourceInfo(bucketName, objectName, nil) + err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) + if err != nil { + failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond failed", err).Fatal() + } + err = src.SetMatchETagExceptCond(objInfo.ETag) + if err != nil { + failureLog(function, args, startTime, "", "SetMatchETagExceptCond failed", err).Fatal() + } + + // Perform the Copy which should fail + err = c.CopyObject(dst, src) + if err == nil { + failureLog(function, args, startTime, "", "CopyObject did not fail for invalid conditions", err).Fatal() + } + + // Remove all objects and buckets + err = c.RemoveObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + + err = c.RemoveObject(bucketName+"-copy", objectName+"-copy") + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + + err = c.RemoveBucket(bucketName + "-copy") + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +func testComposeObjectErrorCasesWrapper(c *minio.Client) { + // initialize logging params + startTime := time.Now() + function := "testComposeObjectErrorCasesWrapper(minioClient)" + args := map[string]interface{}{} + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket in 'us-east-1' (source bucket). + err := c.MakeBucket(bucketName, "us-east-1") + + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Test that more than 10K source objects cannot be + // concatenated. + srcArr := [10001]minio.SourceInfo{} + srcSlice := srcArr[:] + dst, err := minio.NewDestinationInfo(bucketName, "object", nil, nil) + if err != nil { + failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal() + } + + if err := c.ComposeObject(dst, srcSlice); err == nil { + failureLog(function, args, startTime, "", "Expected error in ComposeObject", err).Fatal() + } else if err.Error() != "There must be as least one and up to 10000 source objects." { + failureLog(function, args, startTime, "", "Got unexpected error", err).Fatal() + } + + // Create a source with invalid offset spec and check that + // error is returned: + // 1. Create the source object. + const badSrcSize = 5 * 1024 * 1024 + buf := bytes.Repeat([]byte("1"), badSrcSize) + _, err = c.PutObject(bucketName, "badObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + } + // 2. 
Set invalid range spec on the object (going beyond + // object size) + badSrc := minio.NewSourceInfo(bucketName, "badObject", nil) + err = badSrc.SetRange(1, badSrcSize) + if err != nil { + failureLog(function, args, startTime, "", "Setting NewSourceInfo failed", err).Fatal() + } + // 3. ComposeObject call should fail. + if err := c.ComposeObject(dst, []minio.SourceInfo{badSrc}); err == nil { + failureLog(function, args, startTime, "", "ComposeObject expected to fail", err).Fatal() + } else if !strings.Contains(err.Error(), "has invalid segment-to-copy") { + failureLog(function, args, startTime, "", "Got invalid error", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Test expected error cases +func testComposeObjectErrorCasesV2() { + // initialize logging params + startTime := time.Now() + function := "testComposeObjectErrorCasesV2()" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + } + + testComposeObjectErrorCasesWrapper(c) +} + +func testComposeMultipleSources(c *minio.Client) { + // initialize logging params + startTime := time.Now() + function := "ComposeObject(destination, sources)" + args := map[string]interface{}{ + "destination": "", + "sources": "", + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + // Make a new bucket in 'us-east-1' (source bucket). + err := c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Upload a small source object + const srcSize = 1024 * 1024 * 5 + buf := bytes.Repeat([]byte("1"), srcSize) + _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + } + + // We will append 10 copies of the object. + srcs := []minio.SourceInfo{} + for i := 0; i < 10; i++ { + srcs = append(srcs, minio.NewSourceInfo(bucketName, "srcObject", nil)) + } + // make the last part very small + err = srcs[9].SetRange(0, 0) + if err != nil { + failureLog(function, args, startTime, "", "SetRange failed", err).Fatal() + } + args["sources"] = srcs + + dst, err := minio.NewDestinationInfo(bucketName, "dstObject", nil, nil) + args["destination"] = dst + + if err != nil { + failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal() + } + err = c.ComposeObject(dst, srcs) + if err != nil { + failureLog(function, args, startTime, "", "ComposeObject failed", err).Fatal() + } + + objProps, err := c.StatObject(bucketName, "dstObject") + if err != nil { + failureLog(function, args, startTime, "", "StatObject failed", err).Fatal() + } + + if objProps.Size != 9*srcSize+1 { + failureLog(function, args, startTime, "", "Size mismatched! 
Expected "+string(10000*srcSize)+" got "+string(objProps.Size), err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Test concatenating multiple objects objects +func testCompose10KSourcesV2() { + // initialize logging params + startTime := time.Now() + function := "testCompose10KSourcesV2(minioClient)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + } + + testComposeMultipleSources(c) +} + +func testEncryptedCopyObjectWrapper(c *minio.Client) { + // initialize logging params + startTime := time.Now() + function := "testEncryptedCopyObjectWrapper(minioClient)" + args := map[string]interface{}{} + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + // Make a new bucket in 'us-east-1' (source bucket). + err := c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + key1 := minio.NewSSEInfo([]byte("32byteslongsecretkeymustbegiven1"), "AES256") + key2 := minio.NewSSEInfo([]byte("32byteslongsecretkeymustbegiven2"), "AES256") + + // 1. create an sse-c encrypted object to copy by uploading + const srcSize = 1024 * 1024 + buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB + metadata := make(map[string]string) + for k, v := range key1.GetSSEHeaders() { + metadata[k] = v + } + _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: metadata, Progress: nil}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject call failed", err).Fatal() + } + + // 2. copy object and change encryption key + src := minio.NewSourceInfo(bucketName, "srcObject", &key1) + dst, err := minio.NewDestinationInfo(bucketName, "dstObject", &key2, nil) + if err != nil { + failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal() + } + + err = c.CopyObject(dst, src) + if err != nil { + failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal() + } + + // 3. 
get copied object and check if content is equal + reqH := minio.NewGetReqHeaders() + for k, v := range key2.GetSSEHeaders() { + reqH.Set(k, v) + } + coreClient := minio.Core{c} + reader, _, err := coreClient.GetObject(bucketName, "dstObject", reqH) + if err != nil { + failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + } + defer reader.Close() + + decBytes, err := ioutil.ReadAll(reader) + if err != nil { + failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + } + if !bytes.Equal(decBytes, buf) { + failureLog(function, args, startTime, "", "Downloaded object mismatched for encrypted object", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Test encrypted copy object +func testEncryptedCopyObject() { + // initialize logging params + startTime := time.Now() + function := "testEncryptedCopyObject()" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + } + + // c.TraceOn(os.Stderr) + testEncryptedCopyObjectWrapper(c) +} + +// Test encrypted copy object +func testEncryptedCopyObjectV2() { + // initialize logging params + startTime := time.Now() + function := "testEncryptedCopyObjectV2()" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + } + + testEncryptedCopyObjectWrapper(c) +} + +func testUserMetadataCopying() { + // initialize logging params + startTime := time.Now() + function := "testUserMetadataCopying()" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + // c.TraceOn(os.Stderr) + testUserMetadataCopyingWrapper(c) +} + +func testUserMetadataCopyingWrapper(c *minio.Client) { + // initialize logging params + startTime := time.Now() + function := "CopyObject(destination, source)" + args := map[string]interface{}{ + "destination": "", + "source": "", + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + // Make a new bucket in 'us-east-1' (source bucket). + err := c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + fetchMeta := func(object string) (h http.Header) { + objInfo, err := c.StatObject(bucketName, object) + if err != nil { + failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + } + h = make(http.Header) + for k, vs := range objInfo.Metadata { + if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") { + for _, v := range vs { + h.Add(k, v) + } + } + } + return h + } + + // 1. 
create a client encrypted object to copy by uploading + const srcSize = 1024 * 1024 + buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB + metadata := make(http.Header) + metadata.Set("x-amz-meta-myheader", "myvalue") + m := make(map[string]string) + m["x-amz-meta-myheader"] = "myvalue" + _, err = c.PutObject(bucketName, "srcObject", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: m}) + if err != nil { + failureLog(function, args, startTime, "", "PutObjectWithMetadata failed", err).Fatal() + } + if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) { + failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal() + } + + // 2. create source + src := minio.NewSourceInfo(bucketName, "srcObject", nil) + // 2.1 create destination with metadata set + dst1, err := minio.NewDestinationInfo(bucketName, "dstObject-1", nil, map[string]string{"notmyheader": "notmyvalue"}) + if err != nil { + failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal() + } + + // 3. Check that copying to an object with metadata set resets + // the headers on the copy. + err = c.CopyObject(dst1, src) + args["destination"] = dst1 + args["source"] = src + + if err != nil { + failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal() + } + + expectedHeaders := make(http.Header) + expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue") + if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) { + failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal() + } + + // 4. create destination with no metadata set and same source + dst2, err := minio.NewDestinationInfo(bucketName, "dstObject-2", nil, nil) + if err != nil { + failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal() + + } + src = minio.NewSourceInfo(bucketName, "srcObject", nil) + + // 5. Check that copying to an object with no metadata set, + // copies metadata. + err = c.CopyObject(dst2, src) + args["destination"] = dst2 + args["source"] = src + + if err != nil { + failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal() + } + + expectedHeaders = metadata + if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) { + failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal() + } + + // 6. Compose a pair of sources. + srcs := []minio.SourceInfo{ + minio.NewSourceInfo(bucketName, "srcObject", nil), + minio.NewSourceInfo(bucketName, "srcObject", nil), + } + dst3, err := minio.NewDestinationInfo(bucketName, "dstObject-3", nil, nil) + if err != nil { + failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal() + } + + err = c.ComposeObject(dst3, srcs) + function = "ComposeObject(destination, sources)" + args["destination"] = dst3 + args["source"] = srcs + + if err != nil { + failureLog(function, args, startTime, "", "ComposeObject failed", err).Fatal() + } + + // Check that no headers are copied in this case + if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) { + failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal() + } + + // 7. Compose a pair of sources with dest user metadata set. 
+ srcs = []minio.SourceInfo{ + minio.NewSourceInfo(bucketName, "srcObject", nil), + minio.NewSourceInfo(bucketName, "srcObject", nil), + } + dst4, err := minio.NewDestinationInfo(bucketName, "dstObject-4", nil, map[string]string{"notmyheader": "notmyvalue"}) + if err != nil { + failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal() + } + + err = c.ComposeObject(dst4, srcs) + function = "ComposeObject(destination, sources)" + args["destination"] = dst4 + args["source"] = srcs + + if err != nil { + failureLog(function, args, startTime, "", "ComposeObject failed", err).Fatal() + } + + // Check that no headers are copied in this case + expectedHeaders = make(http.Header) + expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue") + if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) { + failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +func testUserMetadataCopyingV2() { + // initialize logging params + startTime := time.Now() + function := "testUserMetadataCopyingV2()" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal() + } + + // c.TraceOn(os.Stderr) + testUserMetadataCopyingWrapper(c) +} + +// Test put object with size -1 byte object. +func testPutObjectNoLengthV2() { + // initialize logging params + startTime := time.Now() + function := "PutObject(bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "size": -1, + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), + "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + objectName := bucketName + "unique" + args["objectName"] = objectName + + // Generate data using 4 parts so that all 3 'workers' are utilized and a part is leftover. + // Use different data for each part for multipart tests to ensure part order at the end. + var reader = getDataReader("datafile-65-MB", sixtyFiveMiB) + defer reader.Close() + + // Upload an object. + n, err := c.PutObject(bucketName, objectName, reader, -1, minio.PutObjectOptions{}) + + if err != nil { + failureLog(function, args, startTime, "", "PutObjectWithSize failed", err).Fatal() + } + if n != int64(sixtyFiveMiB) { + failureLog(function, args, startTime, "", "Expected upload object size "+string(sixtyFiveMiB)+" got "+string(n), err).Fatal() + } + + // Remove the object. 
+ err = c.RemoveObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + + // Remove the bucket. + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Test put objects of unknown size. +func testPutObjectsUnknownV2() { + // initialize logging params + startTime := time.Now() + function := "PutObject(bucketName, objectName, reader,size,opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "size": "", + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), + "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Issues are revealed by trying to upload multiple files of unknown size + // sequentially (on 4GB machines) + for i := 1; i <= 4; i++ { + // Simulate that we could be receiving byte slices of data that we want + // to upload as a file + rpipe, wpipe := io.Pipe() + defer rpipe.Close() + go func() { + b := []byte("test") + wpipe.Write(b) + wpipe.Close() + }() + + // Upload the object. + objectName := fmt.Sprintf("%sunique%d", bucketName, i) + args["objectName"] = objectName + + n, err := c.PutObject(bucketName, objectName, rpipe, -1, minio.PutObjectOptions{}) + if err != nil { + failureLog(function, args, startTime, "", "PutObjectStreaming failed", err).Fatal() + } + if n != int64(4) { + failureLog(function, args, startTime, "", "Expected upload object size "+string(4)+" got "+string(n), err).Fatal() + } + + // Remove the object. + err = c.RemoveObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + } + + // Remove the bucket. + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Test put object with 0 byte object. +func testPutObject0ByteV2() { + // initialize logging params + startTime := time.Now() + function := "PutObject(bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "size": 0, + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), + "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + objectName := bucketName + "unique" + + // Upload an object. + n, err := c.PutObject(bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{}) + + if err != nil { + failureLog(function, args, startTime, "", "PutObjectWithSize failed", err).Fatal() + } + if n != 0 { + failureLog(function, args, startTime, "", "Expected upload object size 0 but got "+string(n), err).Fatal() + } + + // Remove the object. + err = c.RemoveObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + + // Remove the bucket. + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Test expected error cases +func testComposeObjectErrorCases() { + // initialize logging params + startTime := time.Now() + function := "testComposeObjectErrorCases()" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + testComposeObjectErrorCasesWrapper(c) +} + +// Test concatenating 10K objects +func testCompose10KSources() { + // initialize logging params + startTime := time.Now() + function := "testCompose10KSources()" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + } + + testComposeMultipleSources(c) +} + +// Tests comprehensive list of all methods. +func testFunctionalV2() { + // initialize logging params + startTime := time.Now() + function := "testFunctionalV2()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal() + } + + // Enable to debug + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Generate a random file name. 
+ fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + file, err := os.Create(fileName) + if err != nil { + failureLog(function, args, startTime, "", "file create failed", err).Fatal() + } + for i := 0; i < 3; i++ { + buf := make([]byte, rand.Intn(1<<19)) + _, err = file.Write(buf) + if err != nil { + failureLog(function, args, startTime, "", "file write failed", err).Fatal() + } + } + file.Close() + + // Verify if bucket exists and you have access. + var exists bool + exists, err = c.BucketExists(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "BucketExists failed", err).Fatal() + } + if !exists { + failureLog(function, args, startTime, "", "Could not find existing bucket "+bucketName, err).Fatal() + } + + // Make the bucket 'public read/write'. + err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite) + if err != nil { + failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal() + } + + // List all buckets. + buckets, err := c.ListBuckets() + if len(buckets) == 0 { + failureLog(function, args, startTime, "", "List buckets cannot be empty", err).Fatal() + } + if err != nil { + failureLog(function, args, startTime, "", "ListBuckets failed", err).Fatal() + } + + // Verify if previously created bucket is listed in list buckets. + bucketFound := false + for _, bucket := range buckets { + if bucket.Name == bucketName { + bucketFound = true + } + } + + // If bucket not found error out. + if !bucketFound { + failureLog(function, args, startTime, "", "Bucket "+bucketName+" not found", err).Fatal() + } + + objectName := bucketName + "unique" + + // Generate data + buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19)) + + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + } + if n != int64(len(buf)) { + failureLog(function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err).Fatal() + } + + n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + } + + if n != int64(len(buf)) { + failureLog(function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err).Fatal() + } + + // Instantiate a done channel to close all listing. + doneCh := make(chan struct{}) + defer close(doneCh) + + objFound := false + isRecursive := true // Recursive is true. + for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + failureLog(function, args, startTime, "", "Could not find existing object "+objectName, err).Fatal() + } + + objFound = false + isRecursive = true // Recursive is true.
+ for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + failureLog(function, args, startTime, "", "Could not find existing object "+objectName, err).Fatal() + } + + incompObjNotFound := true + for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) { + if objIncompl.Key != "" { + incompObjNotFound = false + break + } + } + if !incompObjNotFound { + failureLog(function, args, startTime, "", "Unexpected dangling incomplete upload found", err).Fatal() + } + + newReader, err := c.GetObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + } + + newReadBytes, err := ioutil.ReadAll(newReader) + if err != nil { + failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + } + + if !bytes.Equal(newReadBytes, buf) { + failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal() + } + + err = c.FGetObject(bucketName, objectName, fileName+"-f") + if err != nil { + failureLog(function, args, startTime, "", "FgetObject failed", err).Fatal() + } + + // Generate presigned HEAD object url. + presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil) + if err != nil { + failureLog(function, args, startTime, "", "PresignedHeadObject failed", err).Fatal() + } + // Verify if presigned url works. + resp, err := http.Head(presignedHeadURL.String()) + if err != nil { + failureLog(function, args, startTime, "", "PresignedHeadObject URL head request failed", err).Fatal() + } + if resp.StatusCode != http.StatusOK { + failureLog(function, args, startTime, "", "PresignedHeadObject URL returns status "+string(resp.StatusCode), err).Fatal() + } + if resp.Header.Get("ETag") == "" { + failureLog(function, args, startTime, "", "Got empty ETag", err).Fatal() + } + resp.Body.Close() + + // Generate presigned GET object url. + presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil) + if err != nil { + failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal() + } + // Verify if presigned url works. + resp, err = http.Get(presignedGetURL.String()) + if err != nil { + failureLog(function, args, startTime, "", "PresignedGetObject URL GET request failed", err).Fatal() + } + if resp.StatusCode != http.StatusOK { + failureLog(function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err).Fatal() + } + newPresignedBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + } + resp.Body.Close() + if !bytes.Equal(newPresignedBytes, buf) { + failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal() + } + + // Set request parameters. + reqParams := make(url.Values) + reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"") + // Generate presigned GET object url. + presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams) + if err != nil { + failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal() + } + // Verify if presigned url works. 
+ resp, err = http.Get(presignedGetURL.String()) + if err != nil { + failureLog(function, args, startTime, "", "PresignedGetObject URL GET request failed", err).Fatal() + } + if resp.StatusCode != http.StatusOK { + failureLog(function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err).Fatal() + } + newPresignedBytes, err = ioutil.ReadAll(resp.Body) + if err != nil { + failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + } + if !bytes.Equal(newPresignedBytes, buf) { + failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal() + } + // Verify content disposition. + if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" { + failureLog(function, args, startTime, "", "wrong Content-Disposition received ", err).Fatal() + } + + presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second) + if err != nil { + failureLog(function, args, startTime, "", "PresignedPutObject failed", err).Fatal() + } + // Generate data more than 32K + buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024) + + req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf)) + if err != nil { + failureLog(function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err).Fatal() + } + httpClient := &http.Client{ + // Setting a sensible time out of 30secs to wait for response + // headers. Request is pro-actively cancelled after 30secs + // with no response. + Timeout: 30 * time.Second, + Transport: http.DefaultTransport, + } + resp, err = httpClient.Do(req) + if err != nil { + failureLog(function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err).Fatal() + } + + newReader, err = c.GetObject(bucketName, objectName+"-presigned") + if err != nil { + failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + } + + newReadBytes, err = ioutil.ReadAll(newReader) + if err != nil { + failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + } + + if !bytes.Equal(newReadBytes, buf) { + failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal() + } + + err = c.RemoveObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + err = c.RemoveObject(bucketName, objectName+"-f") + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + err = c.RemoveObject(bucketName, objectName+"-nolength") + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + err = c.RemoveObject(bucketName, objectName+"-presigned") + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + } + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + } + err = c.RemoveBucket(bucketName) + if err == nil { + failureLog(function, args, startTime, "", "RemoveBucket should fail as bucket does not exist", err).Fatal() + } + if err.Error() != "The specified bucket does not exist" { + failureLog(function, args, startTime, "", "RemoveBucket failed with wrong error message", err).Fatal() + } + if err = os.Remove(fileName); err != nil { + failureLog(function, args, startTime, "", "File remove failed", err).Fatal() + } + if err = os.Remove(fileName + "-f"); err != nil { + failureLog(function, args, startTime, "", "File removes failed", 
err).Fatal() + } + successLogger(function, args, startTime).Info() +} + +// Test get object with GetObjectWithContext +func testGetObjectWithContext() { + // initialize logging params + startTime := time.Now() + function := "GetObjectWithContext(ctx, bucketName, objectName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client v4 object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Generate data more than 32K. + bufSize := 1<<20 + 32*1024 + var reader = getDataReader("datafile-33-kB", bufSize) + defer reader.Close() + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + + _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + + } + + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + + // Read the data back + r, err := c.GetObjectWithContext(ctx, bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "GetObjectWithContext failed - request timeout not honored", err).Fatal() + + } + ctx, cancel = context.WithTimeout(context.Background(), 3*time.Minute) + defer cancel() + + // Read the data back + r, err = c.GetObjectWithContext(ctx, bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "GetObjectWithContext failed", err).Fatal() + + } + + st, err := r.Stat() + if err != nil { + failureLog(function, args, startTime, "", "object Stat call failed", err).Fatal() + } + if st.Size != int64(bufSize) { + failureLog(function, args, startTime, "", "Number of bytes in stat does not match: want "+string(bufSize)+", got"+string(st.Size), err).Fatal() + } + if err := r.Close(); err != nil { + failureLog(function, args, startTime, "", "object Close() call failed", err).Fatal() + } + + err = c.RemoveObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject call failed", err).Fatal() + } + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket call failed", err).Fatal() + } + successLogger(function, args, startTime).Info() + +} + +// Test get object with FGetObjectWithContext +func testFGetObjectWithContext() { + // initialize logging params + startTime := time.Now() + function := "FGetObjectWithContext(ctx, bucketName, objectName, fileName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
+ c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client v4 object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Generate data more than 32K. + var reader = getDataReader("datafile-1-MiB", oneMiB) + defer reader.Close() + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + + _, err = c.PutObject(bucketName, objectName, reader, int64(oneMiB), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + } + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Millisecond) + defer cancel() + + fileName := "tempfile-context" + // Read the data back + err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-f") + if err == nil { + failureLog(function, args, startTime, "", "FGetObjectWithContext with short timeout failed", err).Fatal() + } + ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + // Read the data back + err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-fcontext") + if err != nil { + failureLog(function, args, startTime, "", "FGetObjectWithContext with long timeout failed", err).Fatal() + } + if err = os.Remove(fileName + "-fcontext"); err != nil { + failureLog(function, args, startTime, "", "Remove file failed", err).Fatal() + + } + err = c.RemoveObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject call failed", err).Fatal() + + } + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket call failed", err).Fatal() + + } + successLogger(function, args, startTime).Info() + +} + +// Test validates putObject with context to see if request cancellation is honored for V2. +func testPutObjectWithContextV2() { + // initialize logging params + startTime := time.Now() + function := "PutObjectWithContext(ctx, bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{ContentType:objectContentType}", + } + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Make a new bucket. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + + } + defer c.RemoveBucket(bucketName) + bufSize := 1<<20 + 32*1024 + var reader = getDataReader("datafile-33-kB", bufSize) + defer reader.Close() + objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "PutObjectWithContext with short timeout failed", err).Fatal() + } + + ctx, cancel = context.WithTimeout(context.Background(), 3*time.Minute) + defer cancel() + reader = getDataReader("datafile-33-kB", bufSize) + defer reader.Close() + _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "PutObjectWithContext with long timeout failed", err).Fatal() + } + + if err = c.RemoveObject(bucketName, objectName); err != nil { + failureLog(function, args, startTime, "", "RemoveObject call failed", err).Fatal() + } + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket call failed", err).Fatal() + } + successLogger(function, args, startTime).Info() + +} + +// Test get object with GetObjectWithContext +func testGetObjectWithContextV2() { + // initialize logging params + startTime := time.Now() + function := "GetObjectWithContext(ctx, bucketName, objectName)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + } + + // Generate data more than 32K. 
+ bufSize := 1<<20 + 32*1024 + var reader = getDataReader("datafile-33-kB", bufSize) + defer reader.Close() + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + + _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject call failed", err).Fatal() + } + + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + + // Read the data back + r, err := c.GetObjectWithContext(ctx, bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "GetObjectWithContext failed due to non-cancellation upon short timeout", err).Fatal() + + } + ctx, cancel = context.WithTimeout(context.Background(), 3*time.Minute) + defer cancel() + + // Read the data back + r, err = c.GetObjectWithContext(ctx, bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "GetObjectWithContext failed due to non-cancellation upon long timeout", err).Fatal() + + } + + st, err := r.Stat() + if err != nil { + failureLog(function, args, startTime, "", "object Stat call failed", err).Fatal() + } + if st.Size != int64(bufSize) { + failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(bufSize)+" got "+string(st.Size), err).Fatal() + } + if err := r.Close(); err != nil { + failureLog(function, args, startTime, "", " object Close() call failed", err).Fatal() + } + + err = c.RemoveObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject call failed", err).Fatal() + + } + + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket call failed", err).Fatal() + } + successLogger(function, args, startTime).Info() + +} + +// Test get object with FGetObjectWithContext +func testFGetObjectWithContextV2() { + // initialize logging params + startTime := time.Now() + function := "FGetObjectWithContext(ctx, bucketName, objectName,fileName)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileName": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal() + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + failureLog(function, args, startTime, "", "MakeBucket call failed", err).Fatal() + } + + // Generate data more than 32K. 
+ + var reader = getDataReader("datafile-1-MiB", oneMiB) + defer reader.Close() + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + + _, err = c.PutObject(bucketName, objectName, reader, int64(oneMiB), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + failureLog(function, args, startTime, "", "PutObject call failed", err).Fatal() + } + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond) + defer cancel() + + fileName := "tempfile-context" + // Read the data back + err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-f") + if err == nil { + failureLog(function, args, startTime, "", "FGetObjectWithContext call with short request timeout failed", err).Fatal() + + } + ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + // Read the data back + err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-fcontext") + if err != nil { + failureLog(function, args, startTime, "", "FGetObjectWithContext call with long request timeout failed", err).Fatal() + } + + if err = os.Remove(fileName + "-fcontext"); err != nil { + failureLog(function, args, startTime, "", "Remove file failed", err).Fatal() + } + err = c.RemoveObject(bucketName, objectName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveObject call failed", err).Fatal() + } + err = c.RemoveBucket(bucketName) + if err != nil { + failureLog(function, args, startTime, "", "RemoveBucket call failed", err).Fatal() + } + successLogger(function, args, startTime).Info() + +} + +// Convert string to bool and always return false if any error +func mustParseBool(str string) bool { + b, err := strconv.ParseBool(str) + if err != nil { + return false + } + return b +} + +func main() { + // Output to stdout instead of the default stderr + log.SetOutput(os.Stdout) + // create custom formatter + mintFormatter := mintJSONFormatter{} + // set custom formatter + log.SetFormatter(&mintFormatter) + // log Info or above -- success cases are Info level, failures are Fatal level + log.SetLevel(log.InfoLevel) + // execute tests + if !isQuickMode() { + testMakeBucketErrorV2() + testGetObjectClosedTwiceV2() + testRemovePartiallyUploadedV2() + testFPutObjectV2() + testMakeBucketRegionsV2() + testGetObjectReadSeekFunctionalV2() + testGetObjectReadAtFunctionalV2() + testCopyObjectV2() + testFunctionalV2() + testComposeObjectErrorCasesV2() + testCompose10KSourcesV2() + testEncryptedCopyObjectV2() + testUserMetadataCopyingV2() + testPutObject0ByteV2() + testPutObjectNoLengthV2() + testPutObjectsUnknownV2() + testGetObjectWithContextV2() + testFPutObjectWithContextV2() + testFGetObjectWithContextV2() + testPutObjectWithContextV2() + testMakeBucketError() + testMakeBucketRegions() + testPutObjectWithMetadata() + testPutObjectReadAt() + testPutObjectStreaming() + testListPartiallyUploaded() + testGetObjectSeekEnd() + testGetObjectClosedTwice() + testRemoveMultipleObjects() + testRemovePartiallyUploaded() + testFPutObjectMultipart() + testFPutObject() + testGetObjectReadSeekFunctional() + testGetObjectReadAtFunctional() + testPresignedPostPolicy() + testCopyObject() + testEncryptionPutGet() + testEncryptionFPut() + testComposeObjectErrorCases() + testCompose10KSources() + testUserMetadataCopying() + testEncryptedCopyObject() + testBucketNotification() + testFunctional() + testGetObjectObjectModified() + testPutObjectUploadSeekedObject() + testGetObjectWithContext() + testFPutObjectWithContext() + 
testFGetObjectWithContext() + testPutObjectWithContext() + } else { + testFunctional() + testFunctionalV2() + } +} diff --git a/vendor/github.com/minio/minio-go/transport_1_5.go b/vendor/github.com/minio/minio-go/transport_1_5.go deleted file mode 100644 index 468daafd3..000000000 --- a/vendor/github.com/minio/minio-go/transport_1_5.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build go1.5,!go1.6,!go1.7,!go1.8 - -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "net/http" - "time" -) - -// This default transport is similar to http.DefaultTransport -// but with additional DisableCompression: -var defaultMinioTransport http.RoundTripper = &http.Transport{ - Proxy: http.ProxyFromEnvironment, - TLSHandshakeTimeout: 10 * time.Second, - // Set this value so that the underlying transport round-tripper - // doesn't try to auto decode the body of objects with - // content-encoding set to `gzip`. - // - // Refer: - // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843 - DisableCompression: true, -} diff --git a/vendor/github.com/minio/minio-go/transport_1_6.go b/vendor/github.com/minio/minio-go/transport_1_6.go deleted file mode 100644 index 77e7d76fc..000000000 --- a/vendor/github.com/minio/minio-go/transport_1_6.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build go1.6,!go1.7,!go1.8 - -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "net/http" - "time" -) - -// This default transport is similar to http.DefaultTransport -// but with additional DisableCompression: -var defaultMinioTransport http.RoundTripper = &http.Transport{ - Proxy: http.ProxyFromEnvironment, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - // Set this value so that the underlying transport round-tripper - // doesn't try to auto decode the body of objects with - // content-encoding set to `gzip`. 
- // - // Refer: - // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843 - DisableCompression: true, -} diff --git a/vendor/github.com/minio/minio-go/utils.go b/vendor/github.com/minio/minio-go/utils.go index 6f54639e0..c85926225 100644 --- a/vendor/github.com/minio/minio-go/utils.go +++ b/vendor/github.com/minio/minio-go/utils.go @@ -212,3 +212,41 @@ func getDefaultLocation(u url.URL, regionOverride string) (location string) { // Default to location to 'us-east-1'. return "us-east-1" } + +var supportedHeaders = []string{ + "content-type", + "cache-control", + "content-encoding", + "content-disposition", + // Add more supported headers here. +} + +// cseHeaders is list of client side encryption headers +var cseHeaders = []string{ + "X-Amz-Iv", + "X-Amz-Key", + "X-Amz-Matdesc", +} + +// isStandardHeader returns true if header is a supported header and not a custom header +func isStandardHeader(headerKey string) bool { + for _, header := range supportedHeaders { + if strings.Compare(strings.ToLower(headerKey), header) == 0 { + return true + } + } + return false +} + +// isCSEHeader returns true if header is a client side encryption header. +func isCSEHeader(headerKey string) bool { + key := strings.ToLower(headerKey) + for _, h := range cseHeaders { + header := strings.ToLower(h) + if (header == key) || + (("x-amz-meta-" + header) == key) { + return true + } + } + return false +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 1e7ea0b66..0939e878c 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -312,10 +312,10 @@ "revisionTime": "2016-02-29T08:42:30-08:00" }, { - "checksumSHA1": "RoElkV9hrX7Zd8YivXD+JOJOumA=", + "checksumSHA1": "mqxOM3CsubB09O0nDEe4efu0JLQ=", "path": "github.com/minio/minio-go", - "revision": "84539d76271caeffb7a1d5f058bd83c6449f8145", - "revisionTime": "2017-09-01T08:51:27Z" + "revision": "414c6b6a2e97428776cd831d9745589ebcf873e5", + "revisionTime": "2017-09-27T19:03:45Z" }, { "checksumSHA1": "5juljGXPkBWENR2Os7dlnPQER48=",