Add CopyObjectPart support to gateway S3 (#5213)

- Adds a metadata argument to the CopyObjectPart API so that the copy
  APIs can implement encryption as well (a sketch of the new client
  call follows the changed-files list below).

- Updates the vendored minio-go - this version implements the
  CopyObjectPart client API used by the S3 gateway.

Fixes #4885
Branch: master
Author: Aditya Manthramurthy (committed by Nitish Tiwari)
Parent: 490c30f853
Commit: 043e030a4a
  1. cmd/fs-v1-multipart.go (4)
  2. cmd/gateway-b2.go (6)
  3. cmd/gateway-s3-anonymous.go (2)
  4. cmd/gateway-s3.go (19)
  5. cmd/gateway-unsupported.go (2)
  6. cmd/object-api-interface.go (2)
  7. cmd/object-handlers.go (3)
  8. cmd/xl-v1-multipart.go (2)
  9. vendor/github.com/minio/minio-go/Makefile (17)
  10. vendor/github.com/minio/minio-go/NOTICE (2)
  11. vendor/github.com/minio/minio-go/README.md (18)
  12. vendor/github.com/minio/minio-go/api-compose-object.go (71)
  13. vendor/github.com/minio/minio-go/api-datatypes.go (3)
  14. vendor/github.com/minio/minio-go/api-error-response.go (71)
  15. vendor/github.com/minio/minio-go/api-get-object-context.go (3)
  16. vendor/github.com/minio/minio-go/api-get-object-file.go (3)
  17. vendor/github.com/minio/minio-go/api-get-object.go (15)
  18. vendor/github.com/minio/minio-go/api-get-options.go (3)
  19. vendor/github.com/minio/minio-go/api-get-policy.go (9)
  20. vendor/github.com/minio/minio-go/api-list.go (31)
  21. vendor/github.com/minio/minio-go/api-notification.go (22)
  22. vendor/github.com/minio/minio-go/api-presigned.go (56)
  23. vendor/github.com/minio/minio-go/api-put-bucket.go (36)
  24. vendor/github.com/minio/minio-go/api-put-object-common.go (29)
  25. vendor/github.com/minio/minio-go/api-put-object-context.go (27)
  26. vendor/github.com/minio/minio-go/api-put-object-copy.go (3)
  27. vendor/github.com/minio/minio-go/api-put-object-encrypted.go (3)
  28. vendor/github.com/minio/minio-go/api-put-object-file-context.go (27)
  29. vendor/github.com/minio/minio-go/api-put-object-file.go (3)
  30. vendor/github.com/minio/minio-go/api-put-object-multipart.go (49)
  31. vendor/github.com/minio/minio-go/api-put-object-streaming.go (41)
  32. vendor/github.com/minio/minio-go/api-put-object.go (8)
  33. vendor/github.com/minio/minio-go/api-remove.go (33)
  34. vendor/github.com/minio/minio-go/api-s3-datatypes.go (3)
  35. vendor/github.com/minio/minio-go/api-stat.go (21)
  36. vendor/github.com/minio/minio-go/api.go (39)
  37. vendor/github.com/minio/minio-go/bucket-cache.go (7)
  38. vendor/github.com/minio/minio-go/bucket-notification.go (3)
  39. vendor/github.com/minio/minio-go/constants.go (3)
  40. vendor/github.com/minio/minio-go/core.go (26)
  41. vendor/github.com/minio/minio-go/hook-reader.go (3)
  42. vendor/github.com/minio/minio-go/post-policy.go (39)
  43. vendor/github.com/minio/minio-go/retry-continous.go (17)
  44. vendor/github.com/minio/minio-go/retry.go (3)
  45. vendor/github.com/minio/minio-go/s3-endpoints.go (3)
  46. vendor/github.com/minio/minio-go/s3-error.go (3)
  47. vendor/github.com/minio/minio-go/transport.go (2)
  48. vendor/github.com/minio/minio-go/utils.go (57)
  49. vendor/vendor.json (6)
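For orientation before the diffs: the vendored minio-go now exposes an upload-part-copy client call, and the gateway's `ObjectLayer.CopyObjectPart` gains a trailing `metadata map[string]string` that is forwarded to it. The sketch below shows how the new client-side call might be used directly; it is a minimal illustration only, assuming `Core.CopyObjectPart` has the signature implied by the call made from `cmd/gateway-s3.go` in this patch, and the endpoint, credentials and upload ID are placeholders.

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Core wraps Client and exposes lower-level multipart calls;
	// endpoint and credentials here are placeholders.
	core, err := minio.NewCore("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Copy the first 5 MiB of src-bucket/src-object into part 1 of an
	// already-started multipart upload on dst-bucket/dst-object. The
	// final nil is the new metadata argument (e.g. SSE-C copy headers).
	part, err := core.CopyObjectPart("src-bucket", "src-object", "dst-bucket", "dst-object",
		"UPLOAD-ID", 1, 0, 5*1024*1024, nil)
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("copied part %d, etag %s", part.PartNumber, part.ETag)
}
```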

@@ -436,7 +436,9 @@ func partToAppend(fsMeta fsMetaV1, fsAppendMeta fsMetaV1) (part objectPartInfo,
 // CopyObjectPart - similar to PutObjectPart but reads data from an existing
 // object. Internally incoming data is written to '.minio.sys/tmp' location
 // and safely renamed to '.minio.sys/multipart' for reach parts.
-func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64) (pi PartInfo, e error) {
+func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int,
+	startOffset int64, length int64, metadata map[string]string) (pi PartInfo, e error) {
 	if err := checkNewMultipartArgs(srcBucket, srcObject, fs); err != nil {
 		return pi, err
 	}

@@ -598,12 +598,6 @@ func (l *b2Objects) NewMultipartUpload(bucket string, object string, metadata ma
 	return lf.ID, nil
 }
-// CopyObjectPart copy part of object to other bucket and object.
-func (l *b2Objects) CopyObjectPart(srcBucket string, srcObject string, destBucket string, destObject string,
-	uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error) {
-	return PartInfo{}, errors.Trace(NotImplemented{})
-}
 // PutObjectPart puts a part of object in bucket, uses B2's LargeFile upload API.
 func (l *b2Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, data *h2.Reader) (pi PartInfo, err error) {
 	bkt, err := l.Bucket(bucket)

@@ -26,7 +26,7 @@ import (
 // AnonPutObject creates a new object anonymously with the incoming data,
 func (l *s3Objects) AnonPutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, e error) {
-	oi, err := l.anonClient.PutObject(bucket, object, data, data.Size(), data.MD5(), data.SHA256(), toMinioClientMetadata(metadata))
+	oi, err := l.anonClient.PutObject(bucket, object, data, data.Size(), data.MD5HexString(), data.SHA256HexString(), toMinioClientMetadata(metadata))
 	if err != nil {
 		return objInfo, s3ToObjectError(errors.Trace(err), bucket, object)
 	}

@@ -417,7 +417,7 @@ func (l *s3Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectI
 // PutObject creates a new object with the incoming data,
 func (l *s3Objects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
-	oi, err := l.Client.PutObject(bucket, object, data, data.Size(), data.MD5(), data.SHA256(), toMinioClientMetadata(metadata))
+	oi, err := l.Client.PutObject(bucket, object, data, data.Size(), data.MD5HexString(), data.SHA256HexString(), toMinioClientMetadata(metadata))
 	if err != nil {
 		return objInfo, s3ToObjectError(errors.Trace(err), bucket, object)
 	}
@@ -438,6 +438,21 @@ func (l *s3Objects) CopyObject(srcBucket string, srcObject string, dstBucket str
 	return l.GetObjectInfo(dstBucket, dstObject)
 }
+
+// CopyObjectPart creates a part in a multipart upload by copying
+// existing object or a part of it.
+func (l *s3Objects) CopyObjectPart(srcBucket, srcObject, destBucket, destObject, uploadID string,
+	partID int, startOffset, length int64, metadata map[string]string) (p PartInfo, err error) {
+	completePart, err := l.Client.CopyObjectPart(srcBucket, srcObject, destBucket, destObject,
+		uploadID, partID, startOffset, length, metadata)
+	if err != nil {
+		return p, s3ToObjectError(errors.Trace(err), srcBucket, srcObject)
+	}
+	p.PartNumber = completePart.PartNumber
+	p.ETag = completePart.ETag
+	return p, nil
+}
+
 // DeleteObject deletes a blob in bucket
 func (l *s3Objects) DeleteObject(bucket string, object string) error {
 	err := l.Client.RemoveObject(bucket, object)
@@ -537,7 +552,7 @@ func fromMinioClientObjectPart(op minio.ObjectPart) PartInfo {
 // PutObjectPart puts a part of object in bucket
 func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi PartInfo, e error) {
-	info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data, data.Size(), data.MD5(), data.SHA256())
+	info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data, data.Size(), data.MD5HexString(), data.SHA256HexString())
 	if err != nil {
 		return pi, s3ToObjectError(errors.Trace(err), bucket, object)
 	}

@@ -73,7 +73,7 @@ func (a gatewayUnsupported) DeleteBucketPolicies(bucket string) error {
 // CopyObjectPart - Not implemented.
 func (a gatewayUnsupported) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string,
-	partID int, startOffset int64, length int64) (info PartInfo, err error) {
+	partID int, startOffset int64, length int64, metadata map[string]string) (info PartInfo, err error) {
 	return info, errors.Trace(NotImplemented{})
 }

@@ -45,7 +45,7 @@ type ObjectLayer interface {
 	// Multipart operations.
 	ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error)
 	NewMultipartUpload(bucket, object string, metadata map[string]string) (uploadID string, err error)
-	CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error)
+	CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64, metadata map[string]string) (info PartInfo, err error)
 	PutObjectPart(bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error)
 	ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error)
 	AbortMultipartUpload(bucket, object, uploadID string) error

@@ -789,7 +789,8 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 	// Copy source object to destination, if source and destination
 	// object is same then only metadata is updated.
-	partInfo, err := objectAPI.CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, uploadID, partID, startOffset, length)
+	partInfo, err := objectAPI.CopyObjectPart(srcBucket, srcObject, dstBucket,
+		dstObject, uploadID, partID, startOffset, length, nil)
 	if err != nil {
 		errorIf(err, "Unable to perform CopyObjectPart %s/%s", srcBucket, srcObject)
 		writeErrorResponse(w, toAPIErrorCode(err), r.URL)

@@ -573,7 +573,7 @@ func (xl xlObjects) NewMultipartUpload(bucket, object string, meta map[string]st
 // data is read from an existing object.
 //
 // Implements S3 compatible Upload Part Copy API.
-func (xl xlObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64) (pi PartInfo, e error) {
+func (xl xlObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, metadata map[string]string) (pi PartInfo, e error) {
 	if err := checkNewMultipartArgs(srcBucket, srcObject, xl); err != nil {
 		return pi, err
 	}

@@ -0,0 +1,17 @@
+all: checks
+
+checks:
+	@go get -u github.com/go-ini/ini/...
+	@go get -u github.com/minio/go-homedir/...
+	@go get -u github.com/cheggaaa/pb/...
+	@go get -u github.com/sirupsen/logrus/...
+	@go get -u github.com/dustin/go-humanize/...
+	@go vet ./...
+	@SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go test -race -v ./...
+	@SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go run functional_tests.go
+	@mkdir -p /tmp/examples && for i in $(echo examples/s3/*); do go build -o /tmp/examples/$(basename ${i:0:-3}) ${i}; done
+	@go get -u github.com/a8m/mark/...
+	@go get -u github.com/minio/cli/...
+	@go get -u golang.org/x/tools/cmd/goimports
+	@go get -u github.com/gernest/wow/...
+	@go build docs/validator.go && ./validator -m docs/API.md -t docs/checker.go.tpl

@@ -0,0 +1,2 @@
+minio-go
+Copyright 2015-2017 Minio, Inc.

@@ -1,19 +1,7 @@
-# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge)
+# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE)
 The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
-**Supported cloud storage providers:**
-- AWS Signature Version 4
-- Amazon S3
-- Minio
-- AWS Signature Version 2
-- Google Cloud Storage (Compatibility Mode)
-- Openstack Swift + Swift3 middleware
-- Ceph Object Gateway
-- Riak CS
 This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
 This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang).
@@ -155,6 +143,7 @@ The full API Reference is available here.
 * [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
 * [`FPutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FPutObjectWithContext)
 * [`FGetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FGetObjectWithContext)
 ### API Reference : Object Operations
 * [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
 * [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
@@ -210,6 +199,7 @@ The full API Reference is available here.
 * [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
 * [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go)
 * [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go)
 ### Full Examples : Object Operations
 * [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
 * [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
@@ -243,3 +233,5 @@ The full API Reference is available here.
 [![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go)
 [![Build status](https://ci.appveyor.com/api/projects/status/1d05e6nvxcelmrak?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go)
+## License
+This SDK is distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](./LICENSE) and [NOTICE](./NOTICE) for more information.

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -59,7 +60,7 @@ func (s *SSEInfo) getSSEHeaders(isCopySource bool) map[string]string {
 	return map[string]string{
 		"x-amz-" + cs + "server-side-encryption-customer-algorithm": s.algo,
 		"x-amz-" + cs + "server-side-encryption-customer-key":       base64.StdEncoding.EncodeToString(s.key),
-		"x-amz-" + cs + "server-side-encryption-customer-key-MD5":   base64.StdEncoding.EncodeToString(sumMD5(s.key)),
+		"x-amz-" + cs + "server-side-encryption-customer-key-MD5":   sumMD5Base64(s.key),
 	}
 }
@@ -116,7 +117,7 @@ func NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo,
 			k = k[len("x-amz-meta-"):]
 		}
 		if _, ok := m[k]; ok {
-			return d, fmt.Errorf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k)
+			return d, ErrInvalidArgument(fmt.Sprintf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k))
 		}
 		m[k] = v
 	}
@@ -248,9 +249,9 @@ func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[s
 		for k, v := range s.decryptKey.getSSEHeaders(false) {
 			opts.Set(k, v)
 		}
-		objInfo, err = c.statObject(s.bucket, s.object, opts)
+		objInfo, err = c.statObject(context.Background(), s.bucket, s.object, opts)
 		if err != nil {
-			err = fmt.Errorf("Could not stat object - %s/%s: %v", s.bucket, s.object, err)
+			err = ErrInvalidArgument(fmt.Sprintf("Could not stat object - %s/%s: %v", s.bucket, s.object, err))
 		} else {
 			size = objInfo.Size
 			etag = objInfo.ETag
@@ -311,6 +312,56 @@ func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuck
 	return objInfo, nil
 }
+
+func (c Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string,
+	partID int, startOffset int64, length int64, metadata map[string]string) (p CompletePart, err error) {
+	headers := make(http.Header)
+
+	// Set source
+	headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
+
+	if startOffset < 0 {
+		return p, ErrInvalidArgument("startOffset must be non-negative")
+	}
+
+	if length >= 0 {
+		headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
+	}
+
+	for k, v := range metadata {
+		headers.Set(k, v)
+	}
+
+	queryValues := make(url.Values)
+	queryValues.Set("partNumber", strconv.Itoa(partID))
+	queryValues.Set("uploadId", uploadID)
+
+	resp, err := c.executeMethod(ctx, "PUT", requestMetadata{
+		bucketName:   destBucket,
+		objectName:   destObject,
+		customHeader: headers,
+		queryValues:  queryValues,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return
+	}
+
+	// Check if we got an error response.
+	if resp.StatusCode != http.StatusOK {
+		return p, httpRespToErrorResponse(resp, destBucket, destObject)
+	}
+
+	// Decode copy-part response on success.
+	cpObjRes := copyObjectResult{}
+	err = xmlDecoder(resp.Body, &cpObjRes)
+	if err != nil {
+		return p, err
+	}
+	p.PartNumber, p.ETag = partID, cpObjRes.ETag
+	return p, nil
+}
+
 // uploadPartCopy - helper function to create a part in a multipart
 // upload via an upload-part-copy request
 // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
@@ -366,7 +417,7 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
 	for i, src := range srcs {
 		size, etag, srcUserMeta, err = src.getProps(c)
 		if err != nil {
-			return fmt.Errorf("Could not get source props for %s/%s: %v", src.bucket, src.object, err)
+			return err
 		}
 		// Error out if client side encryption is used in this source object when
@@ -478,7 +529,7 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
 	}
 	uploadID, err := c.newUploadID(ctx, dst.bucket, dst.object, PutObjectOptions{UserMetadata: metaHeaders})
 	if err != nil {
-		return fmt.Errorf("Error creating new upload: %v", err)
+		return err
 	}
 	// 2. Perform copy part uploads
@@ -506,7 +557,7 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
 			complPart, err := c.uploadPartCopy(ctx, dst.bucket,
 				dst.object, uploadID, partIndex, h)
 			if err != nil {
-				return fmt.Errorf("Error in upload-part-copy - %v", err)
+				return err
 			}
 			objParts = append(objParts, complPart)
 			partIndex++
@@ -517,9 +568,9 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
 	_, err = c.completeMultipartUpload(ctx, dst.bucket, dst.object, uploadID,
 		completeMultipartUpload{Parts: objParts})
 	if err != nil {
-		err = fmt.Errorf("Error in complete-multipart request - %v", err)
+		return err
 	}
-	return err
+	return nil
 }
 // partsRequired is ceiling(size / copyPartSize)
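One detail worth noting in `copyObjectPartDo` above: `x-amz-copy-source-range` takes an inclusive byte range, so a non-negative length maps to `bytes=startOffset-(startOffset+length-1)`, while a negative length omits the header and copies the whole source object. A tiny standalone illustration (the helper name here is ours, not part of the patch):

```go
package main

import "fmt"

// copySourceRange mirrors the header value built in copyObjectPartDo:
// an inclusive byte range starting at startOffset spanning length bytes.
func copySourceRange(startOffset, length int64) string {
	return fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1)
}

func main() {
	fmt.Println(copySourceRange(0, 5*1024*1024)) // bytes=0-5242879
}
```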

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016, 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -20,7 +21,6 @@ import (
 	"encoding/xml"
 	"fmt"
 	"net/http"
-	"strconv"
 )
 
 /* **** SAMPLE ERROR RESPONSE ****
@@ -49,6 +49,9 @@ type ErrorResponse struct {
 	// only in HEAD bucket and ListObjects response.
 	Region string
+	// Underlying HTTP status code for the returned error
+	StatusCode int `xml:"-" json:"-"`
+
 	// Headers of the returned S3 XML error
 	Headers http.Header `xml:"-" json:"-"`
 }
@@ -100,7 +103,10 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 		msg := "Response is empty. " + reportIssue
 		return ErrInvalidArgument(msg)
 	}
-	var errResp ErrorResponse
+
+	errResp := ErrorResponse{
+		StatusCode: resp.StatusCode,
+	}
 	err := xmlDecoder(resp.Body, &errResp)
 	// Xml decoding failed with no body, fall back to HTTP headers.
@@ -109,12 +115,14 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 		case http.StatusNotFound:
 			if objectName == "" {
 				errResp = ErrorResponse{
+					StatusCode: resp.StatusCode,
 					Code:       "NoSuchBucket",
 					Message:    "The specified bucket does not exist.",
 					BucketName: bucketName,
 				}
 			} else {
 				errResp = ErrorResponse{
+					StatusCode: resp.StatusCode,
 					Code:       "NoSuchKey",
 					Message:    "The specified key does not exist.",
 					BucketName: bucketName,
@@ -123,6 +131,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 			}
 		case http.StatusForbidden:
 			errResp = ErrorResponse{
+				StatusCode: resp.StatusCode,
 				Code:       "AccessDenied",
 				Message:    "Access Denied.",
 				BucketName: bucketName,
@@ -130,12 +139,14 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 			}
 		case http.StatusConflict:
 			errResp = ErrorResponse{
+				StatusCode: resp.StatusCode,
 				Code:       "Conflict",
 				Message:    "Bucket not empty.",
 				BucketName: bucketName,
 			}
 		case http.StatusPreconditionFailed:
 			errResp = ErrorResponse{
+				StatusCode: resp.StatusCode,
 				Code:       "PreconditionFailed",
 				Message:    s3ErrorResponseMap["PreconditionFailed"],
 				BucketName: bucketName,
@@ -143,6 +154,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 			}
 		default:
 			errResp = ErrorResponse{
+				StatusCode: resp.StatusCode,
 				Code:       resp.Status,
 				Message:    resp.Status,
 				BucketName: bucketName,
@@ -150,7 +162,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 		}
 	}
-	// Save hodID, requestID and region information
+	// Save hostID, requestID and region information
 	// from headers if not available through error XML.
 	if errResp.RequestID == "" {
 		errResp.RequestID = resp.Header.Get("x-amz-request-id")
@@ -162,7 +174,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 		errResp.Region = resp.Header.Get("x-amz-bucket-region")
 	}
 	if errResp.Code == "InvalidRegion" && errResp.Region != "" {
-		errResp.Message = fmt.Sprintf("Region does not match, expecting region '%s'.", errResp.Region)
+		errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region)
 	}
 	// Save headers returned in the API XML error
@@ -173,10 +185,10 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 // ErrTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration.
 func ErrTransferAccelerationBucket(bucketName string) error {
-	msg := fmt.Sprintf("The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").")
 	return ErrorResponse{
+		StatusCode: http.StatusBadRequest,
 		Code:       "InvalidArgument",
-		Message:    msg,
+		Message:    "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.",
 		BucketName: bucketName,
 	}
 }
@@ -185,6 +197,7 @@ func ErrTransferAccelerationBucket(bucketName string) error {
 func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error {
 	msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize)
 	return ErrorResponse{
+		StatusCode: http.StatusBadRequest,
 		Code:       "EntityTooLarge",
 		Message:    msg,
 		BucketName: bucketName,
@@ -194,9 +207,10 @@ func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName st
 // ErrEntityTooSmall - Input size is smaller than supported minimum.
 func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error {
-	msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size '0B' for single PUT operation.", totalSize)
+	msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize)
 	return ErrorResponse{
-		Code:       "EntityTooLarge",
+		StatusCode: http.StatusBadRequest,
+		Code:       "EntityTooSmall",
 		Message:    msg,
 		BucketName: bucketName,
 		Key:        objectName,
@@ -205,9 +219,9 @@ func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error {
 // ErrUnexpectedEOF - Unexpected end of file reached.
 func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error {
-	msg := fmt.Sprintf("Data read ‘%s’ is not equal to the size ‘%s’ of the input Reader.",
-		strconv.FormatInt(totalRead, 10), strconv.FormatInt(totalSize, 10))
+	msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize)
 	return ErrorResponse{
+		StatusCode: http.StatusBadRequest,
 		Code:       "UnexpectedEOF",
 		Message:    msg,
 		BucketName: bucketName,
@@ -218,18 +232,20 @@ func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string)
 // ErrInvalidBucketName - Invalid bucket name response.
 func ErrInvalidBucketName(message string) error {
 	return ErrorResponse{
-		Code:      "InvalidBucketName",
-		Message:   message,
-		RequestID: "minio",
+		StatusCode: http.StatusBadRequest,
+		Code:       "InvalidBucketName",
+		Message:    message,
+		RequestID:  "minio",
 	}
 }
 
 // ErrInvalidObjectName - Invalid object name response.
 func ErrInvalidObjectName(message string) error {
 	return ErrorResponse{
-		Code:      "NoSuchKey",
-		Message:   message,
-		RequestID: "minio",
+		StatusCode: http.StatusNotFound,
+		Code:       "NoSuchKey",
+		Message:    message,
+		RequestID:  "minio",
 	}
 }
@@ -240,9 +256,10 @@ var ErrInvalidObjectPrefix = ErrInvalidObjectName
 // ErrInvalidArgument - Invalid argument response.
 func ErrInvalidArgument(message string) error {
 	return ErrorResponse{
-		Code:      "InvalidArgument",
-		Message:   message,
-		RequestID: "minio",
+		StatusCode: http.StatusBadRequest,
+		Code:       "InvalidArgument",
+		Message:    message,
+		RequestID:  "minio",
 	}
 }
@@ -250,9 +267,10 @@ func ErrInvalidArgument(message string) error {
 // The specified bucket does not have a bucket policy.
 func ErrNoSuchBucketPolicy(message string) error {
 	return ErrorResponse{
-		Code:      "NoSuchBucketPolicy",
-		Message:   message,
-		RequestID: "minio",
+		StatusCode: http.StatusNotFound,
+		Code:       "NoSuchBucketPolicy",
+		Message:    message,
+		RequestID:  "minio",
 	}
 }
@@ -260,8 +278,9 @@ func ErrNoSuchBucketPolicy(message string) error {
 // The specified API call is not supported
 func ErrAPINotSupported(message string) error {
 	return ErrorResponse{
-		Code:      "APINotSupported",
-		Message:   message,
-		RequestID: "minio",
+		StatusCode: http.StatusNotImplemented,
+		Code:       "APINotSupported",
+		Message:    message,
+		RequestID:  "minio",
 	}
 }

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016, 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -126,7 +127,7 @@ func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName
 			} else {
 				// First request is a Stat or Seek call.
 				// Only need to run a StatObject until an actual Read or ReadAt request comes through.
-				objectInfo, err = c.statObject(bucketName, objectName, StatObjectOptions{opts})
+				objectInfo, err = c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts})
 				if err != nil {
 					resCh <- getResponse{
 						Error: err,
@@ -144,7 +145,7 @@ func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName
 				if etag != "" {
 					opts.SetMatchETag(etag)
 				}
-				objectInfo, err := c.statObject(bucketName, objectName, StatObjectOptions{opts})
+				objectInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts})
 				if err != nil {
 					resCh <- getResponse{
 						Error: err,
@@ -612,10 +613,10 @@ func (c Client) getObject(ctx context.Context, bucketName, objectName string, op
 	// Execute GET on objectName.
 	resp, err := c.executeMethod(ctx, "GET", requestMetadata{
 		bucketName:   bucketName,
 		objectName:   objectName,
 		customHeader: opts.Header(),
-		contentSHA256Bytes: emptySHA256,
+		contentSHA256Hex: emptySHA256Hex,
 	})
 	if err != nil {
 		return nil, ObjectInfo{}, err

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016-17 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -81,9 +82,9 @@ func (c Client) getBucketPolicy(bucketName string) (policy.BucketAccessPolicy, e
 	// Execute GET on bucket to list objects.
 	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
 		bucketName:  bucketName,
 		queryValues: urlValues,
-		contentSHA256Bytes: emptySHA256,
+		contentSHA256Hex: emptySHA256Hex,
 	})
 	defer closeResponse(resp)

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -39,7 +40,7 @@ import (
 //
 func (c Client) ListBuckets() ([]BucketInfo, error) {
 	// Execute GET on service.
-	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{contentSHA256Bytes: emptySHA256})
+	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{contentSHA256Hex: emptySHA256Hex})
 	defer closeResponse(resp)
 	if err != nil {
 		return nil, err
@@ -217,9 +218,9 @@ func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken s
 	// Execute GET on bucket to list objects.
 	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
 		bucketName:  bucketName,
 		queryValues: urlValues,
-		contentSHA256Bytes: emptySHA256,
+		contentSHA256Hex: emptySHA256Hex,
 	})
 	defer closeResponse(resp)
 	if err != nil {
@@ -395,9 +396,9 @@ func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimit
 	// Execute GET on bucket to list objects.
 	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
 		bucketName:  bucketName,
 		queryValues: urlValues,
-		contentSHA256Bytes: emptySHA256,
+		contentSHA256Hex: emptySHA256Hex,
 	})
 	defer closeResponse(resp)
 	if err != nil {
@@ -574,9 +575,9 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker,
 	// Execute GET on bucketName to list multipart uploads.
 	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
 		bucketName:  bucketName,
 		queryValues: urlValues,
-		contentSHA256Bytes: emptySHA256,
+		contentSHA256Hex: emptySHA256Hex,
 	})
 	defer closeResponse(resp)
 	if err != nil {
@@ -692,10 +693,10 @@ func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, pa
 	// Execute GET on objectName to get list of parts.
 	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
 		bucketName:  bucketName,
 		objectName:  objectName,
 		queryValues: urlValues,
-		contentSHA256Bytes: emptySHA256,
+		contentSHA256Hex: emptySHA256Hex,
 	})
 	defer closeResponse(resp)
 	if err != nil {

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -48,9 +49,9 @@ func (c Client) getBucketNotification(bucketName string) (BucketNotification, er
 	// Execute GET on bucket to list objects.
 	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
 		bucketName:  bucketName,
 		queryValues: urlValues,
-		contentSHA256Bytes: emptySHA256,
+		contentSHA256Hex: emptySHA256Hex,
 	})
 	defer closeResponse(resp)
@@ -151,7 +152,7 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
 	// Check ARN partition to verify if listening bucket is supported
 	if s3utils.IsAmazonEndpoint(c.endpointURL) || s3utils.IsGoogleEndpoint(c.endpointURL) {
 		notificationInfoCh <- NotificationInfo{
-			Err: ErrAPINotSupported("Listening bucket notification is specific only to `minio` partitions"),
+			Err: ErrAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"),
 		}
 		return
 	}
@@ -172,12 +173,15 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
 		// Execute GET on bucket to list objects.
 		resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
 			bucketName:  bucketName,
 			queryValues: urlValues,
-			contentSHA256Bytes: emptySHA256,
+			contentSHA256Hex: emptySHA256Hex,
 		})
 		if err != nil {
-			continue
+			notificationInfoCh <- NotificationInfo{
+				Err: err,
+			}
+			return
 		}
 		// Validate http response, upon error return quickly.

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -18,6 +19,7 @@ package minio
 import (
 	"errors"
+	"net/http"
 	"net/url"
 	"time"
@ -25,16 +27,6 @@ import (
"github.com/minio/minio-go/pkg/s3utils" "github.com/minio/minio-go/pkg/s3utils"
) )
// supportedGetReqParams - supported request parameters for GET presigned request.
var supportedGetReqParams = map[string]struct{}{
"response-expires": {},
"response-content-type": {},
"response-cache-control": {},
"response-content-language": {},
"response-content-encoding": {},
"response-content-disposition": {},
}
// presignURL - Returns a presigned URL for an input 'method'. // presignURL - Returns a presigned URL for an input 'method'.
// Expires maximum is 7days - ie. 604800 and minimum is 1. // Expires maximum is 7days - ie. 604800 and minimum is 1.
func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
@@ -42,42 +34,27 @@ func (c Client) presignURL(method string, bucketName string, objectName string,
 	if method == "" {
 		return nil, ErrInvalidArgument("method cannot be empty.")
 	}
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return nil, err
-	}
-	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
 		return nil, err
 	}
-	if err := isValidExpiry(expires); err != nil {
+	if err = isValidExpiry(expires); err != nil {
 		return nil, err
 	}
 
 	// Convert expires into seconds.
 	expireSeconds := int64(expires / time.Second)
 	reqMetadata := requestMetadata{
 		presignURL:  true,
 		bucketName:  bucketName,
 		objectName:  objectName,
 		expires:     expireSeconds,
+		queryValues: reqParams,
 	}
-
-	// For "GET" we are handling additional request parameters to
-	// override its response headers.
-	if method == "GET" {
-		// Verify if input map has unsupported params, if yes exit.
-		for k := range reqParams {
-			if _, ok := supportedGetReqParams[k]; !ok {
-				return nil, ErrInvalidArgument(k + " unsupported request parameter for presigned GET.")
-			}
-		}
-		// Save the request parameters to be used in presigning for GET request.
-		reqMetadata.queryValues = reqParams
-	}
 
 	// Instantiate a new request.
 	// Since expires is set newRequest will presign the request.
-	req, err := c.newRequest(method, reqMetadata)
-	if err != nil {
+	var req *http.Request
+	if req, err = c.newRequest(method, reqMetadata); err != nil {
 		return nil, err
 	}
 	return req.URL, nil
@@ -88,6 +65,9 @@ func (c Client) presignURL(method string, bucketName string, objectName string,
 // upto 7days or a minimum of 1sec. Additionally you can override
 // a set of response headers using the query parameters.
 func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return nil, err
+	}
 	return c.presignURL("GET", bucketName, objectName, expires, reqParams)
 }
@@ -96,6 +76,9 @@ func (c Client) PresignedGetObject(bucketName string, objectName string, expires
 // upto 7days or a minimum of 1sec. Additionally you can override
 // a set of response headers using the query parameters.
 func (c Client) PresignedHeadObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return nil, err
+	}
 	return c.presignURL("HEAD", bucketName, objectName, expires, reqParams)
 }
@@ -103,6 +86,9 @@ func (c Client) PresignedHeadObject(bucketName string, objectName string, expire
 // without credentials. URL can have a maximum expiry of upto 7days
 // or a minimum of 1sec.
 func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return nil, err
+	}
 	return c.presignURL("PUT", bucketName, objectName, expires, nil)
 }

@@ -1,6 +1,6 @@
 /*
  * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2015, 2016, 2017 Minio, Inc.
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -76,8 +76,8 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) {
 		if err != nil {
 			return err
 		}
-		reqMetadata.contentMD5Bytes = sumMD5(createBucketConfigBytes)
-		reqMetadata.contentSHA256Bytes = sum256(createBucketConfigBytes)
+		reqMetadata.contentMD5Base64 = sumMD5Base64(createBucketConfigBytes)
+		reqMetadata.contentSHA256Hex = sum256Hex(createBucketConfigBytes)
 		reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes)
 		reqMetadata.contentLength = int64(len(createBucketConfigBytes))
 	}
@@ -162,12 +162,12 @@ func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAcces
 	policyBuffer := bytes.NewReader(policyBytes)
 	reqMetadata := requestMetadata{
 		bucketName:    bucketName,
 		queryValues:   urlValues,
 		contentBody:   policyBuffer,
 		contentLength: int64(len(policyBytes)),
-		contentMD5Bytes: sumMD5(policyBytes),
-		contentSHA256Bytes: sum256(policyBytes),
+		contentMD5Base64: sumMD5Base64(policyBytes),
+		contentSHA256Hex: sum256Hex(policyBytes),
 	}
 	// Execute PUT to upload a new bucket policy.
@@ -197,9 +197,9 @@ func (c Client) removeBucketPolicy(bucketName string) error {
 	// Execute DELETE on objectName.
 	resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
 		bucketName:  bucketName,
 		queryValues: urlValues,
-		contentSHA256Bytes: emptySHA256,
+		contentSHA256Hex: emptySHA256Hex,
 	})
 	defer closeResponse(resp)
 	if err != nil {
@@ -227,12 +227,12 @@ func (c Client) SetBucketNotification(bucketName string, bucketNotification Buck
 	notifBuffer := bytes.NewReader(notifBytes)
 	reqMetadata := requestMetadata{
 		bucketName:    bucketName,
 		queryValues:   urlValues,
 		contentBody:   notifBuffer,
 		contentLength: int64(len(notifBytes)),
-		contentMD5Bytes: sumMD5(notifBytes),
-		contentSHA256Bytes: sum256(notifBytes),
+		contentMD5Base64: sumMD5Base64(notifBytes),
+		contentSHA256Hex: sum256Hex(notifBytes),
 	}
 	// Execute PUT to upload a new bucket notification.

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -25,12 +26,6 @@ import (
 	"github.com/minio/minio-go/pkg/s3utils"
 )
 
-// Verify if reader is *os.File
-func isFile(reader io.Reader) (ok bool) {
-	_, ok = reader.(*os.File)
-	return
-}
-
 // Verify if reader is *minio.Object
 func isObject(reader io.Reader) (ok bool) {
 	_, ok = reader.(*Object)
@@ -40,6 +35,26 @@ func isObject(reader io.Reader) (ok bool) {
 // Verify if reader is a generic ReaderAt
 func isReadAt(reader io.Reader) (ok bool) {
 	_, ok = reader.(io.ReaderAt)
+	if ok {
+		var v *os.File
+		v, ok = reader.(*os.File)
+		if ok {
+			// Stdin, Stdout and Stderr all have *os.File type
+			// which happen to also be io.ReaderAt compatible
+			// we need to add special conditions for them to
+			// be ignored by this function.
+			for _, f := range []string{
+				"/dev/stdin",
+				"/dev/stdout",
+				"/dev/stderr",
+			} {
+				if f == v.Name() {
+					ok = false
+					break
+				}
+			}
+		}
+	}
 	return
 }

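Aside: the isReadAt change above exists because *os.File always has a ReadAt method, so os.Stdin would otherwise be routed to the offset-based parallel uploader even though a pipe cannot be read at arbitrary offsets. A minimal standalone sketch of the same idea (illustrative only; the helper name is made up and this is not the vendored code):

package main

import (
	"fmt"
	"io"
	"os"
)

// isPipeLikeFile mirrors the idea behind the isReadAt change: *os.File always
// satisfies io.ReaderAt at the type level, but stdin/stdout/stderr cannot
// honor offset reads, so they are filtered out by name.
func isPipeLikeFile(r io.Reader) bool {
	f, ok := r.(*os.File)
	if !ok {
		return false
	}
	switch f.Name() {
	case "/dev/stdin", "/dev/stdout", "/dev/stderr":
		return true
	}
	return false
}

func main() {
	_, ok := io.Reader(os.Stdin).(io.ReaderAt)
	fmt.Println("os.Stdin asserts to io.ReaderAt:", ok) // true, purely by type
	fmt.Println("but it is pipe-like and must be skipped:", isPipeLikeFile(os.Stdin))
}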
@@ -1,17 +1,18 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
  *
  *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 package minio

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

@@ -1,17 +1,18 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
  *
  *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 package minio

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -19,6 +20,8 @@ package minio
 import (
     "bytes"
     "context"
+    "encoding/base64"
+    "encoding/hex"
     "encoding/xml"
     "fmt"
     "io"
@@ -120,10 +123,22 @@ func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obje
         // as we read from the source.
         rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)
+        // Checksums..
+        var (
+            md5Base64 string
+            sha256Hex string
+        )
+        if hashSums["md5"] != nil {
+            md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"])
+        }
+        if hashSums["sha256"] != nil {
+            sha256Hex = hex.EncodeToString(hashSums["sha256"])
+        }
         // Proceed to upload the part.
         var objPart ObjectPart
         objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber,
-            hashSums["md5"], hashSums["sha256"], int64(length), opts.UserMetadata)
+            md5Base64, sha256Hex, int64(length), opts.UserMetadata)
         if err != nil {
             return totalUploadedSize, err
         }
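The new checksum block converts the raw digests in hashSums into the string forms the reworked uploadPart expects: base64 for MD5 (the Content-Md5 header format) and hex for SHA-256 (the x-amz-content-sha256 format). A self-contained sketch of that conversion, outside the library:

package main

import (
	"crypto/md5"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"fmt"
)

func main() {
	payload := []byte("example part payload")

	// Raw digests, as the old []byte-based API carried them.
	md5Sum := md5.Sum(payload)
	sha256Sum := sha256.Sum256(payload)

	// String forms, as the new API expects them.
	md5Base64 := base64.StdEncoding.EncodeToString(md5Sum[:])
	sha256Hex := hex.EncodeToString(sha256Sum[:])

	fmt.Println("Content-Md5:", md5Base64)
	fmt.Println("X-Amz-Content-Sha256:", sha256Hex)
}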
@@ -215,7 +230,7 @@ const serverEncryptionKeyPrefix = "x-amz-server-side-encryption"
 // uploadPart - Uploads a part in a multipart upload.
 func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader,
-    partNumber int, md5Sum, sha256Sum []byte, size int64, metadata map[string]string) (ObjectPart, error) {
+    partNumber int, md5Base64, sha256Hex string, size int64, metadata map[string]string) (ObjectPart, error) {
     // Input validation.
     if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return ObjectPart{}, err
@@ -254,14 +269,14 @@ func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID
     }
     reqMetadata := requestMetadata{
         bucketName: bucketName,
         objectName: objectName,
         queryValues: urlValues,
         customHeader: customHeader,
         contentBody: reader,
         contentLength: size,
-        contentMD5Bytes: md5Sum,
-        contentSHA256Bytes: sha256Sum,
+        contentMD5Base64: md5Base64,
+        contentSHA256Hex: sha256Hex,
     }
     // Execute PUT on each part.
@@ -308,12 +323,12 @@ func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectN
     // Instantiate all the complete multipart buffer.
     completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
     reqMetadata := requestMetadata{
         bucketName: bucketName,
         objectName: objectName,
         queryValues: urlValues,
         contentBody: completeMultipartUploadBuffer,
         contentLength: int64(len(completeMultipartUploadBytes)),
-        contentSHA256Bytes: sum256(completeMultipartUploadBytes),
+        contentSHA256Hex: sum256Hex(completeMultipartUploadBytes),
     }
     // Execute POST to complete multipart upload for an objectName.

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -33,19 +34,14 @@ import (
 //
 // Following code handles these types of readers.
 //
-// - *os.File
 // - *minio.Object
 // - Any reader which has a method 'ReadAt()'
 //
 func (c Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
     reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) {
-    // Verify if reader is *minio.Object, *os.File or io.ReaderAt.
-    // NOTE: Verification of object is kept for a specific purpose
-    // while it is going to be duck typed similar to io.ReaderAt.
-    // It is to indicate that *minio.Object implements io.ReaderAt.
-    // and such a functionality is used in the subsequent code path.
-    if isFile(reader) || !isObject(reader) && isReadAt(reader) {
+    if !isObject(reader) && isReadAt(reader) {
+        // Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader.
         n, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
     } else {
         n, err = c.putObjectMultipartStreamNoChecksum(ctx, bucketName, objectName, reader, size, opts)
@@ -171,7 +167,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketNa
             var objPart ObjectPart
             objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID,
                 sectionReader, uploadReq.PartNum,
-                nil, nil, partSize, opts.UserMetadata)
+                "", "", partSize, opts.UserMetadata)
             if err != nil {
                 uploadedPartsCh <- uploadedPartRes{
                     Size: 0,
@@ -284,7 +280,7 @@ func (c Client) putObjectMultipartStreamNoChecksum(ctx context.Context, bucketNa
         var objPart ObjectPart
         objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID,
             io.LimitReader(hookReader, partSize),
-            partNumber, nil, nil, partSize, opts.UserMetadata)
+            partNumber, "", "", partSize, opts.UserMetadata)
         if err != nil {
             return totalUploadedSize, err
         }
@@ -348,7 +344,12 @@ func (c Client) putObjectNoChecksum(ctx context.Context, bucketName, objectName
     }
     if size > 0 {
         if isReadAt(reader) && !isObject(reader) {
-            reader = io.NewSectionReader(reader.(io.ReaderAt), 0, size)
+            seeker, _ := reader.(io.Seeker)
+            offset, err := seeker.Seek(0, io.SeekCurrent)
+            if err != nil {
+                return 0, ErrInvalidArgument(err.Error())
+            }
+            reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size)
         }
     }
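The seek fix above matters when the caller hands in a reader whose position is not at the start: taking the section from offset 0 would silently re-read already-consumed bytes. A standalone illustration using strings.Reader, which, like *os.File, implements both io.ReaderAt and io.Seeker (names and data here are purely illustrative):

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	r := strings.NewReader("0123456789")

	// Simulate a caller that already consumed the first 4 bytes.
	if _, err := io.CopyN(io.Discard, r, 4); err != nil {
		panic(err)
	}

	// Capture the current position instead of assuming offset 0.
	offset, err := r.Seek(0, io.SeekCurrent)
	if err != nil {
		panic(err)
	}

	// Only the remaining bytes are wrapped for upload.
	section := io.NewSectionReader(r, offset, int64(r.Len()))
	rest, _ := io.ReadAll(section)
	fmt.Printf("uploaded from offset %d: %q\n", offset, rest) // "456789"
}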
@@ -358,7 +359,7 @@ func (c Client) putObjectNoChecksum(ctx context.Context, bucketName, objectName
     // This function does not calculate sha256 and md5sum for payload.
     // Execute put object.
-    st, err := c.putObjectDo(ctx, bucketName, objectName, readSeeker, nil, nil, size, opts)
+    st, err := c.putObjectDo(ctx, bucketName, objectName, readSeeker, "", "", size, opts)
     if err != nil {
         return 0, err
     }
@@ -370,7 +371,7 @@ func (c Client) putObjectNoChecksum(ctx context.Context, bucketName, objectName
 // putObjectDo - executes the put object http operation.
 // NOTE: You must have WRITE permissions on a bucket to add an object to it.
-func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, opts PutObjectOptions) (ObjectInfo, error) {
+func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (ObjectInfo, error) {
     // Input validation.
     if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return ObjectInfo{}, err
@@ -383,13 +384,13 @@ func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string,
     // Populate request metadata.
     reqMetadata := requestMetadata{
         bucketName: bucketName,
         objectName: objectName,
         customHeader: customHeader,
         contentBody: reader,
         contentLength: size,
-        contentMD5Bytes: md5Sum,
-        contentSHA256Bytes: sha256Sum,
+        contentMD5Base64: md5Base64,
+        contentSHA256Hex: sha256Hex,
     }
     // Execute PUT an objectName.

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -24,7 +25,6 @@ import (
     "net/http"
     "runtime/debug"
     "sort"
-    "strings"
     "github.com/minio/minio-go/pkg/encrypt"
     "github.com/minio/minio-go/pkg/s3utils"
@@ -78,7 +78,7 @@ func (opts PutObjectOptions) Header() (header http.Header) {
         header[amzHeaderMatDesc] = []string{opts.EncryptMaterials.GetDesc()}
     }
     for k, v := range opts.UserMetadata {
-        if !strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") && !isStandardHeader(k) {
+        if !isAmzHeader(k) && !isStandardHeader(k) && !isSSEHeader(k) {
             header["X-Amz-Meta-"+k] = []string{v}
         } else {
             header[k] = []string{v}
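The rewritten condition relies on the new isAmzHeader and isSSEHeader helpers added to utils.go further down: keys that are already x-amz-* or recognized standard headers pass through untouched, everything else gets the X-Amz-Meta- prefix. A rough standalone sketch of that classification (the helper bodies below are simplified stand-ins for illustration, not the vendored ones):

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// Simplified stand-ins for the vendored helpers.
func isAmzHeader(k string) bool {
	k = strings.ToLower(k)
	return strings.HasPrefix(k, "x-amz-meta-") || k == "x-amz-acl"
}

func isStandardHeader(k string) bool {
	switch strings.ToLower(k) {
	case "content-type", "cache-control", "content-encoding", "content-disposition":
		return true
	}
	return false
}

func isSSEHeader(k string) bool {
	return strings.HasPrefix(strings.ToLower(k), "x-amz-server-side-encryption")
}

func main() {
	userMetadata := map[string]string{
		"project":          "alpha",      // plain key: gets the X-Amz-Meta- prefix
		"X-Amz-Meta-owner": "build-bot",  // already prefixed: passed through
		"Content-Type":     "text/plain", // standard header: passed through
	}
	header := make(http.Header)
	for k, v := range userMetadata {
		if !isAmzHeader(k) && !isStandardHeader(k) && !isSSEHeader(k) {
			header["X-Amz-Meta-"+k] = []string{v}
		} else {
			header[k] = []string{v}
		}
	}
	fmt.Println(header)
}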
@@ -209,7 +209,7 @@ func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName
         // Proceed to upload the part.
         var objPart ObjectPart
         objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber,
-            nil, nil, int64(length), opts.UserMetadata)
+            "", "", int64(length), opts.UserMetadata)
         if err != nil {
             return totalUploadedSize, err
         }

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -38,8 +39,8 @@ func (c Client) RemoveBucket(bucketName string) error {
     }
     // Execute DELETE on bucket.
     resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
         bucketName: bucketName,
-        contentSHA256Bytes: emptySHA256,
+        contentSHA256Hex: emptySHA256Hex,
     })
     defer closeResponse(resp)
     if err != nil {
@@ -68,9 +69,9 @@ func (c Client) RemoveObject(bucketName, objectName string) error {
     }
     // Execute DELETE on objectName.
     resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
         bucketName: bucketName,
         objectName: objectName,
-        contentSHA256Bytes: emptySHA256,
+        contentSHA256Hex: emptySHA256Hex,
     })
     defer closeResponse(resp)
     if err != nil {
@@ -189,12 +190,12 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan
             removeBytes := generateRemoveMultiObjectsRequest(batch)
             // Execute GET on bucket to list objects.
             resp, err := c.executeMethod(context.Background(), "POST", requestMetadata{
                 bucketName: bucketName,
                 queryValues: urlValues,
                 contentBody: bytes.NewReader(removeBytes),
                 contentLength: int64(len(removeBytes)),
-                contentMD5Bytes: sumMD5(removeBytes),
-                contentSHA256Bytes: sum256(removeBytes),
+                contentMD5Base64: sumMD5Base64(removeBytes),
+                contentSHA256Hex: sum256Hex(removeBytes),
             })
             if err != nil {
                 for _, b := range batch {
@@ -253,10 +254,10 @@ func (c Client) abortMultipartUpload(ctx context.Context, bucketName, objectName
     // Execute DELETE on multipart upload.
     resp, err := c.executeMethod(ctx, "DELETE", requestMetadata{
         bucketName: bucketName,
         objectName: objectName,
         queryValues: urlValues,
-        contentSHA256Bytes: emptySHA256,
+        contentSHA256Hex: emptySHA256Hex,
     })
     defer closeResponse(resp)
     if err != nil {

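The RemoveObjects hunk above shows the multi-object DELETE body now being signed with sumMD5Base64/sum256Hex. For orientation, a hedged usage sketch of that public API as vendored here (endpoint, credentials, bucket and object names are placeholders; assumes the minio-go 4.0.x signature where the channel carries object names as strings):

package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("s3.example.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// RemoveObjects batches names into multi-object DELETE requests; each
	// request body carries the base64 MD5 and hex SHA-256 shown in the diff.
	objectsCh := make(chan string)
	go func() {
		defer close(objectsCh)
		for _, name := range []string{"logs/a.txt", "logs/b.txt"} {
			objectsCh <- name
		}
	}()

	for rErr := range client.RemoveObjects("my-bucket", objectsCh) {
		log.Println("failed to remove", rErr.ObjectName, ":", rErr.Err)
	}
}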
@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -35,8 +36,8 @@ func (c Client) BucketExists(bucketName string) (bool, error) {
     // Execute HEAD on bucketName.
     resp, err := c.executeMethod(context.Background(), "HEAD", requestMetadata{
         bucketName: bucketName,
-        contentSHA256Bytes: emptySHA256,
+        contentSHA256Hex: emptySHA256Hex,
     })
     defer closeResponse(resp)
     if err != nil {
@@ -89,11 +90,11 @@ func (c Client) StatObject(bucketName, objectName string, opts StatObjectOptions
     if err := s3utils.CheckValidObjectName(objectName); err != nil {
         return ObjectInfo{}, err
     }
-    return c.statObject(bucketName, objectName, opts)
+    return c.statObject(context.Background(), bucketName, objectName, opts)
 }
 // Lower level API for statObject supporting pre-conditions and range headers.
-func (c Client) statObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
+func (c Client) statObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
     // Input validation.
     if err := s3utils.CheckValidBucketName(bucketName); err != nil {
         return ObjectInfo{}, err
@@ -103,11 +104,11 @@ func (c Client) statObject(bucketName, objectName string, opts StatObjectOptions
     }
     // Execute HEAD on objectName.
-    resp, err := c.executeMethod(context.Background(), "HEAD", requestMetadata{
+    resp, err := c.executeMethod(ctx, "HEAD", requestMetadata{
         bucketName: bucketName,
         objectName: objectName,
-        contentSHA256Bytes: emptySHA256,
+        contentSHA256Hex: emptySHA256Hex,
         customHeader: opts.Header(),
     })
     defer closeResponse(resp)
     if err != nil {

@@ -1,6 +1,6 @@
 /*
  * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2015, 2016, 2017 Minio, Inc.
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -22,8 +22,6 @@ import (
     "context"
     "crypto/md5"
     "crypto/sha256"
-    "encoding/base64"
-    "encoding/hex"
     "errors"
     "fmt"
     "hash"
@@ -88,7 +86,7 @@ type Client struct {
 // Global constants.
 const (
     libraryName = "minio-go"
-    libraryVersion = "4.0.0"
+    libraryVersion = "4.0.4"
 )
 // User Agent should always following the below style.
@@ -179,18 +177,6 @@ func (r *lockedRandSource) Seed(seed int64) {
     r.lk.Unlock()
 }
-// redirectHeaders copies all headers when following a redirect URL.
-// This won't be needed anymore from go 1.8 (https://github.com/golang/go/issues/4800)
-func redirectHeaders(req *http.Request, via []*http.Request) error {
-    if len(via) == 0 {
-        return nil
-    }
-    for key, val := range via[0].Header {
-        req.Header[key] = val
-    }
-    return nil
-}
 // getRegionFromURL - parse region from URL if present.
 func getRegionFromURL(u url.URL) (region string) {
     region = ""
@@ -237,8 +223,7 @@ func privateNew(endpoint string, creds *credentials.Credentials, secure bool, re
     // Instantiate http client and bucket location cache.
     clnt.httpClient = &http.Client{
         Transport: defaultMinioTransport,
-        CheckRedirect: redirectHeaders,
     }
     // Sets custom region, if region is empty bucket location cache is used automatically.
@@ -357,11 +342,11 @@ type requestMetadata struct {
     expires int64
     // Generated by our internal code.
     bucketLocation string
     contentBody io.Reader
     contentLength int64
-    contentSHA256Bytes []byte
-    contentMD5Bytes []byte
+    contentMD5Base64 string // carries base64 encoded md5sum
+    contentSHA256Hex string // carries hex encoded sha256sum
 }
 // dumpHTTP - dump HTTP request and response.
@@ -730,8 +715,8 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
     }
     // set md5Sum for content protection.
-    if len(metadata.contentMD5Bytes) > 0 {
-        req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes))
+    if len(metadata.contentMD5Base64) > 0 {
+        req.Header.Set("Content-Md5", metadata.contentMD5Base64)
     }
     // For anonymous requests just return.
@@ -752,8 +737,8 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
     default:
         // Set sha256 sum for signature calculation only with signature version '4'.
         shaHeader := unsignedPayload
-        if len(metadata.contentSHA256Bytes) > 0 {
-            shaHeader = hex.EncodeToString(metadata.contentSHA256Bytes)
+        if metadata.contentSHA256Hex != "" {
+            shaHeader = metadata.contentSHA256Hex
         }
         req.Header.Set("X-Amz-Content-Sha256", shaHeader)

@@ -1,6 +1,6 @@
 /*
  * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2015, 2016, 2017 Minio, Inc.
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -18,7 +18,6 @@
 package minio
 import (
-    "encoding/hex"
     "net/http"
     "net/url"
     "path"
@@ -209,11 +208,9 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
     }
     // Set sha256 sum for signature calculation only with signature version '4'.
-    var contentSha256 string
+    contentSha256 := emptySHA256Hex
     if c.secure {
         contentSha256 = unsignedPayload
-    } else {
-        contentSha256 = hex.EncodeToString(sum256([]byte{}))
     }
     req.Header.Set("X-Amz-Content-Sha256", contentSha256)

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -59,8 +60,17 @@ func (c Core) CopyObject(sourceBucket, sourceObject, destBucket, destObject stri
     return c.copyObjectDo(context.Background(), sourceBucket, sourceObject, destBucket, destObject, metadata)
 }
+// CopyObjectPart - creates a part in a multipart upload by copying (a
+// part of) an existing object.
+func (c Core) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string,
+    partID int, startOffset, length int64, metadata map[string]string) (p CompletePart, err error) {
+    return c.copyObjectPartDo(context.Background(), srcBucket, srcObject, destBucket, destObject, uploadID,
+        partID, startOffset, length, metadata)
+}
 // PutObject - Upload object. Uploads using single PUT call.
-func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Sum, sha256Sum []byte, metadata map[string]string) (ObjectInfo, error) {
+func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, metadata map[string]string) (ObjectInfo, error) {
     opts := PutObjectOptions{}
     m := make(map[string]string)
     for k, v := range metadata {
@@ -77,7 +87,7 @@ func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Su
         }
     }
     opts.UserMetadata = m
-    return c.putObjectDo(context.Background(), bucket, object, data, md5Sum, sha256Sum, size, opts)
+    return c.putObjectDo(context.Background(), bucket, object, data, md5Base64, sha256Hex, size, opts)
 }
 // NewMultipartUpload - Initiates new multipart upload and returns the new uploadID.
@@ -92,14 +102,14 @@ func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, de
 }
 // PutObjectPart - Upload an object part.
-func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Sum, sha256Sum []byte) (ObjectPart, error) {
-    return c.PutObjectPartWithMetadata(bucket, object, uploadID, partID, data, size, md5Sum, sha256Sum, nil)
+func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string) (ObjectPart, error) {
+    return c.PutObjectPartWithMetadata(bucket, object, uploadID, partID, data, size, md5Base64, sha256Hex, nil)
 }
 // PutObjectPartWithMetadata - upload an object part with additional request metadata.
 func (c Core) PutObjectPartWithMetadata(bucket, object, uploadID string, partID int, data io.Reader,
-    size int64, md5Sum, sha256Sum []byte, metadata map[string]string) (ObjectPart, error) {
-    return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Sum, sha256Sum, size, metadata)
+    size int64, md5Base64, sha256Hex string, metadata map[string]string) (ObjectPart, error) {
+    return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, metadata)
 }
 // ListObjectParts - List uploaded parts of an incomplete upload.x
@@ -140,5 +150,5 @@ func (c Core) GetObject(bucketName, objectName string, opts GetObjectOptions) (i
 // StatObject is a lower level API implemented to support special
 // conditions matching etag, modtime on a request.
 func (c Core) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
-    return c.statObject(bucketName, objectName, opts)
+    return c.statObject(context.Background(), bucketName, objectName, opts)
 }

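Core.CopyObjectPart is the piece the S3 gateway needs for its new CopyObjectPart handler: it asks the backend to copy a byte range of an existing object into a part of an ongoing multipart upload, without the data passing through the gateway client. A hedged usage sketch (endpoint, credentials, bucket and object names are placeholders; assumes the Core API of the vendored 4.0.x client, including NewCore and NewMultipartUpload as used here):

package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials; Core exposes lower-level S3
	// primitives on top of the regular Client.
	core, err := minio.NewCore("s3.example.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	uploadID, err := core.NewMultipartUpload("dst-bucket", "dst-object", minio.PutObjectOptions{})
	if err != nil {
		log.Fatalln(err)
	}

	// Server-side copy the first 5 MiB of an existing object in as part 1
	// of the new upload; metadata is the new argument added for encryption.
	part, err := core.CopyObjectPart("src-bucket", "src-object", "dst-bucket", "dst-object",
		uploadID, 1, 0, 5*1024*1024, nil)
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("copied part %d, etag %s", part.PartNumber, part.ETag)
}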
@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

@@ -1,3 +1,20 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package minio
 import (
@@ -167,6 +184,28 @@ func (p *PostPolicy) SetSuccessStatusAction(status string) error {
     return nil
 }
+// SetUserMetadata - Set user metadata as a key/value couple.
+// Can be retrieved through a HEAD request or an event.
+func (p *PostPolicy) SetUserMetadata(key string, value string) error {
+    if strings.TrimSpace(key) == "" || key == "" {
+        return ErrInvalidArgument("Key is empty")
+    }
+    if strings.TrimSpace(value) == "" || value == "" {
+        return ErrInvalidArgument("Value is empty")
+    }
+    headerName := fmt.Sprintf("x-amz-meta-%s", key)
+    policyCond := policyCondition{
+        matchType: "eq",
+        condition: fmt.Sprintf("$%s", headerName),
+        value: value,
+    }
+    if err := p.addNewPolicy(policyCond); err != nil {
+        return err
+    }
+    p.formData[headerName] = value
+    return nil
+}
 // addNewPolicy - internal helper to validate adding new policies.
 func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
     if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" {

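SetUserMetadata, added above, pins an x-amz-meta-* form field into a browser POST policy so an upload is rejected unless the client sends exactly that value. A hedged usage sketch (endpoint, credentials and names are placeholders; assumes the PresignedPostPolicy API of this client version):

package main

import (
	"fmt"
	"log"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("s3.example.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	policy := minio.NewPostPolicy()
	policy.SetBucket("uploads")
	policy.SetKey("user-report.csv")
	policy.SetExpires(time.Now().UTC().Add(15 * time.Minute))

	// New in this vendored version: require the browser form to send
	// x-amz-meta-owner with exactly this value.
	if err = policy.SetUserMetadata("owner", "analytics-team"); err != nil {
		log.Fatalln(err)
	}

	uploadURL, formData, err := client.PresignedPostPolicy(policy)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println("POST to:", uploadURL)
	for k, v := range formData {
		fmt.Printf("  form field %s=%s\n", k, v)
	}
}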
@@ -1,3 +1,20 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package minio
 import "time"

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

@@ -2,7 +2,7 @@
 /*
  * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

@@ -1,5 +1,6 @@
 /*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -19,6 +20,8 @@ package minio
 import (
     "crypto/md5"
     "crypto/sha256"
+    "encoding/base64"
+    "encoding/hex"
     "encoding/xml"
     "io"
     "io/ioutil"
@@ -38,18 +41,18 @@ func xmlDecoder(body io.Reader, v interface{}) error {
     return d.Decode(v)
 }
-// sum256 calculate sha256 sum for an input byte array.
-func sum256(data []byte) []byte {
+// sum256Hex calculate sha256sum for an input byte array, returns hex encoded.
+func sum256Hex(data []byte) string {
     hash := sha256.New()
     hash.Write(data)
-    return hash.Sum(nil)
+    return hex.EncodeToString(hash.Sum(nil))
 }
-// sumMD5 calculate sumMD5 sum for an input byte array.
-func sumMD5(data []byte) []byte {
+// sumMD5Base64 calculate md5sum for an input byte array, returns base64 encoded.
+func sumMD5Base64(data []byte) string {
     hash := md5.New()
     hash.Write(data)
-    return hash.Sum(nil)
+    return base64.StdEncoding.EncodeToString(hash.Sum(nil))
 }
 // getEndpointURL - construct a new endpoint.
@@ -109,10 +112,13 @@ func closeResponse(resp *http.Response) {
     }
 }
-var emptySHA256 = sum256(nil)
-// Sentinel URL is the default url value which is invalid.
-var sentinelURL = url.URL{}
+var (
+    // Hex encoded string of nil sha256sum bytes.
+    emptySHA256Hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+
+    // Sentinel URL is the default url value which is invalid.
+    sentinelURL = url.URL{}
+)
 // Verify if input endpoint URL is valid.
 func isValidEndpointURL(endpointURL url.URL) error {
@@ -230,8 +236,9 @@ var cseHeaders = []string{
 // isStandardHeader returns true if header is a supported header and not a custom header
 func isStandardHeader(headerKey string) bool {
+    key := strings.ToLower(headerKey)
     for _, header := range supportedHeaders {
-        if strings.Compare(strings.ToLower(headerKey), header) == 0 {
+        if strings.ToLower(header) == key {
             return true
         }
     }
@@ -250,3 +257,31 @@ func isCSEHeader(headerKey string) bool {
     }
     return false
 }
+// sseHeaders is list of server side encryption headers
+var sseHeaders = []string{
+    "x-amz-server-side-encryption",
+    "x-amz-server-side-encryption-aws-kms-key-id",
+    "x-amz-server-side-encryption-context",
+    "x-amz-server-side-encryption-customer-algorithm",
+    "x-amz-server-side-encryption-customer-key",
+    "x-amz-server-side-encryption-customer-key-MD5",
+}
+// isSSEHeader returns true if header is a server side encryption header.
+func isSSEHeader(headerKey string) bool {
+    key := strings.ToLower(headerKey)
+    for _, h := range sseHeaders {
+        if strings.ToLower(h) == key {
+            return true
+        }
+    }
+    return false
+}
+// isAmzHeader returns true if header is a x-amz-meta-* or x-amz-acl header.
+func isAmzHeader(headerKey string) bool {
+    key := strings.ToLower(headerKey)
+    return strings.HasPrefix(key, "x-amz-meta-") || key == "x-amz-acl"
+}

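emptySHA256Hex, introduced above, is simply the precomputed hex SHA-256 of zero bytes, so requests with no body no longer hash an empty slice on every call. The constant can be checked with a couple of lines:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	// SHA-256 over no input, hex encoded, matches the emptySHA256Hex literal.
	sum := sha256.Sum256(nil)
	fmt.Println(hex.EncodeToString(sum[:]))
	// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}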
@@ -342,10 +342,10 @@
         "revisionTime": "2016-02-29T08:42:30-08:00"
     },
     {
-        "checksumSHA1": "EkdIh5Mk2bRiARtdoqUfnBuyndk=",
+        "checksumSHA1": "EjYaYvofsB9eAyRXSwRLXX12778=",
         "path": "github.com/minio/minio-go",
-        "revision": "9690dc6c40e6ef271727848c04f974e801212ac1",
-        "revisionTime": "2017-10-02T19:34:27Z"
+        "revision": "b3f9ea44f05f3fc176fc2c510e7922be09f709d4",
+        "revisionTime": "2017-11-20T20:37:48Z"
     },
     {
         "checksumSHA1": "5juljGXPkBWENR2Os7dlnPQER48=",
