From 87fb911d3887820f3f89f209f01043d6cf083e9f Mon Sep 17 00:00:00 2001
From: Harshavardhana
Date: Mon, 15 May 2017 00:52:33 -0700
Subject: [PATCH] Rename structs for azure and s3 gateway to be consistent. (#4347)
---
 cmd/gateway-azure-anonymous.go   | 14 ++++----
 cmd/gateway-azure-unsupported.go | 10 +++---
 cmd/gateway-azure.go             | 56 ++++++++++++++++----------------
 cmd/gateway-s3-anonymous.go      | 10 +++---
 cmd/gateway-s3-unsupported.go    | 10 +++---
 cmd/gateway-s3.go                | 54 +++++++++++++++---------------
 6 files changed, 77 insertions(+), 77 deletions(-)

diff --git a/cmd/gateway-azure-anonymous.go b/cmd/gateway-azure-anonymous.go
index 48b936343..c8f35e265 100644
--- a/cmd/gateway-azure-anonymous.go
+++ b/cmd/gateway-azure-anonymous.go
@@ -30,7 +30,7 @@ import (
 )
 // AnonGetBucketInfo - Get bucket metadata from azure anonymously.
-func (a AzureObjects) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error) {
+func (a *azureObjects) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error) {
 	url, err := url.Parse(a.client.GetBlobURL(bucket, ""))
 	if err != nil {
 		return bucketInfo, azureToObjectError(traceError(err))
 	}
@@ -40,7 +40,7 @@ func (a AzureObjects) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, e
 	if err != nil {
 		return bucketInfo, azureToObjectError(traceError(err), bucket)
 	}
-	defer resp.Body.Close()
+	resp.Body.Close()
 
 	if resp.StatusCode != http.StatusOK {
 		return bucketInfo, azureToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket)), bucket)
@@ -59,14 +59,14 @@ func (a AzureObjects) AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, e
 
 // AnonPutObject - SendPUT request without authentication.
 // This is needed when clients send PUT requests on objects that can be uploaded without auth.
-func (a AzureObjects) AnonPutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, err error) {
+func (a *azureObjects) AnonPutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, err error) {
 	// azure doesn't support anonymous put
 	return ObjectInfo{}, traceError(NotImplemented{})
 }
 
 // AnonGetObject - SendGET request without authentication.
 // This is needed when clients send GET requests on objects that can be downloaded without auth.
-func (a AzureObjects) AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error) {
+func (a *azureObjects) AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error) {
 	u := a.client.GetBlobURL(bucket, object)
 	req, err := http.NewRequest("GET", u, nil)
 	if err != nil {
@@ -95,12 +95,12 @@ func (a AzureObjects) AnonGetObject(bucket, object string, startOffset int64, le
 
 // AnonGetObjectInfo - Send HEAD request without authentication and convert the
 // result to ObjectInfo.
-func (a AzureObjects) AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) {
+func (a *azureObjects) AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) {
 	resp, err := http.Head(a.client.GetBlobURL(bucket, object))
 	if err != nil {
 		return objInfo, azureToObjectError(traceError(err), bucket, object)
 	}
-	defer resp.Body.Close()
+	resp.Body.Close()
 
 	if resp.StatusCode != http.StatusOK {
 		return objInfo, azureToObjectError(traceError(anonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
@@ -135,7 +135,7 @@ func (a AzureObjects) AnonGetObjectInfo(bucket, object string) (objInfo ObjectIn
 }
 
 // AnonListObjects - Use Azure equivalent ListBlobs.
-func (a AzureObjects) AnonListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
+func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
 	params := storage.ListBlobsParameters{
 		Prefix:    prefix,
 		Marker:    marker,
diff --git a/cmd/gateway-azure-unsupported.go b/cmd/gateway-azure-unsupported.go
index c7468e367..c292275cb 100644
--- a/cmd/gateway-azure-unsupported.go
+++ b/cmd/gateway-azure-unsupported.go
@@ -17,27 +17,27 @@
 package cmd
 
 // HealBucket - Not relevant.
-func (a AzureObjects) HealBucket(bucket string) error {
+func (a *azureObjects) HealBucket(bucket string) error {
 	return traceError(NotImplemented{})
 }
 
 // ListBucketsHeal - Not relevant.
-func (a AzureObjects) ListBucketsHeal() (buckets []BucketInfo, err error) {
+func (a *azureObjects) ListBucketsHeal() (buckets []BucketInfo, err error) {
 	return nil, traceError(NotImplemented{})
 }
 
 // HealObject - Not relevant.
-func (a AzureObjects) HealObject(bucket, object string) (int, int, error) {
+func (a *azureObjects) HealObject(bucket, object string) (int, int, error) {
 	return 0, 0, traceError(NotImplemented{})
 }
 
 // ListObjectsHeal - Not relevant.
-func (a AzureObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
+func (a *azureObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
 	return ListObjectsInfo{}, traceError(NotImplemented{})
 }
 
 // ListUploadsHeal - Not relevant.
-func (a AzureObjects) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
+func (a *azureObjects) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
 	delimiter string, maxUploads int) (ListMultipartsInfo, error) {
 	return ListMultipartsInfo{}, traceError(NotImplemented{})
 }
diff --git a/cmd/gateway-azure.go b/cmd/gateway-azure.go
index 22082b11c..2c5e87949 100644
--- a/cmd/gateway-azure.go
+++ b/cmd/gateway-azure.go
@@ -64,8 +64,8 @@ func (a *azureMultipartMetaInfo) del(key string) {
 	delete(a.meta, key)
 }
 
-// AzureObjects - Implements Object layer for Azure blob storage.
-type AzureObjects struct {
+// azureObjects - Implements Object layer for Azure blob storage.
+type azureObjects struct {
 	client   storage.BlobStorageClient // Azure sdk client
 	metaInfo azureMultipartMetaInfo
 }
@@ -122,16 +122,16 @@ func azureToObjectError(err error, params ...string) error {
 	return e
 }
 
-// Inits azure blob storage client and returns AzureObjects.
+// Inits azure blob storage client and returns azureObjects.
 func newAzureLayer(endPoint string, account, key string, secure bool) (GatewayLayer, error) {
 	if endPoint == "" {
 		endPoint = storage.DefaultBaseURL
 	}
 	c, err := storage.NewClient(account, key, endPoint, globalAzureAPIVersion, secure)
 	if err != nil {
-		return AzureObjects{}, err
+		return &azureObjects{}, err
 	}
-	return &AzureObjects{
+	return &azureObjects{
 		client: c.GetBlobService(),
 		metaInfo: azureMultipartMetaInfo{
 			meta: make(map[string]map[string]string),
@@ -142,30 +142,30 @@ func newAzureLayer(endPoint string, account, key string, secure bool) (GatewayLa
 
 // Shutdown - save any gateway metadata to disk
 // if necessary and reload upon next restart.
-func (a AzureObjects) Shutdown() error {
+func (a *azureObjects) Shutdown() error {
 	// TODO
 	return nil
 }
 
 // StorageInfo - Not relevant to Azure backend.
-func (a AzureObjects) StorageInfo() StorageInfo {
+func (a *azureObjects) StorageInfo() StorageInfo {
 	return StorageInfo{}
 }
 
 // MakeBucket - Create a new container on azure backend.
-func (a AzureObjects) MakeBucket(bucket string) error {
+func (a *azureObjects) MakeBucket(bucket string) error {
 	// will never be called, only satisfy ObjectLayer interface
 	return traceError(NotImplemented{})
 }
 
 // MakeBucketWithLocation - Create a new container on azure backend.
-func (a AzureObjects) MakeBucketWithLocation(bucket, location string) error {
+func (a *azureObjects) MakeBucketWithLocation(bucket, location string) error {
 	err := a.client.CreateContainer(bucket, storage.ContainerAccessTypePrivate)
 	return azureToObjectError(traceError(err), bucket)
 }
 
 // GetBucketInfo - Get bucket metadata..
-func (a AzureObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
+func (a *azureObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
 	// Azure does not have an equivalent call, hence use ListContainers.
 	resp, err := a.client.ListContainers(storage.ListContainersParameters{
 		Prefix: bucket,
@@ -188,7 +188,7 @@ func (a AzureObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
 }
 
 // ListBuckets - Lists all azure containers, uses Azure equivalent ListContainers.
-func (a AzureObjects) ListBuckets() (buckets []BucketInfo, err error) {
+func (a *azureObjects) ListBuckets() (buckets []BucketInfo, err error) {
 	resp, err := a.client.ListContainers(storage.ListContainersParameters{})
 	if err != nil {
 		return nil, azureToObjectError(traceError(err))
@@ -207,13 +207,13 @@ func (a AzureObjects) ListBuckets() (buckets []BucketInfo, err error) {
 }
 
 // DeleteBucket - delete a container on azure, uses Azure equivalent DeleteContainer.
-func (a AzureObjects) DeleteBucket(bucket string) error {
+func (a *azureObjects) DeleteBucket(bucket string) error {
 	return azureToObjectError(traceError(a.client.DeleteContainer(bucket)), bucket)
 }
 
 // ListObjects - lists all blobs on azure with in a container filtered by prefix
 // and marker, uses Azure equivalent ListBlobs.
-func (a AzureObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
+func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
 	resp, err := a.client.ListBlobs(bucket, storage.ListBlobsParameters{
 		Prefix:    prefix,
 		Marker:    marker,
@@ -250,7 +250,7 @@ func (a AzureObjects) ListObjects(bucket, prefix, marker, delimiter string, maxK
 //
 // startOffset indicates the starting read location of the object.
 // length indicates the total length of the object.
-func (a AzureObjects) GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) error {
+func (a *azureObjects) GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) error {
 	byteRange := fmt.Sprintf("%d-", startOffset)
 	if length > 0 && startOffset > 0 {
 		byteRange = fmt.Sprintf("%d-%d", startOffset, startOffset+length-1)
@@ -273,7 +273,7 @@ func (a AzureObjects) GetObject(bucket, object string, startOffset int64, length
 
 // GetObjectInfo - reads blob metadata properties and replies back ObjectInfo,
 // uses zure equivalent GetBlobProperties.
-func (a AzureObjects) GetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) {
+func (a *azureObjects) GetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) {
 	prop, err := a.client.GetBlobProperties(bucket, object)
 	if err != nil {
 		return objInfo, azureToObjectError(traceError(err), bucket, object)
@@ -311,7 +311,7 @@ func canonicalMetadata(metadata map[string]string) (canonical map[string]string)
 
 // PutObject - Create a new blob with the incoming data,
 // uses Azure equivalent CreateBlockBlobFromReader.
-func (a AzureObjects) PutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, err error) {
+func (a *azureObjects) PutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, err error) {
 	var sha256Writer hash.Hash
 	teeReader := data
 	if sha256sum != "" {
@@ -339,7 +339,7 @@ func (a AzureObjects) PutObject(bucket, object string, size int64, data io.Reade
 
 // CopyObject - Copies a blob from source container to destination container.
 // Uses Azure equivalent CopyBlob API.
-func (a AzureObjects) CopyObject(srcBucket, srcObject, destBucket, destObject string, metadata map[string]string) (objInfo ObjectInfo, err error) {
+func (a *azureObjects) CopyObject(srcBucket, srcObject, destBucket, destObject string, metadata map[string]string) (objInfo ObjectInfo, err error) {
 	err = a.client.CopyBlob(destBucket, destObject, a.client.GetBlobURL(srcBucket, srcObject))
 	if err != nil {
 		return objInfo, azureToObjectError(traceError(err), srcBucket, srcObject)
@@ -349,7 +349,7 @@ func (a AzureObjects) CopyObject(srcBucket, srcObject, destBucket, destObject st
 
 // DeleteObject - Deletes a blob on azure container, uses Azure
 // equivalent DeleteBlob API.
-func (a AzureObjects) DeleteObject(bucket, object string) error {
+func (a *azureObjects) DeleteObject(bucket, object string) error {
 	err := a.client.DeleteBlob(bucket, object, nil)
 	if err != nil {
 		return azureToObjectError(traceError(err), bucket, object)
@@ -361,7 +361,7 @@ func (a AzureObjects) DeleteObject(bucket, object string) error {
 // FIXME: Full ListMultipartUploads is not supported yet. It is supported just enough to help our client libs to
 // support re-uploads. a.client.ListBlobs() can be made to return entries which include uncommitted blobs using
 // which we need to filter out the committed blobs to get the list of uncommitted blobs.
-func (a AzureObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
+func (a *azureObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
 	result.MaxUploads = maxUploads
 	result.Prefix = prefix
 	result.Delimiter = delimiter
@@ -377,7 +377,7 @@ func (a AzureObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMa
 }
 
 // NewMultipartUpload - Use Azure equivalent CreateBlockBlob.
-func (a AzureObjects) NewMultipartUpload(bucket, object string, metadata map[string]string) (uploadID string, err error) {
+func (a *azureObjects) NewMultipartUpload(bucket, object string, metadata map[string]string) (uploadID string, err error) {
 	// Azure doesn't return a unique upload ID and we use object name in place of it. Azure allows multiple uploads to
 	// co-exist as long as the user keeps the blocks uploaded (in block blobs) unique amongst concurrent upload attempts.
 	// Each concurrent client, keeps its own blockID list which it can commit.
@@ -393,7 +393,7 @@ func (a AzureObjects) NewMultipartUpload(bucket, object string, metadata map[str
 }
 
 // CopyObjectPart - Not implemented.
-func (a AzureObjects) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error) {
+func (a *azureObjects) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error) {
 	return info, traceError(NotImplemented{})
 }
 
@@ -421,7 +421,7 @@ func azureParseBlockID(blockID string) (int, string, error) {
 }
 
 // PutObjectPart - Use Azure equivalent PutBlockWithLength.
-func (a AzureObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (info PartInfo, err error) {
+func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (info PartInfo, err error) {
 	if meta := a.metaInfo.get(uploadID); meta == nil {
 		return info, traceError(InvalidUploadID{})
 	}
@@ -453,7 +453,7 @@ func (a AzureObjects) PutObjectPart(bucket, object, uploadID string, partID int,
 }
 
 // ListObjectParts - Use Azure equivalent GetBlockList.
-func (a AzureObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error) {
+func (a *azureObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error) {
 	result.Bucket = bucket
 	result.Object = object
 	result.UploadID = uploadID
@@ -502,13 +502,13 @@ func (a AzureObjects) ListObjectParts(bucket, object, uploadID string, partNumbe
 
 // AbortMultipartUpload - Not Implemented.
 // There is no corresponding API in azure to abort an incomplete upload. The uncommmitted blocks
 // gets deleted after one week.
-func (a AzureObjects) AbortMultipartUpload(bucket, object, uploadID string) error {
+func (a *azureObjects) AbortMultipartUpload(bucket, object, uploadID string) error {
 	a.metaInfo.del(uploadID)
 	return nil
 }
 
 // CompleteMultipartUpload - Use Azure equivalent PutBlockList.
-func (a AzureObjects) CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []completePart) (objInfo ObjectInfo, err error) {
+func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []completePart) (objInfo ObjectInfo, err error) {
 	meta := a.metaInfo.get(uploadID)
 	if meta == nil {
 		return objInfo, traceError(InvalidUploadID{uploadID})
 	}
@@ -598,7 +598,7 @@ func azureListBlobsGetParameters(p storage.ListBlobsParameters) url.Values {
 // storage.ContainerAccessTypePrivate - none in minio terminology
 // As the common denominator for minio and azure is readonly and none, we support
 // these two policies at the bucket level.
-func (a AzureObjects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
+func (a *azureObjects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
 	var policies []BucketAccessPolicy
 
 	for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) {
@@ -626,7 +626,7 @@ func (a AzureObjects) SetBucketPolicies(bucket string, policyInfo policy.BucketA
 }
 
 // GetBucketPolicies - Get the container ACL and convert it to canonical []bucketAccessPolicy
-func (a AzureObjects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
+func (a *azureObjects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
 	policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
 	perm, err := a.client.GetContainerPermissions(bucket, 0, "")
 	if err != nil {
@@ -644,7 +644,7 @@ func (a AzureObjects) GetBucketPolicies(bucket string) (policy.BucketAccessPolic
 }
 
 // DeleteBucketPolicies - Set the container ACL to "private"
-func (a AzureObjects) DeleteBucketPolicies(bucket string) error {
+func (a *azureObjects) DeleteBucketPolicies(bucket string) error {
 	perm := storage.ContainerPermissions{
 		AccessType:     storage.ContainerAccessTypePrivate,
 		AccessPolicies: nil,
diff --git a/cmd/gateway-s3-anonymous.go b/cmd/gateway-s3-anonymous.go
index 4fbb1b716..147e8ea43 100644
--- a/cmd/gateway-s3-anonymous.go
+++ b/cmd/gateway-s3-anonymous.go
@@ -24,7 +24,7 @@ import (
 )
 
 // AnonPutObject creates a new object anonymously with the incoming data,
-func (l *s3Gateway) AnonPutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (ObjectInfo, error) {
+func (l *s3Objects) AnonPutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (ObjectInfo, error) {
 	var sha256sumBytes []byte
 
 	var err error
@@ -54,7 +54,7 @@ func (l *s3Gateway) AnonPutObject(bucket string, object string, size int64, data
 }
 
 // AnonGetObject - Get object anonymously
-func (l *s3Gateway) AnonGetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error {
+func (l *s3Objects) AnonGetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error {
 	r := minio.NewGetReqHeaders()
 	if err := r.SetRange(startOffset, startOffset+length-1); err != nil {
 		return s3ToObjectError(traceError(err), bucket, key)
@@ -74,7 +74,7 @@ func (l *s3Gateway) AnonGetObject(bucket string, key string, startOffset int64,
 }
 
 // AnonGetObjectInfo - Get object info anonymously
-func (l *s3Gateway) AnonGetObjectInfo(bucket string, object string) (ObjectInfo, error) {
+func (l *s3Objects) AnonGetObjectInfo(bucket string, object string) (ObjectInfo, error) {
 	r := minio.NewHeadReqHeaders()
 	oi, err := l.anonClient.StatObject(bucket, object, r)
 	if err != nil {
@@ -85,7 +85,7 @@ func (l *s3Gateway) AnonGetObjectInfo(bucket string, object string) (ObjectInfo,
 }
 
 // AnonListObjects - List objects anonymously
-func (l *s3Gateway) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
+func (l *s3Objects) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
 	result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
 	if err != nil {
 		return ListObjectsInfo{}, s3ToObjectError(traceError(err), bucket)
@@ -95,7 +95,7 @@ func (l *s3Gateway) AnonListObjects(bucket string, prefix string, marker string,
 }
 
 // AnonGetBucketInfo - Get bucket metadata anonymously.
-func (l *s3Gateway) AnonGetBucketInfo(bucket string) (BucketInfo, error) {
+func (l *s3Objects) AnonGetBucketInfo(bucket string) (BucketInfo, error) {
 	if exists, err := l.anonClient.BucketExists(bucket); err != nil {
 		return BucketInfo{}, s3ToObjectError(traceError(err), bucket)
 	} else if !exists {
diff --git a/cmd/gateway-s3-unsupported.go b/cmd/gateway-s3-unsupported.go
index fbcabed76..13d167b8a 100644
--- a/cmd/gateway-s3-unsupported.go
+++ b/cmd/gateway-s3-unsupported.go
@@ -17,26 +17,26 @@
 package cmd
 
 // HealBucket - Not relevant.
-func (l *s3Gateway) HealBucket(bucket string) error {
+func (l *s3Objects) HealBucket(bucket string) error {
 	return traceError(NotImplemented{})
 }
 
 // ListBucketsHeal - Not relevant.
-func (l *s3Gateway) ListBucketsHeal() (buckets []BucketInfo, err error) {
+func (l *s3Objects) ListBucketsHeal() (buckets []BucketInfo, err error) {
 	return []BucketInfo{}, traceError(NotImplemented{})
 }
 
 // HealObject - Not relevant.
-func (l *s3Gateway) HealObject(bucket string, object string) (int, int, error) {
+func (l *s3Objects) HealObject(bucket string, object string) (int, int, error) {
 	return 0, 0, traceError(NotImplemented{})
 }
 
 // ListObjectsHeal - Not relevant.
-func (l *s3Gateway) ListObjectsHeal(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
+func (l *s3Objects) ListObjectsHeal(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
 	return ListObjectsInfo{}, traceError(NotImplemented{})
 }
 
 // ListUploadsHeal - Not relevant.
-func (l *s3Gateway) ListUploadsHeal(bucket string, prefix string, marker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
+func (l *s3Objects) ListUploadsHeal(bucket string, prefix string, marker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
 	return ListMultipartsInfo{}, traceError(NotImplemented{})
 }
diff --git a/cmd/gateway-s3.go b/cmd/gateway-s3.go
index 2a09d5fc7..f2c2e49b5 100644
--- a/cmd/gateway-s3.go
+++ b/cmd/gateway-s3.go
@@ -91,8 +91,8 @@ func s3ToObjectError(err error, params ...string) error {
 	return e
 }
 
-// s3Gateway implements gateway for Minio and S3 compatible object storage servers.
-type s3Gateway struct {
+// s3Objects implements gateway for Minio and S3 compatible object storage servers.
+type s3Objects struct {
 	Client     *minio.Core
 	anonClient *minio.Core
 }
@@ -115,7 +115,7 @@ func newS3Gateway(endpoint string, accessKey, secretKey string, secure bool) (Ga
 		return nil, err
 	}
 
-	return &s3Gateway{
+	return &s3Objects{
 		Client:     client,
 		anonClient: anonClient,
 	}, nil
@@ -123,24 +123,24 @@ func newS3Gateway(endpoint string, accessKey, secretKey string, secure bool) (Ga
 
 // Shutdown saves any gateway metadata to disk
 // if necessary and reload upon next restart.
-func (l *s3Gateway) Shutdown() error {
+func (l *s3Objects) Shutdown() error {
 	// TODO
 	return nil
 }
 
 // StorageInfo is not relevant to S3 backend.
-func (l *s3Gateway) StorageInfo() StorageInfo {
+func (l *s3Objects) StorageInfo() StorageInfo {
 	return StorageInfo{}
 }
 
 // MakeBucket creates a new container on S3 backend.
-func (l *s3Gateway) MakeBucket(bucket string) error {
+func (l *s3Objects) MakeBucket(bucket string) error {
 	// will never be called, only satisfy ObjectLayer interface
 	return traceError(NotImplemented{})
 }
 
 // MakeBucket creates a new container on S3 backend.
-func (l *s3Gateway) MakeBucketWithLocation(bucket, location string) error {
+func (l *s3Objects) MakeBucketWithLocation(bucket, location string) error {
 	err := l.Client.MakeBucket(bucket, location)
 	if err != nil {
 		return s3ToObjectError(traceError(err), bucket)
@@ -149,7 +149,7 @@ func (l *s3Gateway) MakeBucketWithLocation(bucket, location string) error {
 }
 
 // GetBucketInfo gets bucket metadata..
-func (l *s3Gateway) GetBucketInfo(bucket string) (BucketInfo, error) {
+func (l *s3Objects) GetBucketInfo(bucket string) (BucketInfo, error) {
 	buckets, err := l.Client.ListBuckets()
 	if err != nil {
 		return BucketInfo{}, s3ToObjectError(traceError(err), bucket)
@@ -170,7 +170,7 @@ func (l *s3Gateway) GetBucketInfo(bucket string) (BucketInfo, error) {
 }
 
 // ListBuckets lists all S3 buckets
-func (l *s3Gateway) ListBuckets() ([]BucketInfo, error) {
+func (l *s3Objects) ListBuckets() ([]BucketInfo, error) {
 	buckets, err := l.Client.ListBuckets()
 	if err != nil {
 		return nil, err
@@ -188,7 +188,7 @@ func (l *s3Gateway) ListBuckets() ([]BucketInfo, error) {
 }
 
 // DeleteBucket deletes a bucket on S3
-func (l *s3Gateway) DeleteBucket(bucket string) error {
+func (l *s3Objects) DeleteBucket(bucket string) error {
 	err := l.Client.RemoveBucket(bucket)
 	if err != nil {
 		return s3ToObjectError(traceError(err), bucket)
@@ -197,7 +197,7 @@ func (l *s3Gateway) DeleteBucket(bucket string) error {
 }
 
 // ListObjects lists all blobs in S3 bucket filtered by prefix
-func (l *s3Gateway) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
+func (l *s3Objects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
 	result, err := l.Client.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
 	if err != nil {
 		return ListObjectsInfo{}, s3ToObjectError(traceError(err), bucket)
@@ -207,7 +207,7 @@ func (l *s3Gateway) ListObjects(bucket string, prefix string, marker string, del
 }
 
 // ListObjectsV2 lists all blobs in S3 bucket filtered by prefix
-func (l *s3Gateway) ListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (ListObjectsV2Info, error) {
+func (l *s3Objects) ListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (ListObjectsV2Info, error) {
 	result, err := l.Client.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys)
 	if err != nil {
 		return ListObjectsV2Info{}, s3ToObjectError(traceError(err), bucket)
@@ -266,7 +266,7 @@ func fromMinioClientListBucketResult(bucket string, result minio.ListBucketResul
 //
 // startOffset indicates the starting read location of the object.
 // length indicates the total length of the object.
-func (l *s3Gateway) GetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error {
+func (l *s3Objects) GetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer) error {
 	r := minio.NewGetReqHeaders()
 	if err := r.SetRange(startOffset, startOffset+length-1); err != nil {
 		return s3ToObjectError(traceError(err), bucket, key)
@@ -303,7 +303,7 @@ func fromMinioClientObjectInfo(bucket string, oi minio.ObjectInfo) ObjectInfo {
 }
 
 // GetObjectInfo reads object info and replies back ObjectInfo
-func (l *s3Gateway) GetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) {
+func (l *s3Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) {
 	r := minio.NewHeadReqHeaders()
 	oi, err := l.Client.StatObject(bucket, object, r)
 	if err != nil {
@@ -314,7 +314,7 @@ func (l *s3Gateway) GetObjectInfo(bucket string, object string) (objInfo ObjectI
 }
 
 // PutObject creates a new object with the incoming data,
-func (l *s3Gateway) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (ObjectInfo, error) {
+func (l *s3Objects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (ObjectInfo, error) {
 	var sha256sumBytes []byte
 
 	var err error
@@ -344,7 +344,7 @@ func (l *s3Gateway) PutObject(bucket string, object string, size int64, data io.
 }
 
 // CopyObject copies a blob from source container to destination container.
-func (l *s3Gateway) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string, metadata map[string]string) (ObjectInfo, error) {
+func (l *s3Objects) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string, metadata map[string]string) (ObjectInfo, error) {
 	err := l.Client.CopyObject(destBucket, destObject, path.Join(srcBucket, srcObject), minio.CopyConditions{})
 	if err != nil {
 		return ObjectInfo{}, s3ToObjectError(traceError(err), srcBucket, srcObject)
@@ -359,7 +359,7 @@ func (l *s3Gateway) CopyObject(srcBucket string, srcObject string, destBucket st
 }
 
 // DeleteObject deletes a blob in bucket
-func (l *s3Gateway) DeleteObject(bucket string, object string) error {
+func (l *s3Objects) DeleteObject(bucket string, object string) error {
 	err := l.Client.RemoveObject(bucket, object)
 	if err != nil {
 		return s3ToObjectError(traceError(err), bucket, object)
@@ -407,7 +407,7 @@ func fromMinioClientListMultipartsInfo(lmur minio.ListMultipartUploadsResult) Li
 }
 
 // ListMultipartUploads lists all multipart uploads.
-func (l *s3Gateway) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
+func (l *s3Objects) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
 	result, err := l.Client.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
 	if err != nil {
 		return ListMultipartsInfo{}, err
@@ -435,12 +435,12 @@ func toMinioClientMetadata(metadata map[string]string) map[string][]string {
 }
 
 // NewMultipartUpload upload object in multiple parts
-func (l *s3Gateway) NewMultipartUpload(bucket string, object string, metadata map[string]string) (uploadID string, err error) {
+func (l *s3Objects) NewMultipartUpload(bucket string, object string, metadata map[string]string) (uploadID string, err error) {
 	return l.Client.NewMultipartUpload(bucket, object, toMinioClientMetadata(metadata))
 }
 
 // CopyObjectPart copy part of object to other bucket and object
-func (l *s3Gateway) CopyObjectPart(srcBucket string, srcObject string, destBucket string, destObject string, uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error) {
+func (l *s3Objects) CopyObjectPart(srcBucket string, srcObject string, destBucket string, destObject string, uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error) {
 	// FIXME: implement CopyObjectPart
 	return PartInfo{}, traceError(NotImplemented{})
 }
@@ -456,7 +456,7 @@ func fromMinioClientObjectPart(op minio.ObjectPart) PartInfo {
 }
 
 // PutObjectPart puts a part of object in bucket
-func (l *s3Gateway) PutObjectPart(bucket string, object string, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (PartInfo, error) {
+func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (PartInfo, error) {
 	md5HexBytes, err := hex.DecodeString(md5Hex)
 	if err != nil {
 		return PartInfo{}, err
@@ -501,7 +501,7 @@ func fromMinioClientListPartsInfo(lopr minio.ListObjectPartsResult) ListPartsInf
 }
 
 // ListObjectParts returns all object parts for specified object in specified bucket
-func (l *s3Gateway) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (ListPartsInfo, error) {
+func (l *s3Objects) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (ListPartsInfo, error) {
 	result, err := l.Client.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
 	if err != nil {
 		return ListPartsInfo{}, err
@@ -511,7 +511,7 @@ func (l *s3Gateway) ListObjectParts(bucket string, object string, uploadID strin
 }
 
 // AbortMultipartUpload aborts a ongoing multipart upload
-func (l *s3Gateway) AbortMultipartUpload(bucket string, object string, uploadID string) error {
+func (l *s3Objects) AbortMultipartUpload(bucket string, object string, uploadID string) error {
 	return l.Client.AbortMultipartUpload(bucket, object, uploadID)
 }
 
@@ -533,7 +533,7 @@ func toMinioClientCompleteParts(parts []completePart) []minio.CompletePart {
 }
 
 // CompleteMultipartUpload completes ongoing multipart upload and finalizes object
-func (l *s3Gateway) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []completePart) (ObjectInfo, error) {
+func (l *s3Objects) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []completePart) (ObjectInfo, error) {
 	err := l.Client.CompleteMultipartUpload(bucket, object, uploadID, toMinioClientCompleteParts(uploadedParts))
 	if err != nil {
 		return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
@@ -543,7 +543,7 @@ func (l *s3Gateway) CompleteMultipartUpload(bucket string, object string, upload
 }
 
 // SetBucketPolicies sets policy on bucket
-func (l *s3Gateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
+func (l *s3Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
 	if err := l.Client.PutBucketPolicy(bucket, policyInfo); err != nil {
 		return s3ToObjectError(traceError(err), bucket, "")
 	}
@@ -552,7 +552,7 @@ func (l *s3Gateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAcc
 }
 
 // GetBucketPolicies will get policy on bucket
-func (l *s3Gateway) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
+func (l *s3Objects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
 	policyInfo, err := l.Client.GetBucketPolicy(bucket)
 	if err != nil {
 		return policy.BucketAccessPolicy{}, s3ToObjectError(traceError(err), bucket, "")
@@ -561,7 +561,7 @@ func (l *s3Gateway) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy,
 }
 
 // DeleteBucketPolicies deletes all policies on bucket
-func (l *s3Gateway) DeleteBucketPolicies(bucket string) error {
+func (l *s3Objects) DeleteBucketPolicies(bucket string) error {
 	if err := l.Client.PutBucketPolicy(bucket, policy.BucketAccessPolicy{}); err != nil {
 		return s3ToObjectError(traceError(err), bucket, "")
 	}
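
For readers skimming the patch, the mechanical pattern is: the exported `AzureObjects` and `s3Gateway` types become the unexported `azureObjects` and `s3Objects`, the Azure methods move from value receivers to pointer receivers (the S3 methods already used pointer receivers), and the constructors keep returning a pointer that satisfies the gateway interface. Below is a minimal, self-contained Go sketch of that receiver-and-constructor pattern; it is not part of the patch, and `gatewayLayer`, `demoObjects`, and `newDemoLayer` are illustrative names rather than types from the MinIO code base.

```go
package main

import "fmt"

// gatewayLayer is an illustrative stand-in for the gateway interface in the
// patch; the real interface has many more methods.
type gatewayLayer interface {
	StorageInfo() string
}

// demoObjects mirrors the convention the patch settles on: an unexported
// struct whose methods all use pointer receivers.
type demoObjects struct {
	endpoint string
}

// StorageInfo has a pointer receiver, so only *demoObjects (not the value
// type demoObjects) satisfies gatewayLayer.
func (d *demoObjects) StorageInfo() string {
	return "backend at " + d.endpoint
}

// newDemoLayer returns the concrete type as the interface, the same shape as
// newAzureLayer/newS3Gateway returning &azureObjects{...} / &s3Objects{...}.
func newDemoLayer(endpoint string) (gatewayLayer, error) {
	return &demoObjects{endpoint: endpoint}, nil
}

func main() {
	layer, err := newDemoLayer("play.example.net")
	if err != nil {
		fmt.Println("init failed:", err)
		return
	}
	fmt.Println(layer.StorageInfo())
}
```

Keeping every method on the pointer receiver, as the renamed `azureObjects` does here, means the concrete value is never copied per call and the type satisfies the interface only through `*azureObjects`, matching how the constructor already returns a pointer.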