minio/cmd/bucket-handlers.go

/*
 * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"encoding/base64"
	"encoding/xml"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"strings"
	"sync"

	mux "github.com/gorilla/mux"
	"github.com/minio/minio-go/pkg/set"
)

// http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
// Enforces bucket policies for a bucket for a given action.
func enforceBucketPolicy(bucket string, action string, reqURL *url.URL) (s3Error APIErrorCode) {
	// Verify if bucket actually exists.
	if err := checkBucketExist(bucket, newObjectLayerFn()); err != nil {
		err = errorCause(err)
		switch err.(type) {
		case BucketNameInvalid:
			// Return error for invalid bucket name.
			return ErrInvalidBucketName
		case BucketNotFound:
			// For no bucket found we return NoSuchBucket instead.
			return ErrNoSuchBucket
		}
		errorIf(err, "Unable to read bucket policy.")
		// Return internal error for any other errors so that we can investigate.
		return ErrInternalError
	}

	// Fetch bucket policy, if policy is not set return access denied.
	policy := globalBucketPolicies.GetBucketPolicy(bucket)
	if policy == nil {
		return ErrAccessDenied
	}

	// Construct resource in 'arn:aws:s3:::examplebucket/object' format.
	resource := bucketARNPrefix + strings.TrimSuffix(strings.TrimPrefix(reqURL.Path, "/"), "/")

	// Get conditions for policy verification.
	conditionKeyMap := make(map[string]set.StringSet)
	for queryParam := range reqURL.Query() {
		conditionKeyMap[queryParam] = set.CreateStringSet(reqURL.Query().Get(queryParam))
	}

	// Validate action, resource and conditions with current policy statements.
	if !bucketPolicyEvalStatements(action, resource, conditionKeyMap, policy.Statements) {
		return ErrAccessDenied
	}
	return ErrNone
}
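
// Example (illustrative): for a request such as GET /examplebucket/?prefix=photos,
// the resource above evaluates to "arn:aws:s3:::examplebucket" and the condition
// key map to {"prefix": {"photos"}}, which are then matched against the stored
// policy statements for the requested action (e.g. "s3:ListBucket").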

// Check if the action is allowed on the bucket/prefix.
func isBucketActionAllowed(action, bucket, prefix string) bool {
	policy := globalBucketPolicies.GetBucketPolicy(bucket)
	if policy == nil {
		return false
	}
	resource := bucketARNPrefix + path.Join(bucket, prefix)
	var conditionKeyMap map[string]set.StringSet
	// Validate action, resource and conditions with current policy statements.
	if !bucketPolicyEvalStatements(action, resource, conditionKeyMap, policy.Statements) {
		return false
	}
	return true
}

// GetBucketLocationHandler - GET Bucket location.
// -------------------------
// This operation returns bucket location.
func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}

	s3Error := checkRequestAuthType(r, bucket, "s3:GetBucketLocation", globalMinioDefaultRegion)
	if s3Error == ErrInvalidRegion {
		// Clients like boto3 send getBucketLocation() call signed with region that is configured.
		s3Error = checkRequestAuthType(r, "", "s3:GetBucketLocation", serverConfig.GetRegion())
	}
	if s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}

	if _, err := objectAPI.GetBucketInfo(bucket); err != nil {
		errorIf(err, "Unable to fetch bucket info.")
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}

	// Generate response.
	encodedSuccessResponse := encodeResponse(LocationResponse{})
	// Get current region.
	region := serverConfig.GetRegion()
	if region != globalMinioDefaultRegion {
		encodedSuccessResponse = encodeResponse(LocationResponse{
			Location: region,
		})
	}

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)
}
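
// Example response (illustrative), assuming the server region is configured as "us-west-1":
//
//   <LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">us-west-1</LocationConstraint>
//
// For the default region an empty LocationConstraint element is returned.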

// ListMultipartUploadsHandler - GET Bucket (List Multipart uploads)
// -------------------------
// This operation lists in-progress multipart uploads. An in-progress
// multipart upload is a multipart upload that has been initiated,
// using the Initiate Multipart Upload request, but has not yet been
// completed or aborted. This operation returns at most 1,000 multipart
// uploads in the response.
//
func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}

	if s3Error := checkRequestAuthType(r, bucket, "s3:ListBucketMultipartUploads", serverConfig.GetRegion()); s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}

	prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, _ := getBucketMultipartResources(r.URL.Query())
	if maxUploads < 0 {
		writeErrorResponse(w, ErrInvalidMaxUploads, r.URL)
		return
	}
	if keyMarker != "" {
		// Marker not common with prefix is not implemented.
		if !strings.HasPrefix(keyMarker, prefix) {
			writeErrorResponse(w, ErrNotImplemented, r.URL)
			return
		}
	}

	listMultipartsInfo, err := objectAPI.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
	if err != nil {
		errorIf(err, "Unable to list multipart uploads.")
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}

	// Generate response.
	response := generateListMultipartUploadsResponse(bucket, listMultipartsInfo)
	encodedSuccessResponse := encodeResponse(response)

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)
}
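
// Example request (illustrative):
//
//   GET /examplebucket/?uploads&prefix=logs/&max-uploads=100 HTTP/1.1
//
// lists at most 100 in-progress multipart uploads whose keys begin with "logs/".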

// ListBucketsHandler - GET Service.
// -----------
// This implementation of the GET operation returns a list of all buckets
// owned by the authenticated sender of the request.
func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}

	// ListBuckets does not have any bucket action.
	s3Error := checkRequestAuthType(r, "", "", globalMinioDefaultRegion)
	if s3Error == ErrInvalidRegion {
		// Clients like boto3 send listBuckets() call signed with region that is configured.
		s3Error = checkRequestAuthType(r, "", "", serverConfig.GetRegion())
	}
	if s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}

	// Invoke the list buckets.
	bucketsInfo, err := objectAPI.ListBuckets()
	if err != nil {
		errorIf(err, "Unable to list buckets.")
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}

	// Generate response.
	response := generateListBucketsResponse(bucketsInfo)
	encodedSuccessResponse := encodeResponse(response)

	// Write response.
	writeSuccessResponseXML(w, encodedSuccessResponse)
}
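
// Example response body (illustrative, abbreviated):
//
//   <ListAllMyBucketsResult>
//     <Owner>...</Owner>
//     <Buckets>
//       <Bucket><Name>examplebucket</Name><CreationDate>...</CreationDate></Bucket>
//     </Buckets>
//   </ListAllMyBucketsResult>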

// DeleteMultipleObjectsHandler - deletes multiple objects.
func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}

	if s3Error := checkRequestAuthType(r, bucket, "s3:DeleteObject", serverConfig.GetRegion()); s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}

	// Content-Length is required and should be non-zero.
	// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
	if r.ContentLength <= 0 {
		writeErrorResponse(w, ErrMissingContentLength, r.URL)
		return
	}

	// Content-Md5 is required and should be set.
	// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
	if _, ok := r.Header["Content-Md5"]; !ok {
		writeErrorResponse(w, ErrMissingContentMD5, r.URL)
		return
	}

	// Allocate incoming content length bytes.
	deleteXMLBytes := make([]byte, r.ContentLength)

	// Read incoming body XML bytes.
	if _, err := io.ReadFull(r.Body, deleteXMLBytes); err != nil {
		errorIf(err, "Unable to read HTTP body.")
		writeErrorResponse(w, ErrInternalError, r.URL)
		return
	}

	// Unmarshal list of keys to be deleted.
	deleteObjects := &DeleteObjectsRequest{}
	if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
		errorIf(err, "Unable to unmarshal delete objects request XML.")
		writeErrorResponse(w, ErrMalformedXML, r.URL)
		return
	}

	var wg = &sync.WaitGroup{} // Allocate a new wait group.
	var dErrs = make([]error, len(deleteObjects.Objects))

	// Delete all requested objects in parallel.
	for index, object := range deleteObjects.Objects {
		wg.Add(1)
		go func(i int, obj ObjectIdentifier) {
			defer wg.Done()
			dErr := objectAPI.DeleteObject(bucket, obj.ObjectName)
			if dErr != nil {
				dErrs[i] = dErr
			}
		}(index, object)
	}
	wg.Wait()

	// Collect deleted objects and errors if any.
	var deletedObjects []ObjectIdentifier
	var deleteErrors []DeleteError
	for index, err := range dErrs {
		object := deleteObjects.Objects[index]
		// Successfully deleted objects are collected separately.
		if err == nil {
			deletedObjects = append(deletedObjects, object)
			continue
		}
		if _, ok := errorCause(err).(ObjectNotFound); ok {
			// If the object is not found it should be
			// accounted as deleted as per S3 spec.
			deletedObjects = append(deletedObjects, object)
			continue
		}
		errorIf(err, "Unable to delete object. %s", object.ObjectName)
		// Errors during delete are collected separately.
		deleteErrors = append(deleteErrors, DeleteError{
			Code:    errorCodeResponse[toAPIErrorCode(err)].Code,
			Message: errorCodeResponse[toAPIErrorCode(err)].Description,
			Key:     object.ObjectName,
		})
	}

	// Generate response.
	response := generateMultiDeleteResponse(deleteObjects.Quiet, deletedObjects, deleteErrors)
	encodedSuccessResponse := encodeResponse(response)

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)

	// Notify deleted event for objects.
	for _, dobj := range deletedObjects {
		eventNotify(eventData{
			Type:   ObjectRemovedDelete,
			Bucket: bucket,
			ObjInfo: ObjectInfo{
				Name: dobj.ObjectName,
			},
			ReqParams: map[string]string{
				"sourceIPAddress": r.RemoteAddr,
			},
		})
	}
}
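
// Example request body (illustrative):
//
//   <Delete>
//     <Quiet>false</Quiet>
//     <Object><Key>photos/2016/january.jpg</Key></Object>
//     <Object><Key>photos/2016/february.jpg</Key></Object>
//   </Delete>
//
// Keys that were removed (or were already absent) appear as <Deleted> entries
// in the response; per-key failures appear as <Error> entries.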

// PutBucketHandler - PUT Bucket
// ----------
// This implementation of the PUT operation creates a new bucket for an authenticated request.
func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}

	// PutBucket does not have any bucket action.
	s3Error := checkRequestAuthType(r, "", "", globalMinioDefaultRegion)
	if s3Error == ErrInvalidRegion {
		// Clients like boto3 send putBucket() call signed with region that is configured.
		s3Error = checkRequestAuthType(r, "", "", serverConfig.GetRegion())
	}
	if s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	// Validate if incoming location constraint is valid, reject
	// requests which do not follow valid region requirements.
	if s3Error := isValidLocationConstraint(r); s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}

	bucketLock := globalNSMutex.NewNSLock(bucket, "")
	bucketLock.Lock()
	defer bucketLock.Unlock()

	// Proceed to creating a bucket.
	err := objectAPI.MakeBucket(bucket)
	if err != nil {
		errorIf(err, "Unable to create a bucket.")
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}

	// Make sure to add Location information here only for bucket.
	w.Header().Set("Location", getLocation(r))
	writeSuccessResponseHeadersOnly(w)
}
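
// Example request body (illustrative), sent by clients creating a bucket
// outside the default region:
//
//   <CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
//     <LocationConstraint>us-west-1</LocationConstraint>
//   </CreateBucketConfiguration>
//
// An empty request body is also accepted (the default region is assumed).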

// PostPolicyBucketHandler - POST policy
// ----------
// This implementation of the POST operation handles object creation with a specified
// signature policy in multipart/form-data
func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}

	// Here the parameter is the size of the form data that should
	// be loaded in memory, the remaining being put in temporary files.
	reader, err := r.MultipartReader()
	if err != nil {
		errorIf(err, "Unable to initialize multipart reader.")
		writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
		return
	}

	fileBody, fileName, formValues, err := extractPostPolicyFormValues(reader)
	if err != nil {
		errorIf(err, "Unable to parse form values.")
		writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
		return
	}

	bucket := mux.Vars(r)["bucket"]
	formValues["Bucket"] = bucket

	if fileName != "" && strings.Contains(formValues["Key"], "${filename}") {
		// S3 feature to replace ${filename} found in Key form field
		// by the filename attribute passed in multipart.
		formValues["Key"] = strings.Replace(formValues["Key"], "${filename}", fileName, -1)
	}
	object := formValues["Key"]

	// Verify policy signature.
	apiErr := doesPolicySignatureMatch(formValues)
	if apiErr != ErrNone {
		writeErrorResponse(w, apiErr, r.URL)
		return
	}

	policyBytes, err := base64.StdEncoding.DecodeString(formValues["Policy"])
	if err != nil {
		writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
		return
	}

	postPolicyForm, err := parsePostPolicyForm(string(policyBytes))
	if err != nil {
		writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
		return
	}

	// Make sure formValues adhere to policy restrictions.
	if apiErr = checkPostPolicy(formValues, postPolicyForm); apiErr != ErrNone {
		writeErrorResponse(w, apiErr, r.URL)
		return
	}

	// Use rangeReader to ensure that object size is within expected range.
	lengthRange := postPolicyForm.Conditions.ContentLengthRange
	if lengthRange.Valid {
		// If policy restricted the size of the object.
		fileBody = &rangeReader{
			Reader: fileBody,
			Min:    lengthRange.Min,
			Max:    lengthRange.Max,
		}
	} else {
		// Default values of min/max size of the object.
		fileBody = &rangeReader{
			Reader: fileBody,
			Min:    0,
			Max:    maxObjectSize,
		}
	}

	// Extract metadata to be saved from received Form.
	metadata := extractMetadataFromForm(formValues)
	sha256sum := ""

	objectLock := globalNSMutex.NewNSLock(bucket, object)
	objectLock.Lock()
	defer objectLock.Unlock()

	objInfo, err := objectAPI.PutObject(bucket, object, -1, fileBody, metadata, sha256sum)
	if err != nil {
		errorIf(err, "Unable to create object.")
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}

	w.Header().Set("ETag", "\""+objInfo.MD5Sum+"\"")
	w.Header().Set("Location", getObjectLocation(bucket, object))

	successRedirect := formValues[http.CanonicalHeaderKey("success_action_redirect")]
	successStatus := formValues[http.CanonicalHeaderKey("success_action_status")]

	if successStatus == "" && successRedirect == "" {
		writeSuccessNoContent(w)
	} else {
		if successRedirect != "" {
			redirectURL := successRedirect + "?" + fmt.Sprintf("bucket=%s&key=%s&etag=%s",
				bucket,
				getURLEncodedName(object),
				getURLEncodedName("\""+objInfo.MD5Sum+"\""))
			writeRedirectSeeOther(w, redirectURL)
		} else {
			// Decide what http response to send depending on success_action_status parameter.
			switch successStatus {
			case "201":
				resp := encodeResponse(PostResponse{
					Bucket:   bucket,
					Key:      object,
					ETag:     "\"" + objInfo.MD5Sum + "\"",
					Location: getObjectLocation(bucket, object),
				})
				writeResponse(w, http.StatusCreated, resp, "application/xml")
			case "200":
				writeSuccessResponseHeadersOnly(w)
			default:
				writeSuccessNoContent(w)
			}
		}
	}

	// Notify object created event.
	eventNotify(eventData{
		Type:    ObjectCreatedPost,
		Bucket:  bucket,
		ObjInfo: objInfo,
		ReqParams: map[string]string{
			"sourceIPAddress": r.RemoteAddr,
		},
	})
}
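
// Example form fields (illustrative) for a browser-based POST upload:
//
//   key:                    uploads/${filename}
//   policy:                 <base64-encoded policy document>
//   success_action_status:  201
//
// plus the signature fields for the signature version in use and a trailing
// "file" part carrying the object data.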

// HeadBucketHandler - HEAD Bucket
// ----------
// This operation is useful to determine if a bucket exists.
// The operation returns a 200 OK if the bucket exists and you
// have permission to access it. Otherwise, the operation might
// return responses such as 404 Not Found and 403 Forbidden.
func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponseHeadersOnly(w, ErrServerNotInitialized)
		return
	}

	if s3Error := checkRequestAuthType(r, bucket, "s3:ListBucket", serverConfig.GetRegion()); s3Error != ErrNone {
		writeErrorResponseHeadersOnly(w, s3Error)
		return
	}

	bucketLock := globalNSMutex.NewNSLock(bucket, "")
	bucketLock.RLock()
	defer bucketLock.RUnlock()

	if _, err := objectAPI.GetBucketInfo(bucket); err != nil {
		errorIf(err, "Unable to fetch bucket info.")
		writeErrorResponseHeadersOnly(w, toAPIErrorCode(err))
		return
	}

	writeSuccessResponseHeadersOnly(w)
}

// DeleteBucketHandler - Delete bucket
func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}

	// DeleteBucket does not have any bucket action.
	if s3Error := checkRequestAuthType(r, "", "", serverConfig.GetRegion()); s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	bucketLock := globalNSMutex.NewNSLock(bucket, "")
	bucketLock.Lock()
	defer bucketLock.Unlock()

	// Attempt to delete bucket.
	if err := objectAPI.DeleteBucket(bucket); err != nil {
		errorIf(err, "Unable to delete a bucket.")
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}

	// Delete bucket access policy, if present - ignore any errors.
	_ = removeBucketPolicy(bucket, objectAPI)

	// Delete notification config, if present - ignore any errors.
	_ = removeNotificationConfig(bucket, objectAPI)

	// Delete listener config, if present - ignore any errors.
	_ = removeListenerConfig(bucket, objectAPI)

	// Write success response.
	writeSuccessNoContent(w)
}