fs: Clean up naming so Golang errors are called 'e' and probe errors are called 'err'

- Put the ACL checks back; remove them once bucket
  policy is implemented.
- Move FTW (File Tree Walk) into ioutils package.
master
Harshavardhana 9 years ago
parent b49f21ec82
commit 7a3409c309
  1. 18
      accesslog-handler.go
  2. 10
      api-auth-utils.go
  3. 35
      api-signature.go
  4. 98
      bucket-handlers.go
  5. 7
      main.go
  6. 63
      object-handlers.go
  7. 5
      pkg/fs/fs-bucket-listobjects.go
  8. 95
      pkg/fs/fs-multipart.go
  9. 30
      pkg/fs/fs-object.go
  10. 8
      pkg/fs/fs_test.go
  11. 83
      pkg/ioutils/filepath.go
  12. 36
      server-main.go
  13. 19
      server_fs_test.go
  14. 7
      signature-handler.go

@ -59,10 +59,10 @@ type LogMessage struct {
}
func (h *accessLogHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
message, perr := getLogMessage(w, req)
fatalIf(perr.Trace(), "Unable to extract http message.", nil)
_, err := h.accessLogFile.Write(message)
fatalIf(probe.NewError(err), "Writing to log file failed.", nil)
message, err := getLogMessage(w, req)
fatalIf(err.Trace(), "Unable to extract http message.", nil)
_, e := h.accessLogFile.Write(message)
fatalIf(probe.NewError(e), "Writing to log file failed.", nil)
h.Handler.ServeHTTP(w, req)
}
@ -103,9 +103,9 @@ func getLogMessage(w http.ResponseWriter, req *http.Request) ([]byte, *probe.Err
// logMessage.HTTP.Request = req
logMessage.Duration = time.Now().UTC().Sub(logMessage.StartTime)
js, err := json.Marshal(logMessage)
if err != nil {
return nil, probe.NewError(err)
js, e := json.Marshal(logMessage)
if e != nil {
return nil, probe.NewError(e)
}
js = append(js, byte('\n')) // append a new line
return js, nil
@ -113,8 +113,8 @@ func getLogMessage(w http.ResponseWriter, req *http.Request) ([]byte, *probe.Err
// AccessLogHandler logs requests
func AccessLogHandler(h http.Handler) http.Handler {
file, err := os.OpenFile("access.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
fatalIf(probe.NewError(err), "Unable to open access log.", nil)
file, e := os.OpenFile("access.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
fatalIf(probe.NewError(e), "Unable to open access log.", nil)
return &accessLogHandler{Handler: h, accessLogFile: file}
}

@ -42,9 +42,8 @@ func isValidAccessKey(accessKeyID string) bool {
// takes input as size in integer
func generateAccessKeyID() ([]byte, *probe.Error) {
alpha := make([]byte, minioAccessID)
_, err := rand.Read(alpha)
if err != nil {
return nil, probe.NewError(err)
if _, e := rand.Read(alpha); e != nil {
return nil, probe.NewError(e)
}
for i := 0; i < minioAccessID; i++ {
alpha[i] = alphaNumericTable[alpha[i]%byte(len(alphaNumericTable))]
@ -55,9 +54,8 @@ func generateAccessKeyID() ([]byte, *probe.Error) {
// generateSecretAccessKey - generate random base64 numeric value from a random seed.
func generateSecretAccessKey() ([]byte, *probe.Error) {
rb := make([]byte, minioSecretID)
_, err := rand.Read(rb)
if err != nil {
return nil, probe.NewError(err)
if _, e := rand.Read(rb); e != nil {
return nil, probe.NewError(e)
}
return []byte(base64.StdEncoding.EncodeToString(rb))[:minioSecretID], nil
}

@ -173,21 +173,20 @@ func extractHTTPFormValues(reader *multipart.Reader) (io.Reader, map[string]stri
/// HTML Form values
formValues := make(map[string]string)
filePart := new(bytes.Buffer)
var err error
for err == nil {
var e error
for e == nil {
var part *multipart.Part
part, err = reader.NextPart()
part, e = reader.NextPart()
if part != nil {
if part.FileName() == "" {
buffer, err := ioutil.ReadAll(part)
if err != nil {
return nil, nil, probe.NewError(err)
buffer, e := ioutil.ReadAll(part)
if e != nil {
return nil, nil, probe.NewError(e)
}
formValues[http.CanonicalHeaderKey(part.FormName())] = string(buffer)
} else {
_, err := io.Copy(filePart, part)
if err != nil {
return nil, nil, probe.NewError(err)
if _, e := io.Copy(filePart, part); e != nil {
return nil, nil, probe.NewError(e)
}
}
}
@ -200,13 +199,13 @@ func applyPolicy(formValues map[string]string) *probe.Error {
return probe.NewError(errUnsupportedAlgorithm)
}
/// Decoding policy
policyBytes, err := base64.StdEncoding.DecodeString(formValues["Policy"])
if err != nil {
return probe.NewError(err)
policyBytes, e := base64.StdEncoding.DecodeString(formValues["Policy"])
if e != nil {
return probe.NewError(e)
}
postPolicyForm, perr := fs.ParsePostPolicyForm(string(policyBytes))
if perr != nil {
return perr.Trace()
postPolicyForm, err := fs.ParsePostPolicyForm(string(policyBytes))
if err != nil {
return err.Trace()
}
if !postPolicyForm.Expiration.After(time.Now().UTC()) {
return probe.NewError(errPolicyAlreadyExpired)
@ -254,9 +253,9 @@ func initPostPresignedPolicyV4(formValues map[string]string) (*fs.Signature, *pr
if !isValidAccessKey(accessKeyID) {
return nil, probe.NewError(errAccessKeyIDInvalid)
}
config, perr := loadConfigV2()
if perr != nil {
return nil, perr.Trace()
config, err := loadConfigV2()
if err != nil {
return nil, err.Trace()
}
region := credentialElements[2]
if config.Credentials.AccessKeyID == accessKeyID {

@ -34,6 +34,11 @@ func (api CloudStorageAPI) GetBucketLocationHandler(w http.ResponseWriter, req *
vars := mux.Vars(req)
bucket := vars["bucket"]
if isRequestRequiresACLCheck(req) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
return
}
_, err := api.Filesystem.GetBucketMetadata(bucket)
if err != nil {
errorIf(err.Trace(), "GetBucketMetadata failed.", nil)
@ -68,6 +73,11 @@ func (api CloudStorageAPI) ListMultipartUploadsHandler(w http.ResponseWriter, re
vars := mux.Vars(req)
bucket := vars["bucket"]
if isRequestRequiresACLCheck(req) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
return
}
resources := getBucketMultipartResources(req.URL.Query())
if resources.MaxUploads < 0 {
writeErrorResponse(w, req, InvalidMaxUploads, req.URL.Path)
@ -107,6 +117,13 @@ func (api CloudStorageAPI) ListObjectsHandler(w http.ResponseWriter, req *http.R
vars := mux.Vars(req)
bucket := vars["bucket"]
if isRequestRequiresACLCheck(req) {
if api.Filesystem.IsPrivateBucket(bucket) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
return
}
}
// TODO handle encoding type.
prefix, marker, delimiter, maxkeys, _ := getBucketResources(req.URL.Query())
if maxkeys < 0 {
@ -148,6 +165,11 @@ func (api CloudStorageAPI) ListObjectsHandler(w http.ResponseWriter, req *http.R
// This implementation of the GET operation returns a list of all buckets
// owned by the authenticated sender of the request.
func (api CloudStorageAPI) ListBucketsHandler(w http.ResponseWriter, req *http.Request) {
if isRequestRequiresACLCheck(req) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
return
}
buckets, err := api.Filesystem.ListBuckets()
if err == nil {
// generate response
@ -170,6 +192,11 @@ func (api CloudStorageAPI) PutBucketHandler(w http.ResponseWriter, req *http.Req
vars := mux.Vars(req)
bucket := vars["bucket"]
if isRequestRequiresACLCheck(req) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
return
}
// read from 'x-amz-acl'
aclType := getACLType(req)
if aclType == unsupportedACLType {
@ -208,13 +235,17 @@ func (api CloudStorageAPI) PutBucketHandler(w http.ResponseWriter, req *http.Req
return
}
if signature != nil {
locationBytes, err := ioutil.ReadAll(req.Body)
if err != nil {
locationBytes, e := ioutil.ReadAll(req.Body)
if e != nil {
errorIf(probe.NewError(e), "MakeBucket failed.", nil)
writeErrorResponse(w, req, InternalError, req.URL.Path)
return
}
sh := sha256.New()
sh.Write(locationBytes)
ok, perr := signature.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
if perr != nil {
errorIf(perr.Trace(), "MakeBucket failed.", nil)
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
if err != nil {
errorIf(err.Trace(), "MakeBucket failed.", nil)
writeErrorResponse(w, req, InternalError, req.URL.Path)
return
}
@ -224,7 +255,6 @@ func (api CloudStorageAPI) PutBucketHandler(w http.ResponseWriter, req *http.Req
}
}
}
}
err := api.Filesystem.MakeBucket(bucket, getACLTypeString(aclType))
if err != nil {
@ -261,31 +291,31 @@ func (api CloudStorageAPI) PostPolicyBucketHandler(w http.ResponseWriter, req *h
// Here the parameter is the size of the form data that should
// be loaded in memory, the remaining being put in temporary
// files
reader, err := req.MultipartReader()
if err != nil {
errorIf(probe.NewError(err), "Unable to initialize multipart reader.", nil)
reader, e := req.MultipartReader()
if e != nil {
errorIf(probe.NewError(e), "Unable to initialize multipart reader.", nil)
writeErrorResponse(w, req, MalformedPOSTRequest, req.URL.Path)
return
}
fileBody, formValues, perr := extractHTTPFormValues(reader)
if perr != nil {
errorIf(perr.Trace(), "Unable to parse form values.", nil)
fileBody, formValues, err := extractHTTPFormValues(reader)
if err != nil {
errorIf(err.Trace(), "Unable to parse form values.", nil)
writeErrorResponse(w, req, MalformedPOSTRequest, req.URL.Path)
return
}
bucket := mux.Vars(req)["bucket"]
formValues["Bucket"] = bucket
object := formValues["Key"]
signature, perr := initPostPresignedPolicyV4(formValues)
if perr != nil {
errorIf(perr.Trace(), "Unable to initialize post policy presigned.", nil)
signature, err := initPostPresignedPolicyV4(formValues)
if err != nil {
errorIf(err.Trace(), "Unable to initialize post policy presigned.", nil)
writeErrorResponse(w, req, MalformedPOSTRequest, req.URL.Path)
return
}
var ok bool
if ok, perr = signature.DoesPolicySignatureMatch(formValues["X-Amz-Date"]); perr != nil {
errorIf(perr.Trace(), "Unable to verify signature.", nil)
if ok, err = signature.DoesPolicySignatureMatch(formValues["X-Amz-Date"]); err != nil {
errorIf(err.Trace(), "Unable to verify signature.", nil)
writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
return
}
@ -293,15 +323,15 @@ func (api CloudStorageAPI) PostPolicyBucketHandler(w http.ResponseWriter, req *h
writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
return
}
if perr = applyPolicy(formValues); perr != nil {
errorIf(perr.Trace(), "Invalid request, policy doesn't match with the endpoint.", nil)
if err = applyPolicy(formValues); err != nil {
errorIf(err.Trace(), "Invalid request, policy doesn't match with the endpoint.", nil)
writeErrorResponse(w, req, MalformedPOSTRequest, req.URL.Path)
return
}
metadata, perr := api.Filesystem.CreateObject(bucket, object, "", 0, fileBody, nil)
if perr != nil {
errorIf(perr.Trace(), "CreateObject failed.", nil)
switch perr.ToGoError().(type) {
metadata, err := api.Filesystem.CreateObject(bucket, object, "", 0, fileBody, nil)
if err != nil {
errorIf(err.Trace(), "CreateObject failed.", nil)
switch err.ToGoError().(type) {
case fs.RootPathFull:
writeErrorResponse(w, req, RootPathFull, req.URL.Path)
case fs.BucketNotFound:
@ -336,6 +366,11 @@ func (api CloudStorageAPI) PutBucketACLHandler(w http.ResponseWriter, req *http.
vars := mux.Vars(req)
bucket := vars["bucket"]
if isRequestRequiresACLCheck(req) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
return
}
// read from 'x-amz-acl'
aclType := getACLType(req)
if aclType == unsupportedACLType {
@ -368,6 +403,11 @@ func (api CloudStorageAPI) GetBucketACLHandler(w http.ResponseWriter, req *http.
vars := mux.Vars(req)
bucket := vars["bucket"]
if isRequestRequiresACLCheck(req) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
return
}
bucketMetadata, err := api.Filesystem.GetBucketMetadata(bucket)
if err != nil {
errorIf(err.Trace(), "GetBucketMetadata failed.", nil)
@ -400,6 +440,13 @@ func (api CloudStorageAPI) HeadBucketHandler(w http.ResponseWriter, req *http.Re
vars := mux.Vars(req)
bucket := vars["bucket"]
if isRequestRequiresACLCheck(req) {
if api.Filesystem.IsPrivateBucket(bucket) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
return
}
}
_, err := api.Filesystem.GetBucketMetadata(bucket)
if err != nil {
errorIf(err.Trace(), "GetBucketMetadata failed.", nil)
@ -421,6 +468,11 @@ func (api CloudStorageAPI) DeleteBucketHandler(w http.ResponseWriter, req *http.
vars := mux.Vars(req)
bucket := vars["bucket"]
if isRequestRequiresACLCheck(req) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
return
}
err := api.Filesystem.DeleteBucket(bucket)
if err != nil {
errorIf(err.Trace(), "DeleteBucket failed.", nil)

@ -19,7 +19,6 @@ package main
import (
"fmt"
"os"
"os/user"
"runtime"
"strconv"
@ -55,12 +54,6 @@ func init() {
// Check if minio was compiled using a supported version of Golang.
checkGolangRuntimeVersion()
// Check for the environment early on and gracefully report.
_, err := user.Current()
if err != nil {
}
if os.Getenv("DOCKERIMAGE") == "1" {
// the further checks are ignored for docker image
return

@ -39,6 +39,13 @@ func (api CloudStorageAPI) GetObjectHandler(w http.ResponseWriter, req *http.Req
bucket = vars["bucket"]
object = vars["object"]
if isRequestRequiresACLCheck(req) {
if api.Filesystem.IsPrivateBucket(bucket) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
return
}
}
metadata, err := api.Filesystem.GetObjectMetadata(bucket, object)
if err != nil {
errorIf(err.Trace(), "GetObject failed.", nil)
@ -78,6 +85,13 @@ func (api CloudStorageAPI) HeadObjectHandler(w http.ResponseWriter, req *http.Re
bucket = vars["bucket"]
object = vars["object"]
if isRequestRequiresACLCheck(req) {
if api.Filesystem.IsPrivateBucket(bucket) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
return
}
}
metadata, err := api.Filesystem.GetObjectMetadata(bucket, object)
if err != nil {
switch err.ToGoError().(type) {
@ -107,6 +121,13 @@ func (api CloudStorageAPI) PutObjectHandler(w http.ResponseWriter, req *http.Req
bucket = vars["bucket"]
object = vars["object"]
if isRequestRequiresACLCheck(req) {
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
return
}
}
// get Content-MD5 sent by client and verify if valid
md5 := req.Header.Get("Content-MD5")
if !isValidMD5(md5) {
@ -192,6 +213,13 @@ func (api CloudStorageAPI) NewMultipartUploadHandler(w http.ResponseWriter, req
bucket = vars["bucket"]
object = vars["object"]
if isRequestRequiresACLCheck(req) {
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
return
}
}
uploadID, err := api.Filesystem.NewMultipartUpload(bucket, object)
if err != nil {
errorIf(err.Trace(), "NewMultipartUpload failed.", nil)
@ -226,6 +254,13 @@ func (api CloudStorageAPI) PutObjectPartHandler(w http.ResponseWriter, req *http
bucket := vars["bucket"]
object := vars["object"]
if isRequestRequiresACLCheck(req) {
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
return
}
}
// get Content-MD5 sent by client and verify if valid
md5 := req.Header.Get("Content-MD5")
if !isValidMD5(md5) {
@ -317,6 +352,13 @@ func (api CloudStorageAPI) AbortMultipartUploadHandler(w http.ResponseWriter, re
bucket := vars["bucket"]
object := vars["object"]
if isRequestRequiresACLCheck(req) {
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
return
}
}
objectResourcesMetadata := getObjectResources(req.URL.Query())
err := api.Filesystem.AbortMultipartUpload(bucket, object, objectResourcesMetadata.UploadID)
if err != nil {
@ -346,6 +388,13 @@ func (api CloudStorageAPI) ListObjectPartsHandler(w http.ResponseWriter, req *ht
bucket := vars["bucket"]
object := vars["object"]
if isRequestRequiresACLCheck(req) {
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
return
}
}
objectResourcesMetadata := getObjectResources(req.URL.Query())
if objectResourcesMetadata.PartNumberMarker < 0 {
writeErrorResponse(w, req, InvalidPartNumberMarker, req.URL.Path)
@ -392,6 +441,13 @@ func (api CloudStorageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter,
bucket := vars["bucket"]
object := vars["object"]
if isRequestRequiresACLCheck(req) {
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
return
}
}
objectResourcesMetadata := getObjectResources(req.URL.Query())
var signature *fs.Signature
if isRequestSignatureV4(req) {
@ -461,6 +517,13 @@ func (api CloudStorageAPI) DeleteObjectHandler(w http.ResponseWriter, req *http.
bucket := vars["bucket"]
object := vars["object"]
if isRequestRequiresACLCheck(req) {
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
return
}
}
err := api.Filesystem.DeleteObject(bucket, object)
if err != nil {
errorIf(err.Trace(), "DeleteObject failed.", nil)

@ -26,6 +26,7 @@ import (
"time"
"github.com/minio/minio-xl/pkg/probe"
"github.com/minio/minio/pkg/ioutils"
)
// listObjectsParams - list objects input parameters.
@ -77,7 +78,7 @@ func (fs Filesystem) listObjects(bucket, prefix, marker, delimiter string, maxKe
walkPath = prefixPath
}
}
Walk(walkPath, func(path string, info os.FileInfo, err error) error {
ioutils.FTW(walkPath, func(path string, info os.FileInfo, err error) error {
// We don't need to list the walk path.
if path == walkPath {
return nil
@ -108,7 +109,7 @@ func (fs Filesystem) listObjects(bucket, prefix, marker, delimiter string, maxKe
// If delimiter is set, we stop if current path is a
// directory.
if delimiter != "" && info.IsDir() {
return ErrSkipDir
return ioutils.ErrSkipDir
}
}
return nil

@ -273,31 +273,30 @@ func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum s
objectPath := filepath.Join(bucketPath, object)
partPath := objectPath + fmt.Sprintf("$%d-$multiparts", partID)
partFile, err := atomic.FileCreateWithPrefix(partPath, "$multiparts")
if err != nil {
return "", probe.NewError(err)
partFile, e := atomic.FileCreateWithPrefix(partPath, "$multiparts")
if e != nil {
return "", probe.NewError(e)
}
h := md5.New()
sh := sha256.New()
mw := io.MultiWriter(partFile, h, sh)
_, err = io.CopyN(mw, data, size)
if err != nil {
if _, e = io.CopyN(mw, data, size); e != nil {
partFile.CloseAndPurge()
return "", probe.NewError(err)
return "", probe.NewError(e)
}
md5sum := hex.EncodeToString(h.Sum(nil))
// Verify if the written object is equal to what is expected, only if it is requested as such
if strings.TrimSpace(expectedMD5Sum) != "" {
if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5sum); err != nil {
if !isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5sum) {
partFile.CloseAndPurge()
return "", probe.NewError(BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Object: object})
}
}
if signature != nil {
ok, perr := signature.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
if perr != nil {
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
if err != nil {
partFile.CloseAndPurge()
return "", perr.Trace()
return "", err.Trace()
}
if !ok {
partFile.CloseAndPurge()
@ -306,9 +305,9 @@ func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum s
}
partFile.Close()
fi, err := os.Stat(partPath)
if err != nil {
return "", probe.NewError(err)
fi, e := os.Stat(partPath)
if e != nil {
return "", probe.NewError(e)
}
partMetadata := PartMetadata{}
partMetadata.ETag = md5sum
@ -316,17 +315,16 @@ func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum s
partMetadata.Size = fi.Size()
partMetadata.LastModified = fi.ModTime()
multiPartfile, err := os.OpenFile(objectPath+"$multiparts", os.O_RDWR|os.O_APPEND, 0600)
if err != nil {
return "", probe.NewError(err)
multiPartfile, e := os.OpenFile(objectPath+"$multiparts", os.O_RDWR|os.O_APPEND, 0600)
if e != nil {
return "", probe.NewError(e)
}
defer multiPartfile.Close()
var deserializedMultipartSession MultipartSession
decoder := json.NewDecoder(multiPartfile)
err = decoder.Decode(&deserializedMultipartSession)
if err != nil {
return "", probe.NewError(err)
if e = decoder.Decode(&deserializedMultipartSession); e != nil {
return "", probe.NewError(e)
}
deserializedMultipartSession.Parts = append(deserializedMultipartSession.Parts, &partMetadata)
deserializedMultipartSession.TotalParts++
@ -334,9 +332,8 @@ func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum s
sort.Sort(partNumber(deserializedMultipartSession.Parts))
encoder := json.NewEncoder(multiPartfile)
err = encoder.Encode(&deserializedMultipartSession)
if err != nil {
return "", probe.NewError(err)
if e = encoder.Encode(&deserializedMultipartSession); e != nil {
return "", probe.NewError(e)
}
return partMetadata.ETag, nil
}
@ -362,34 +359,34 @@ func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, da
bucket = fs.denormalizeBucket(bucket)
bucketPath := filepath.Join(fs.path, bucket)
if _, err := os.Stat(bucketPath); err != nil {
if _, e := os.Stat(bucketPath); e != nil {
// check bucket exists
if os.IsNotExist(err) {
if os.IsNotExist(e) {
return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
return ObjectMetadata{}, probe.NewError(InternalError{})
}
objectPath := filepath.Join(bucketPath, object)
file, err := atomic.FileCreateWithPrefix(objectPath, "")
if err != nil {
return ObjectMetadata{}, probe.NewError(err)
file, e := atomic.FileCreateWithPrefix(objectPath, "")
if e != nil {
return ObjectMetadata{}, probe.NewError(e)
}
h := md5.New()
mw := io.MultiWriter(file, h)
partBytes, err := ioutil.ReadAll(data)
if err != nil {
partBytes, e := ioutil.ReadAll(data)
if e != nil {
file.CloseAndPurge()
return ObjectMetadata{}, probe.NewError(err)
return ObjectMetadata{}, probe.NewError(e)
}
if signature != nil {
sh := sha256.New()
sh.Write(partBytes)
ok, perr := signature.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
if perr != nil {
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
if err != nil {
file.CloseAndPurge()
return ObjectMetadata{}, probe.NewError(err)
return ObjectMetadata{}, err.Trace()
}
if !ok {
file.CloseAndPurge()
@ -397,7 +394,7 @@ func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, da
}
}
parts := &CompleteMultipartUpload{}
if err := xml.Unmarshal(partBytes, parts); err != nil {
if e := xml.Unmarshal(partBytes, parts); e != nil {
file.CloseAndPurge()
return ObjectMetadata{}, probe.NewError(MalformedXML{})
}
@ -413,25 +410,24 @@ func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, da
delete(fs.multiparts.ActiveSession, object)
for _, part := range parts.Part {
err = os.Remove(objectPath + fmt.Sprintf("$%d-$multiparts", part.PartNumber))
if err != nil {
if e = os.Remove(objectPath + fmt.Sprintf("$%d-$multiparts", part.PartNumber)); e != nil {
file.CloseAndPurge()
return ObjectMetadata{}, probe.NewError(err)
return ObjectMetadata{}, probe.NewError(e)
}
}
if err := os.Remove(objectPath + "$multiparts"); err != nil {
if e := os.Remove(objectPath + "$multiparts"); e != nil {
file.CloseAndPurge()
return ObjectMetadata{}, probe.NewError(err)
return ObjectMetadata{}, probe.NewError(e)
}
if err := saveMultipartsSession(fs.multiparts); err != nil {
if e := saveMultipartsSession(fs.multiparts); e != nil {
file.CloseAndPurge()
return ObjectMetadata{}, err.Trace()
return ObjectMetadata{}, e.Trace()
}
file.Close()
st, err := os.Stat(objectPath)
if err != nil {
return ObjectMetadata{}, probe.NewError(err)
st, e := os.Stat(objectPath)
if e != nil {
return ObjectMetadata{}, probe.NewError(e)
}
contentType := "application/octet-stream"
if objectExt := filepath.Ext(objectPath); objectExt != "" {
@ -489,17 +485,16 @@ func (fs Filesystem) ListObjectParts(bucket, object string, resources ObjectReso
}
objectPath := filepath.Join(bucketPath, object)
multiPartfile, err := os.OpenFile(objectPath+"$multiparts", os.O_RDONLY, 0600)
if err != nil {
return ObjectResourcesMetadata{}, probe.NewError(err)
multiPartfile, e := os.OpenFile(objectPath+"$multiparts", os.O_RDONLY, 0600)
if e != nil {
return ObjectResourcesMetadata{}, probe.NewError(e)
}
defer multiPartfile.Close()
var deserializedMultipartSession MultipartSession
decoder := json.NewDecoder(multiPartfile)
err = decoder.Decode(&deserializedMultipartSession)
if err != nil {
return ObjectResourcesMetadata{}, probe.NewError(err)
if e = decoder.Decode(&deserializedMultipartSession); e != nil {
return ObjectResourcesMetadata{}, probe.NewError(e)
}
var parts []*PartMetadata
for i := startPartNumber; i <= deserializedMultipartSession.TotalParts; i++ {

@ -26,7 +26,6 @@ import (
"crypto/md5"
"encoding/base64"
"encoding/hex"
"errors"
"runtime"
"github.com/minio/minio-xl/pkg/atomic"
@ -34,6 +33,7 @@ import (
"github.com/minio/minio-xl/pkg/probe"
"github.com/minio/minio/pkg/contentdb"
"github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/ioutils"
)
/// Object Operations
@ -169,22 +169,22 @@ func getMetadata(rootPath, bucket, object string) (ObjectMetadata, *probe.Error)
}
// isMD5SumEqual - returns true if the expected and actual md5sums match, false otherwise.
func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) *probe.Error {
func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) bool {
if strings.TrimSpace(expectedMD5Sum) != "" && strings.TrimSpace(actualMD5Sum) != "" {
expectedMD5SumBytes, err := hex.DecodeString(expectedMD5Sum)
if err != nil {
return probe.NewError(err)
return false
}
actualMD5SumBytes, err := hex.DecodeString(actualMD5Sum)
if err != nil {
return probe.NewError(err)
return false
}
if !bytes.Equal(expectedMD5SumBytes, actualMD5SumBytes) {
return probe.NewError(BadDigest{Md5: expectedMD5Sum})
return false
}
return nil
return true
}
return probe.NewError(errors.New("invalid argument"))
return false
}
// CreateObject - PUT object
@ -254,14 +254,12 @@ func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size in
mw := io.MultiWriter(file, h, sh)
if size > 0 {
_, e = io.CopyN(mw, data, size)
if e != nil {
if _, e = io.CopyN(mw, data, size); e != nil {
file.CloseAndPurge()
return ObjectMetadata{}, probe.NewError(e)
}
} else {
_, e = io.Copy(mw, data)
if e != nil {
if _, e = io.Copy(mw, data); e != nil {
file.CloseAndPurge()
return ObjectMetadata{}, probe.NewError(e)
}
@ -270,7 +268,7 @@ func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size in
md5Sum := hex.EncodeToString(h.Sum(nil))
// Verify if the written object is equal to what is expected, only if it is requested as such
if strings.TrimSpace(expectedMD5Sum) != "" {
if e := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); e != nil {
if !isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum) {
file.CloseAndPurge()
return ObjectMetadata{}, probe.NewError(BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Object: object})
}
@ -312,7 +310,6 @@ func deleteObjectPath(basePath, deletePath, bucket, object string) *probe.Error
if basePath == deletePath {
return nil
}
fi, e := os.Stat(deletePath)
if e != nil {
if os.IsNotExist(e) {
@ -321,15 +318,14 @@ func deleteObjectPath(basePath, deletePath, bucket, object string) *probe.Error
return probe.NewError(e)
}
if fi.IsDir() {
empty, err := isDirEmpty(deletePath)
if err != nil {
return err.Trace(deletePath)
empty, e := ioutils.IsDirEmpty(deletePath)
if e != nil {
return probe.NewError(e)
}
if !empty {
return nil
}
}
if e := os.Remove(deletePath); e != nil {
return probe.NewError(e)
}

@ -33,12 +33,12 @@ var _ = Suite(&MySuite{})
func (s *MySuite) TestAPISuite(c *C) {
var storageList []string
create := func() Filesystem {
path, err := ioutil.TempDir(os.TempDir(), "minio-")
c.Check(err, IsNil)
path, e := ioutil.TempDir(os.TempDir(), "minio-")
c.Check(e, IsNil)
storageList = append(storageList, path)
store, perr := New(path)
store, err := New(path)
store.SetMinFreeDisk(0)
c.Check(perr, IsNil)
c.Check(err, IsNil)
return store
}
APITestSuite(c, create)

@ -14,7 +14,7 @@
* limitations under the License.
*/
package fs
package ioutils
import (
"errors"
@ -22,20 +22,13 @@ import (
"os"
"path/filepath"
"sort"
"github.com/minio/minio-xl/pkg/probe"
)
// Check if a directory is empty
func isDirEmpty(dirname string) (bool, *probe.Error) {
f, err := os.Open(dirname)
defer f.Close()
if err != nil {
return false, probe.NewError(err)
}
names, err := f.Readdirnames(1)
// IsDirEmpty checks whether a directory is empty.
func IsDirEmpty(dirname string) (bool, error) {
names, err := ReadDirNamesN(dirname, 1)
if err != nil && err != io.EOF {
return false, probe.NewError(err)
return false, err
}
if len(names) > 0 {
return false, nil
@ -43,19 +36,9 @@ func isDirEmpty(dirname string) (bool, *probe.Error) {
return true, nil
}
// Walk walks the file tree rooted at root, calling walkFn for each file or
// FTW walks the file tree rooted at root, calling walkFn for each file or
// directory in the tree, including root.
func Walk(root string, walkFn WalkFunc) error {
info, err := os.Lstat(root)
if err != nil {
return walkFn(root, nil, err)
}
return walk(root, info, walkFn)
}
// WalkUnsorted walks the file tree rooted at root, calling walkFn for each file or
// directory in the tree, including root.
func WalkUnsorted(root string, walkFn WalkFunc) error {
func FTW(root string, walkFn FTWFunc) error {
info, err := os.Lstat(root)
if err != nil {
return walkFn(root, nil, err)
@ -91,10 +74,12 @@ func readDirUnsortedNames(dirname string) ([]string, error) {
return nil, err
}
nameInfos, err := f.Readdir(-1)
f.Close()
if err != nil {
return nil, err
}
if err = f.Close(); err != nil {
return nil, err
}
var names []string
for _, nameInfo := range nameInfos {
names = append(names, getRealName(nameInfo))
@ -102,12 +87,12 @@ func readDirUnsortedNames(dirname string) ([]string, error) {
return names, nil
}
// WalkFunc is the type of the function called for each file or directory
// FTWFunc is the type of the function called for each file or directory
// visited by FTW. The path argument contains the argument to FTW as a
// prefix; that is, if FTW is called with "dir", which is a directory
// containing the file "a", the walk function will be called with argument
// "dir/a". The info argument is the os.FileInfo for the named path.
type WalkFunc func(path string, info os.FileInfo, err error) error
type FTWFunc func(path string, info os.FileInfo, err error) error
// ErrSkipDir is used as a return value from FTWFuncs to indicate that
// the directory named in the call is to be skipped. It is not returned
@ -123,48 +108,8 @@ var ErrSkipFile = errors.New("skip this file")
// file or a symlink left
var ErrDirNotEmpty = errors.New("directory not empty")
// walkUnsorted visits path with walkFn and, when path is a directory,
// recurses into its entries in raw directory order (unsorted).
// Skip sentinels: ErrSkipDir on a directory and ErrSkipFile on a regular
// file are treated as benign "skip this entry" signals, not failures.
func walkUnsorted(path string, info os.FileInfo, walkFn WalkFunc) error {
// Report the node itself first; walkFn may veto further descent.
err := walkFn(path, info, nil)
if err != nil {
if info.Mode().IsDir() && err == ErrSkipDir {
return nil
}
if info.Mode().IsRegular() && err == ErrSkipFile {
return nil
}
return err
}
// Non-directories have no children; nothing more to do.
if !info.IsDir() {
return nil
}
names, err := readDirUnsortedNames(path)
if err != nil {
// Could not list the directory: give walkFn a chance to handle
// (or override) the error for this path.
return walkFn(path, info, err)
}
for _, name := range names {
filename := filepath.Join(path, name)
fileInfo, err := os.Lstat(filename)
if err != nil {
// Lstat failed (entry may have vanished); report it to walkFn
// with a possibly-nil fileInfo. Skip sentinels are ignored here.
if err := walkFn(filename, fileInfo, err); err != nil && err != ErrSkipDir && err != ErrSkipFile {
return err
}
} else {
err = walk(filename, fileInfo, walkFn)
if err != nil {
if err == ErrSkipDir || err == ErrSkipFile {
// NOTE(review): a skip sentinel from a child aborts the
// remaining siblings of this directory too — unlike
// stdlib filepath.Walk, which only skips that child.
// Presumably intentional; confirm before reusing.
return nil
}
return err
}
}
}
return nil
}
// walk recursively descends path, calling w.
func walk(path string, info os.FileInfo, walkFn WalkFunc) error {
// walk recursively descends path, calling walkFn.
func walk(path string, info os.FileInfo, walkFn FTWFunc) error {
err := walkFn(path, info, nil)
if err != nil {
if info.Mode().IsDir() && err == ErrSkipDir {

@ -170,35 +170,32 @@ func parsePercentToInt(s string, bitSize int) (int64, *probe.Error) {
i := strings.Index(s, "%")
if i < 0 {
// no percentage string found try to parse the whole string anyways
p, err := strconv.ParseInt(s, 10, bitSize)
if err != nil {
return 0, probe.NewError(err)
p, e := strconv.ParseInt(s, 10, bitSize)
if e != nil {
return 0, probe.NewError(e)
}
return p, nil
}
p, err := strconv.ParseInt(s[:i], 10, bitSize)
if err != nil {
return 0, probe.NewError(err)
p, e := strconv.ParseInt(s[:i], 10, bitSize)
if e != nil {
return 0, probe.NewError(e)
}
return p, nil
}
func setLogger(conf *configV2) *probe.Error {
if conf.IsMongoLoggingEnabled() {
err := log2Mongo(conf.MongoLogger.Addr, conf.MongoLogger.DB, conf.MongoLogger.Collection)
if err != nil {
return err.Trace()
if err := log2Mongo(conf.MongoLogger.Addr, conf.MongoLogger.DB, conf.MongoLogger.Collection); err != nil {
return err.Trace(conf.MongoLogger.Addr, conf.MongoLogger.DB, conf.MongoLogger.Collection)
}
}
if conf.IsSysloggingEnabled() {
err := log2Syslog(conf.SyslogLogger.Network, conf.SyslogLogger.Addr)
if err != nil {
return err.Trace()
if err := log2Syslog(conf.SyslogLogger.Network, conf.SyslogLogger.Addr); err != nil {
return err.Trace(conf.SyslogLogger.Network, conf.SyslogLogger.Addr)
}
}
if conf.IsFileLoggingEnabled() {
err := log2File(conf.FileLogger.Filename)
if err != nil {
return err.Trace()
if err := log2File(conf.FileLogger.Filename); err != nil {
return err.Trace(conf.FileLogger.Filename)
}
}
return nil
@ -240,8 +237,8 @@ func (a accessKeys) String() string {
// JSON - json formatted output
func (a accessKeys) JSON() string {
b, err := json.Marshal(a)
errorIf(probe.NewError(err), "Unable to marshal json", nil)
b, e := json.Marshal(a)
errorIf(probe.NewError(e), "Unable to marshal json", nil)
return string(b)
}
@ -277,8 +274,8 @@ func checkServerSyntax(c *cli.Context) {
func serverMain(c *cli.Context) {
checkServerSyntax(c)
conf, perr := initServer()
fatalIf(perr.Trace(), "Failed to read config for minio.", nil)
conf, err := initServer()
fatalIf(err.Trace(), "Failed to read config for minio.", nil)
certFile := c.GlobalString("cert")
keyFile := c.GlobalString("key")
@ -299,7 +296,6 @@ func serverMain(c *cli.Context) {
fatalIf(probe.NewError(errInvalidArgument), "Minimum free disk should be set only once.", nil)
}
args = args.Tail()
var err *probe.Error
minFreeDisk, err = parsePercentToInt(args.First(), 64)
fatalIf(err.Trace(args.First()), "Invalid minium free disk size "+args.First()+" passed.", nil)
args = args.Tail()

@ -51,17 +51,17 @@ var _ = Suite(&MyAPIFSCacheSuite{})
var testAPIFSCacheServer *httptest.Server
func (s *MyAPIFSCacheSuite) SetUpSuite(c *C) {
root, err := ioutil.TempDir(os.TempDir(), "api-")
c.Assert(err, IsNil)
root, e := ioutil.TempDir(os.TempDir(), "api-")
c.Assert(e, IsNil)
s.root = root
fsroot, err := ioutil.TempDir(os.TempDir(), "api-")
c.Assert(err, IsNil)
fsroot, e := ioutil.TempDir(os.TempDir(), "api-")
c.Assert(e, IsNil)
accessKeyID, perr := generateAccessKeyID()
c.Assert(perr, IsNil)
secretAccessKey, perr := generateSecretAccessKey()
c.Assert(perr, IsNil)
accessKeyID, err := generateAccessKeyID()
c.Assert(err, IsNil)
secretAccessKey, err := generateSecretAccessKey()
c.Assert(err, IsNil)
conf := newConfigV2()
conf.Credentials.AccessKeyID = string(accessKeyID)
@ -72,8 +72,7 @@ func (s *MyAPIFSCacheSuite) SetUpSuite(c *C) {
// do this only once here
setGlobalConfigPath(root)
perr = saveConfig(conf)
c.Assert(perr, IsNil)
c.Assert(saveConfig(conf), IsNil)
cloudServer := cloudServerConfig{
Path: fsroot,

@ -44,6 +44,13 @@ func isRequestSignatureV4(req *http.Request) bool {
return false
}
// isRequestRequiresACLCheck - reports whether the request must fall back to
// bucket ACL checks, i.e. it carries none of the recognized AWS v4
// authentication forms (signed header, presigned URL, or POST policy).
func isRequestRequiresACLCheck(req *http.Request) bool {
	signedV4 := isRequestSignatureV4(req) ||
		isRequestPresignedSignatureV4(req) ||
		isRequestPostPolicySignatureV4(req)
	return !signedV4
}
func isRequestPresignedSignatureV4(req *http.Request) bool {
if _, ok := req.URL.Query()["X-Amz-Credential"]; ok {
return ok

Loading…
Cancel
Save