diff --git a/pkg/disk/disk.go b/pkg/disk/disk.go
index 3691412c8..b95c95fbc 100644
--- a/pkg/disk/disk.go
+++ b/pkg/disk/disk.go
@@ -16,11 +16,11 @@
 package disk
 
-// StatFS stat fs struct is container which holds following values
+// Info stat fs struct is container which holds following values
 // Total - total size of the volume / disk
 // Free - free size of the volume / disk
-// FSType - file system type string
-type StatFS struct {
+// Type - file system type string
+type Info struct {
 	Total  int64
 	Free   int64
 	FSType string
diff --git a/pkg/disk/disk_test.go b/pkg/disk/disk_test.go
index 8758b7fb1..e463fa12f 100644
--- a/pkg/disk/disk_test.go
+++ b/pkg/disk/disk_test.go
@@ -36,9 +36,9 @@ func (s *MySuite) TestFree(c *C) {
 	path, err := ioutil.TempDir(os.TempDir(), "minio-")
 	c.Assert(err, IsNil)
 
-	statfs, err := disk.Stat(path)
+	di, err := disk.GetInfo(path)
 	c.Assert(err, IsNil)
-	c.Assert(statfs.Total, Not(Equals), 0)
-	c.Assert(statfs.Free, Not(Equals), 0)
-	c.Assert(statfs.FSType, Not(Equals), "UNKNOWN")
+	c.Assert(di.Total, Not(Equals), 0)
+	c.Assert(di.Free, Not(Equals), 0)
+	c.Assert(di.FSType, Not(Equals), "UNKNOWN")
 }
diff --git a/pkg/disk/stat_nix.go b/pkg/disk/stat_nix.go
index 0ffb43909..c8e48d2b7 100644
--- a/pkg/disk/stat_nix.go
+++ b/pkg/disk/stat_nix.go
@@ -22,19 +22,19 @@ import (
 	"syscall"
 )
 
-// Stat returns total and free bytes available in a directory, e.g. `/`.
-func Stat(path string) (statfs StatFS, err error) {
+// GetInfo returns total and free bytes available in a directory, e.g. `/`.
+func GetInfo(path string) (info Info, err error) {
 	s := syscall.Statfs_t{}
 	err = syscall.Statfs(path, &s)
 	if err != nil {
-		return StatFS{}, err
+		return Info{}, err
 	}
-	statfs = StatFS{}
-	statfs.Total = int64(s.Bsize) * int64(s.Blocks)
-	statfs.Free = int64(s.Bsize) * int64(s.Bfree)
-	statfs.FSType, err = getFSType(path)
+	info = Info{}
+	info.Total = int64(s.Bsize) * int64(s.Blocks)
+	info.Free = int64(s.Bsize) * int64(s.Bfree)
+	info.FSType, err = getFSType(path)
 	if err != nil {
-		return StatFS{}, err
+		return Info{}, err
 	}
-	return statfs, nil
+	return info, nil
 }
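For context, a minimal caller-side sketch (not part of the patch) of the renamed disk.GetInfo API; the mount path is arbitrary and the 5% journalling allowance simply mirrors the free-space check used in pkg/fs below.

package main

import (
	"fmt"
	"log"

	"github.com/minio/minio/pkg/disk"
)

func main() {
	di, err := disk.GetInfo("/")
	if err != nil {
		log.Fatalln(err)
	}
	// Same calculation as pkg/fs: usable space as a percentage, after
	// discounting 5% of the total for journalling, inodes etc.
	available := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
	fmt.Printf("total=%d free=%d fstype=%s available=%.2f%%\n", di.Total, di.Free, di.FSType, available)
}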
diff --git a/pkg/disk/stat_windows.go b/pkg/disk/stat_windows.go
index 9730a5782..6fe68c99e 100644
--- a/pkg/disk/stat_windows.go
+++ b/pkg/disk/stat_windows.go
@@ -23,11 +23,11 @@ import (
 	"unsafe"
 )
 
-// Stat returns total and free bytes available in a directory, e.g. `C:\`.
+// GetInfo returns total and free bytes available in a directory, e.g. `C:\`.
 // It returns free space available to the user (including quota limitations)
 //
 // https://msdn.microsoft.com/en-us/library/windows/desktop/aa364937(v=vs.85).aspx
-func Stat(path string) (statfs StatFS, err error) {
+func GetInfo(path string) (info Info, err error) {
 	dll := syscall.MustLoadDLL("kernel32.dll")
 	// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364937(v=vs.85).aspx
 	// Retrieves information about the amount of space that is available on a disk volume,
@@ -50,9 +50,9 @@ func Stat(path string) (statfs StatFS, err error) {
 		uintptr(unsafe.Pointer(&lpFreeBytesAvailable)),
 		uintptr(unsafe.Pointer(&lpTotalNumberOfBytes)),
 		uintptr(unsafe.Pointer(&lpTotalNumberOfFreeBytes)))
-	statfs = StatFS{}
-	statfs.Total = int64(lpTotalNumberOfBytes)
-	statfs.Free = int64(lpFreeBytesAvailable)
-	statfs.FSType = getFSType(path)
-	return statfs, nil
+	info = Info{}
+	info.Total = int64(lpTotalNumberOfBytes)
+	info.Free = int64(lpFreeBytesAvailable)
+	info.FSType = getFSType(path)
+	return info, nil
 }
diff --git a/pkg/fs/definitions.go b/pkg/fs/definitions.go
index 6abb7b077..bd8715056 100644
--- a/pkg/fs/definitions.go
+++ b/pkg/fs/definitions.go
@@ -165,6 +165,9 @@ func IsValidBucketACL(acl string) bool {
 	}
 }
 
+// validBucket regexp.
+var validBucket = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+
 // IsValidBucketName - verify bucket name in accordance with
 // - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
 func IsValidBucketName(bucket string) bool {
@@ -177,12 +180,7 @@ func IsValidBucketName(bucket string) bool {
 	if bucket[0] == '.' || bucket[len(bucket)-1] == '.' {
 		return false
 	}
-	if match, _ := regexp.MatchString("\\.\\.", bucket); match == true {
-		return false
-	}
-	// We don't support buckets with '.' in them
-	match, _ := regexp.MatchString("^[a-zA-Z0-9][a-zA-Z0-9\\-]+[a-zA-Z0-9]$", bucket)
-	return match
+	return validBucket.MatchString(bucket)
 }
 
 // IsValidObjectName - verify object name in accordance with
diff --git a/pkg/fs/fs-bucket-listobjects.go b/pkg/fs/fs-bucket-listobjects.go
index 1e32b4e03..825d5828a 100644
--- a/pkg/fs/fs-bucket-listobjects.go
+++ b/pkg/fs/fs-bucket-listobjects.go
@@ -38,6 +38,8 @@ func (fs Filesystem) ListObjects(bucket string, resources BucketResourcesMetadat
 		return nil, resources, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: resources.Prefix})
 	}
 
+	bucket = fs.denormalizeBucket(bucket)
+
 	p := bucketDir{}
 	rootPrefix := filepath.Join(fs.path, bucket)
 	// check bucket exists
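A rough illustration of what the precompiled validBucket pattern from definitions.go accepts; the exact rules live in IsValidBucketName (which additionally checks length and leading/trailing dots), and the sample names are made up.

package main

import (
	"fmt"
	"regexp"
)

var validBucket = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)

func main() {
	for _, name := range []string{
		"my-bucket", // matches: lowercase letters, digits and '-'
		"my.bucket", // matches: dots are allowed by the new pattern
		"a..b",      // matches: the old explicit ".." rejection was dropped
		"MyBucket",  // no match: uppercase letters are rejected
		"ab",        // no match: shorter than three characters
	} {
		fmt.Printf("%-10s %v\n", name, validBucket.MatchString(name))
	}
}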
diff --git a/pkg/fs/fs-bucket.go b/pkg/fs/fs-bucket.go
index 1fd2b8f43..5f2845df6 100644
--- a/pkg/fs/fs-bucket.go
+++ b/pkg/fs/fs-bucket.go
@@ -17,13 +17,15 @@
 package fs
 
 import (
-	"io/ioutil"
+	"fmt"
+	"io"
 	"os"
 	"path/filepath"
 	"strings"
 
 	"github.com/minio/minio-xl/pkg/probe"
 	"github.com/minio/minio/pkg/disk"
+	"github.com/minio/minio/pkg/ioutils"
 )
 
 /// Bucket Operations
@@ -36,22 +38,20 @@ func (fs Filesystem) DeleteBucket(bucket string) *probe.Error {
 	if !IsValidBucketName(bucket) {
 		return probe.NewError(BucketNameInvalid{Bucket: bucket})
 	}
+	bucket = fs.denormalizeBucket(bucket)
 	bucketDir := filepath.Join(fs.path, bucket)
 	// check bucket exists
-	if _, err := os.Stat(bucketDir); err != nil {
-		if os.IsNotExist(err) {
+	if _, e := os.Stat(bucketDir); e != nil {
+		if os.IsNotExist(e) {
 			return probe.NewError(BucketNotFound{Bucket: bucket})
 		}
-		return probe.NewError(err)
-	}
-	if _, ok := fs.buckets.Metadata[bucket]; !ok {
-		return probe.NewError(BucketNotFound{Bucket: bucket})
+		return probe.NewError(e)
 	}
-	if err := os.Remove(bucketDir); err != nil {
-		if strings.Contains(err.Error(), "directory not empty") {
+	if e := os.Remove(bucketDir); e != nil {
+		if strings.Contains(e.Error(), "directory not empty") {
 			return probe.NewError(BucketNotEmpty{Bucket: bucket})
 		}
-		return probe.NewError(err)
+		return probe.NewError(e)
 	}
 	delete(fs.buckets.Metadata, bucket)
 	if err := saveBucketsMetadata(fs.buckets); err != nil {
@@ -60,34 +60,50 @@ func (fs Filesystem) DeleteBucket(bucket string) *probe.Error {
 	return nil
 }
 
+func removeDuplicateBuckets(elements []BucketMetadata) (result []BucketMetadata) {
+	// Use map to record duplicates as we find them.
+	duplicates := make(map[string]struct{})
+	for _, element := range elements {
+		if _, ok := duplicates[element.Name]; !ok {
+			duplicates[element.Name] = struct{}{}
+			result = append(result, element)
+		}
+	}
+	return result
+}
+
 // ListBuckets - Get service
 func (fs Filesystem) ListBuckets() ([]BucketMetadata, *probe.Error) {
 	fs.lock.Lock()
 	defer fs.lock.Unlock()
 
-	files, err := ioutil.ReadDir(fs.path)
-	if err != nil {
+	files, err := ioutils.ReadDirN(fs.path, fs.maxBuckets)
+	if err != nil && err != io.EOF {
 		return []BucketMetadata{}, probe.NewError(err)
 	}
-
+	if err == io.EOF {
+		fmt.Printf("Truncating the bucket list to %d entries only.", fs.maxBuckets)
+	}
 	var metadataList []BucketMetadata
 	for _, file := range files {
 		if !file.IsDir() {
 			// if files found ignore them
 			continue
 		}
+		dirName := strings.ToLower(file.Name())
 		if file.IsDir() {
 			// if directories found with odd names, skip them too
-			if !IsValidBucketName(file.Name()) {
+			if !IsValidBucketName(dirName) {
 				continue
 			}
 		}
 		metadata := BucketMetadata{
-			Name:    file.Name(),
+			Name:    dirName,
 			Created: file.ModTime(),
 		}
 		metadataList = append(metadataList, metadata)
 	}
+	metadataList = removeDuplicateBuckets(metadataList)
 	return metadataList, nil
 }
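Because directory names are lowercased before validation, two on-disk directories that differ only in case now collapse to a single listing entry; a small standalone sketch of the order-preserving de-duplication that removeDuplicateBuckets performs (the bucket names are hypothetical).

package main

import "fmt"

type BucketMetadata struct {
	Name string
}

func removeDuplicateBuckets(elements []BucketMetadata) (result []BucketMetadata) {
	// Record names already seen; keep only the first occurrence.
	seen := make(map[string]struct{})
	for _, element := range elements {
		if _, ok := seen[element.Name]; !ok {
			seen[element.Name] = struct{}{}
			result = append(result, element)
		}
	}
	return result
}

func main() {
	in := []BucketMetadata{{Name: "photos"}, {Name: "photos"}, {Name: "logs"}}
	fmt.Println(removeDuplicateBuckets(in)) // [{photos} {logs}]
}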
@@ -96,13 +112,13 @@ func (fs Filesystem) MakeBucket(bucket, acl string) *probe.Error {
 	fs.lock.Lock()
 	defer fs.lock.Unlock()
 
-	stfs, err := disk.Stat(fs.path)
+	di, err := disk.GetInfo(fs.path)
 	if err != nil {
 		return probe.NewError(err)
 	}
 
 	// Remove 5% from total space for cumulative disk space used for journalling, inodes etc.
-	availableDiskSpace := (float64(stfs.Free) / (float64(stfs.Total) - (0.05 * float64(stfs.Total)))) * 100
+	availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
 	if int64(availableDiskSpace) <= fs.minFreeDisk {
 		return probe.NewError(RootPathFull{Path: fs.path})
 	}
@@ -116,29 +132,26 @@ func (fs Filesystem) MakeBucket(bucket, acl string) *probe.Error {
 		return probe.NewError(InvalidACL{ACL: acl})
 	}
 
+	bucket = fs.denormalizeBucket(bucket)
 	// get bucket path
 	bucketDir := filepath.Join(fs.path, bucket)
-	// check if bucket exists
-	if _, err = os.Stat(bucketDir); err == nil {
-		return probe.NewError(BucketExists{
-			Bucket: bucket,
-		})
+	if _, e := os.Stat(bucketDir); e == nil {
+		return probe.NewError(BucketExists{Bucket: bucket})
 	}
 
 	// make bucket
-	err = os.Mkdir(bucketDir, 0700)
-	if err != nil {
+	if e := os.Mkdir(bucketDir, 0700); e != nil {
 		return probe.NewError(err)
 	}
 
 	bucketMetadata := &BucketMetadata{}
-	fi, err := os.Stat(bucketDir)
+	fi, e := os.Stat(bucketDir)
 	// check if bucket exists
-	if err != nil {
-		if os.IsNotExist(err) {
+	if e != nil {
+		if os.IsNotExist(e) {
 			return probe.NewError(BucketNotFound{Bucket: bucket})
 		}
-		return probe.NewError(err)
+		return probe.NewError(e)
 	}
 	if strings.TrimSpace(acl) == "" {
 		acl = "private"
@@ -153,6 +166,19 @@ func (fs Filesystem) MakeBucket(bucket, acl string) *probe.Error {
 	return nil
 }
 
+func (fs Filesystem) denormalizeBucket(bucket string) string {
+	buckets, err := ioutils.ReadDirNamesN(fs.path, fs.maxBuckets)
+	if err != nil {
+		return bucket
+	}
+	for _, b := range buckets {
+		if strings.ToLower(b) == bucket {
+			return b
+		}
+	}
+	return bucket
+}
+
 // GetBucketMetadata - get bucket metadata
 func (fs Filesystem) GetBucketMetadata(bucket string) (BucketMetadata, *probe.Error) {
 	fs.lock.Lock()
@@ -160,15 +186,18 @@ func (fs Filesystem) GetBucketMetadata(bucket string) (BucketMetadata, *probe.Er
 	if !IsValidBucketName(bucket) {
 		return BucketMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
 	}
+
+	bucket = fs.denormalizeBucket(bucket)
+
 	// get bucket path
 	bucketDir := filepath.Join(fs.path, bucket)
-	fi, err := os.Stat(bucketDir)
-	if err != nil {
+	fi, e := os.Stat(bucketDir)
+	if e != nil {
 		// check if bucket exists
-		if os.IsNotExist(err) {
+		if os.IsNotExist(e) {
 			return BucketMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
 		}
-		return BucketMetadata{}, probe.NewError(err)
+		return BucketMetadata{}, probe.NewError(e)
 	}
 	bucketMetadata, ok := fs.buckets.Metadata[bucket]
 	if !ok {
@@ -194,14 +223,15 @@ func (fs Filesystem) SetBucketMetadata(bucket string, metadata map[string]string
 	if strings.TrimSpace(acl) == "" {
 		acl = "private"
 	}
+	bucket = fs.denormalizeBucket(bucket)
 	bucketDir := filepath.Join(fs.path, bucket)
-	fi, err := os.Stat(bucketDir)
-	if err != nil {
+	fi, e := os.Stat(bucketDir)
+	if e != nil {
 		// check if bucket exists
-		if os.IsNotExist(err) {
+		if os.IsNotExist(e) {
 			return probe.NewError(BucketNotFound{Bucket: bucket})
 		}
-		return probe.NewError(err)
+		return probe.NewError(e)
 	}
 	bucketMetadata, ok := fs.buckets.Metadata[bucket]
 	if !ok {
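denormalizeBucket is the piece every handler above now calls: the incoming name arrives already validated (and therefore lowercase), and if a directory differing only in case exists under fs.path, that on-disk name is used instead. A standalone sketch of the lookup, with a hypothetical directory listing:

package main

import (
	"fmt"
	"strings"
)

// denormalize mirrors fs.denormalizeBucket: map a lowercase bucket name back
// to the directory name that actually exists on disk, if any.
func denormalize(bucket string, onDisk []string) string {
	for _, b := range onDisk {
		if strings.ToLower(b) == bucket {
			return b
		}
	}
	return bucket // no case-variant found; use the name as given
}

func main() {
	existing := []string{"Photos", "logs"}       // pretend ReadDirNamesN returned this
	fmt.Println(denormalize("photos", existing)) // "Photos"
	fmt.Println(denormalize("music", existing))  // "music" (unchanged)
}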
diff --git a/pkg/fs/fs-multipart.go b/pkg/fs/fs-multipart.go
index 024d43948..f273f9385 100644
--- a/pkg/fs/fs-multipart.go
+++ b/pkg/fs/fs-multipart.go
@@ -59,14 +59,14 @@ func (fs Filesystem) ListMultipartUploads(bucket string, resources BucketMultipa
 	if !IsValidBucketName(bucket) {
 		return BucketMultipartResourcesMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
 	}
+	bucket = fs.denormalizeBucket(bucket)
 	bucketPath := filepath.Join(fs.path, bucket)
-	_, err := os.Stat(bucketPath)
-	// check bucket exists
-	if os.IsNotExist(err) {
-		return BucketMultipartResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
-	}
-	if err != nil {
-		return BucketMultipartResourcesMetadata{}, probe.NewError(InternalError{})
+	if _, e := os.Stat(bucketPath); e != nil {
+		// check bucket exists
+		if os.IsNotExist(e) {
+			return BucketMultipartResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
+		}
+		return BucketMultipartResourcesMetadata{}, probe.NewError(e)
 	}
 	var uploads []*UploadMetadata
 	for object, session := range fs.multiparts.ActiveSession {
@@ -142,13 +142,13 @@ func (fs Filesystem) NewMultipartUpload(bucket, object string) (string, *probe.E
 	fs.lock.Lock()
 	defer fs.lock.Unlock()
 
-	stfs, err := disk.Stat(fs.path)
-	if err != nil {
-		return "", probe.NewError(err)
+	di, e := disk.GetInfo(fs.path)
+	if e != nil {
+		return "", probe.NewError(e)
 	}
 
 	// Remove 5% from total space for cumulative disk space used for journalling, inodes etc.
-	availableDiskSpace := (float64(stfs.Free) / (float64(stfs.Total) - (0.05 * float64(stfs.Total)))) * 100
+	availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
 	if int64(availableDiskSpace) <= fs.minFreeDisk {
 		return "", probe.NewError(RootPathFull{Path: fs.path})
 	}
@@ -160,31 +160,35 @@ func (fs Filesystem) NewMultipartUpload(bucket, object string) (string, *probe.E
 		return "", probe.NewError(ObjectNameInvalid{Object: object})
 	}
 
+	bucket = fs.denormalizeBucket(bucket)
 	bucketPath := filepath.Join(fs.path, bucket)
-	_, err = os.Stat(bucketPath)
-	// check bucket exists
-	if os.IsNotExist(err) {
-		return "", probe.NewError(BucketNotFound{Bucket: bucket})
-	}
-	if err != nil {
-		return "", probe.NewError(InternalError{})
+	if _, e := os.Stat(bucketPath); e != nil {
+		// check bucket exists
+		if os.IsNotExist(e) {
+			return "", probe.NewError(BucketNotFound{Bucket: bucket})
+		}
+		return "", probe.NewError(e)
 	}
+
 	objectPath := filepath.Join(bucketPath, object)
 	objectDir := filepath.Dir(objectPath)
-	if _, err = os.Stat(objectDir); os.IsNotExist(err) {
-		err = os.MkdirAll(objectDir, 0700)
-		if err != nil {
-			return "", probe.NewError(err)
+	if _, e := os.Stat(objectDir); e != nil {
+		if os.IsNotExist(e) {
+			e = os.MkdirAll(objectDir, 0700)
+			if e != nil {
+				return "", probe.NewError(e)
+			}
 		}
+		return "", probe.NewError(e)
 	}
 
 	id := []byte(strconv.FormatInt(rand.Int63(), 10) + bucket + object + time.Now().String())
 	uploadIDSum := sha512.Sum512(id)
 	uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47]
 
-	multiPartfile, err := os.OpenFile(objectPath+"$multiparts", os.O_WRONLY|os.O_CREATE, 0600)
-	if err != nil {
-		return "", probe.NewError(err)
+	multiPartfile, e := os.OpenFile(objectPath+"$multiparts", os.O_WRONLY|os.O_CREATE, 0600)
+	if e != nil {
+		return "", probe.NewError(e)
 	}
 	defer multiPartfile.Close()
 
@@ -197,9 +201,8 @@ func (fs Filesystem) NewMultipartUpload(bucket, object string) (string, *probe.E
 	fs.multiparts.ActiveSession[object] = mpartSession
 
 	encoder := json.NewEncoder(multiPartfile)
-	err = encoder.Encode(mpartSession)
-	if err != nil {
-		return "", probe.NewError(err)
+	if e = encoder.Encode(mpartSession); e != nil {
+		return "", probe.NewError(e)
 	}
 	if err := saveMultipartsSession(fs.multiparts); err != nil {
 		return "", err.Trace()
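The upload-ID scheme visible in NewMultipartUpload, pulled out as a self-contained sketch: a SHA-512 digest over pseudo-random bytes plus bucket, object and timestamp, URL-safe base64-encoded and truncated to 47 characters. Bucket and object names here are made up.

package main

import (
	"crypto/sha512"
	"encoding/base64"
	"fmt"
	"math/rand"
	"strconv"
	"time"
)

func newUploadID(bucket, object string) string {
	id := []byte(strconv.FormatInt(rand.Int63(), 10) + bucket + object + time.Now().String())
	sum := sha512.Sum512(id)
	// 47 characters keeps the ID short while remaining URL-safe.
	return base64.URLEncoding.EncodeToString(sum[:])[:47]
}

func main() {
	fmt.Println(newUploadID("photos", "2016/vacation.zip"))
}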
@@ -219,13 +222,13 @@ func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum s
 	fs.lock.Lock()
 	defer fs.lock.Unlock()
 
-	stfs, err := disk.Stat(fs.path)
+	di, err := disk.GetInfo(fs.path)
 	if err != nil {
 		return "", probe.NewError(err)
 	}
 
 	// Remove 5% from total space for cumulative disk space used for journalling, inodes etc.
-	availableDiskSpace := (float64(stfs.Free) / (float64(stfs.Total) - (0.05 * float64(stfs.Total)))) * 100
+	availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
 	if int64(availableDiskSpace) <= fs.minFreeDisk {
 		return "", probe.NewError(RootPathFull{Path: fs.path})
 	}
@@ -257,15 +260,14 @@ func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum s
 		expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
 	}
 
+	bucket = fs.denormalizeBucket(bucket)
 	bucketPath := filepath.Join(fs.path, bucket)
 	if _, err = os.Stat(bucketPath); err != nil {
 		// check bucket exists
 		if os.IsNotExist(err) {
 			return "", probe.NewError(BucketNotFound{Bucket: bucket})
 		}
-		if err != nil {
-			return "", probe.NewError(InternalError{})
-		}
+		return "", probe.NewError(err)
 	}
 
 	objectPath := filepath.Join(bucketPath, object)
@@ -357,6 +359,7 @@ func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, da
 		return ObjectMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
 	}
 
+	bucket = fs.denormalizeBucket(bucket)
 	bucketPath := filepath.Join(fs.path, bucket)
 	if _, err := os.Stat(bucketPath); err != nil {
 		// check bucket exists
@@ -470,14 +473,14 @@ func (fs Filesystem) ListObjectParts(bucket, object string, resources ObjectReso
 		startPartNumber = objectResourcesMetadata.PartNumberMarker
 	}
 
+	bucket = fs.denormalizeBucket(bucket)
 	bucketPath := filepath.Join(fs.path, bucket)
-	_, err := os.Stat(bucketPath)
-	// check bucket exists
-	if os.IsNotExist(err) {
-		return ObjectResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
-	}
-	if err != nil {
-		return ObjectResourcesMetadata{}, probe.NewError(InternalError{})
+	if _, e := os.Stat(bucketPath); e != nil {
+		// check bucket exists
+		if os.IsNotExist(e) {
+			return ObjectResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
+		}
+		return ObjectResourcesMetadata{}, probe.NewError(e)
 	}
 
 	objectPath := filepath.Join(bucketPath, object)
@@ -528,27 +531,26 @@ func (fs Filesystem) AbortMultipartUpload(bucket, object, uploadID string) *prob
 		return probe.NewError(InvalidUploadID{UploadID: uploadID})
 	}
 
+	bucket = fs.denormalizeBucket(bucket)
 	bucketPath := filepath.Join(fs.path, bucket)
-	_, err := os.Stat(bucketPath)
-	// check bucket exists
-	if os.IsNotExist(err) {
-		return probe.NewError(BucketNotFound{Bucket: bucket})
-	}
-	if err != nil {
-		return probe.NewError(InternalError{})
+	if _, e := os.Stat(bucketPath); e != nil {
+		// check bucket exists
+		if os.IsNotExist(e) {
+			return probe.NewError(BucketNotFound{Bucket: bucket})
+		}
+		return probe.NewError(e)
 	}
 
 	objectPath := filepath.Join(bucketPath, object)
 	for _, part := range fs.multiparts.ActiveSession[object].Parts {
-		err = os.RemoveAll(objectPath + fmt.Sprintf("$%d-$multiparts", part.PartNumber))
-		if err != nil {
-			return probe.NewError(err)
+		e := os.RemoveAll(objectPath + fmt.Sprintf("$%d-$multiparts", part.PartNumber))
+		if e != nil {
+			return probe.NewError(e)
 		}
 	}
 	delete(fs.multiparts.ActiveSession, object)
-	err = os.RemoveAll(objectPath + "$multiparts")
-	if err != nil {
-		return probe.NewError(err)
+	if e := os.RemoveAll(objectPath + "$multiparts"); e != nil {
+		return probe.NewError(e)
 	}
 	return nil
 }
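Both the multipart and single-object paths accept the Content-MD5 header as standard base64 and keep the digest as hex, as the context lines above show; a small sketch of that conversion using the well-known digest of the empty string.

package main

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"log"
	"strings"
)

func md5Base64ToHex(contentMD5 string) (string, error) {
	raw, err := base64.StdEncoding.DecodeString(strings.TrimSpace(contentMD5))
	if err != nil {
		return "", err
	}
	return hex.EncodeToString(raw), nil
}

func main() {
	hexSum, err := md5Base64ToHex("1B2M2Y8AsgTpgAmY7PhCfg==") // MD5 of ""
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(hexSum) // d41d8cd98f00b204e9800998ecf8427e
}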
diff --git a/pkg/fs/fs-object.go b/pkg/fs/fs-object.go
index 7ed4cc430..57ab4b9a5 100644
--- a/pkg/fs/fs-object.go
+++ b/pkg/fs/fs-object.go
@@ -52,7 +52,16 @@ func (fs Filesystem) GetObject(w io.Writer, bucket, object string, start, length
 		return 0, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
 	}
 
-	objectPath := filepath.Join(fs.path, bucket, object)
+	bucket = fs.denormalizeBucket(bucket)
+	bucketPath := filepath.Join(fs.path, bucket)
+	if _, e := os.Stat(bucketPath); e != nil {
+		if os.IsNotExist(e) {
+			return 0, probe.NewError(BucketNotFound{Bucket: bucket})
+		}
+		return 0, probe.NewError(e)
+	}
+
+	objectPath := filepath.Join(bucketPath, object)
 	filestat, err := os.Stat(objectPath)
 	switch err := err.(type) {
 	case nil:
@@ -170,13 +179,13 @@ func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size in
 	fs.lock.Lock()
 	defer fs.lock.Unlock()
 
-	stfs, err := disk.Stat(fs.path)
+	di, err := disk.GetInfo(fs.path)
 	if err != nil {
 		return ObjectMetadata{}, probe.NewError(err)
 	}
 
 	// Remove 5% from total space for cumulative disk space used for journalling, inodes etc.
-	availableDiskSpace := (float64(stfs.Free) / (float64(stfs.Total) - (0.05 * float64(stfs.Total)))) * 100
+	availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
 	if int64(availableDiskSpace) <= fs.minFreeDisk {
 		return ObjectMetadata{}, probe.NewError(RootPathFull{Path: fs.path})
 	}
@@ -185,9 +194,14 @@ func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size in
 	if !IsValidBucketName(bucket) {
 		return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
 	}
-	// check bucket exists
-	if _, err = os.Stat(filepath.Join(fs.path, bucket)); os.IsNotExist(err) {
-		return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
+
+	bucket = fs.denormalizeBucket(bucket)
+	bucketPath := filepath.Join(fs.path, bucket)
+	if _, e := os.Stat(bucketPath); e != nil {
+		if os.IsNotExist(e) {
+			return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
+		}
+		return ObjectMetadata{}, probe.NewError(e)
 	}
 	// verify object path legal
 	if !IsValidObjectName(object) {
@@ -195,7 +209,7 @@ func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size in
 	}
 
 	// get object path
-	objectPath := filepath.Join(fs.path, bucket, object)
+	objectPath := filepath.Join(bucketPath, object)
 	if strings.TrimSpace(expectedMD5Sum) != "" {
 		var expectedMD5SumBytes []byte
 		expectedMD5SumBytes, err = base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
@@ -308,10 +322,14 @@ func (fs Filesystem) DeleteObject(bucket, object string) *probe.Error {
 		return probe.NewError(BucketNameInvalid{Bucket: bucket})
 	}
 
+	bucket = fs.denormalizeBucket(bucket)
 	bucketPath := filepath.Join(fs.path, bucket)
 	// check bucket exists
-	if _, err := os.Stat(filepath.Join(fs.path, bucket)); os.IsNotExist(err) {
-		return probe.NewError(BucketNotFound{Bucket: bucket})
+	if _, e := os.Stat(bucketPath); e != nil {
+		if os.IsNotExist(e) {
+			return probe.NewError(BucketNotFound{Bucket: bucket})
+		}
+		return probe.NewError(e)
 	}
 
 	// verify object path legal
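The same error-handling shape repeats through GetObject, CreateObject and DeleteObject above: stat the (denormalized) bucket directory once, map a missing directory to BucketNotFound, and propagate anything else instead of collapsing it into InternalError. Reduced to a generic sketch with hypothetical names:

package main

import (
	"fmt"
	"log"
	"os"
)

// classifyBucketDir stats dir once and distinguishes "does not exist" from
// every other failure, mirroring the pattern used throughout this patch.
func classifyBucketDir(dir string) error {
	if _, e := os.Stat(dir); e != nil {
		if os.IsNotExist(e) {
			return fmt.Errorf("bucket not found: %s", dir)
		}
		return e // permission and I/O errors are reported as-is
	}
	return nil
}

func main() {
	if err := classifyBucketDir("/export/photos"); err != nil {
		log.Println(err)
	}
}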
diff --git a/pkg/fs/fs.go b/pkg/fs/fs.go
index 3fc152ec0..76762b672 100644
--- a/pkg/fs/fs.go
+++ b/pkg/fs/fs.go
@@ -29,6 +29,7 @@ import (
 type Filesystem struct {
 	path        string
 	minFreeDisk int64
+	maxBuckets  int
 	lock        *sync.Mutex
 	multiparts  *Multiparts
 	buckets     *Buckets
@@ -91,11 +92,20 @@ func New(rootPath string) (Filesystem, *probe.Error) {
 			return Filesystem{}, err.Trace()
 		}
 	}
-	a := Filesystem{lock: new(sync.Mutex)}
-	a.path = rootPath
-	a.multiparts = multiparts
-	a.buckets = buckets
-	return a, nil
+	fs := Filesystem{lock: new(sync.Mutex)}
+	fs.path = rootPath
+	fs.multiparts = multiparts
+	fs.buckets = buckets
+
+	/// Defaults
+
+	// maximum buckets to be listed from list buckets.
+	fs.maxBuckets = 100
+	// minimum free disk required for i/o operations to succeed.
+	fs.minFreeDisk = 10
+
+	// Return here.
+	return fs, nil
 }
 
 // SetMinFreeDisk - set min free disk
@@ -104,3 +114,13 @@ func (fs *Filesystem) SetMinFreeDisk(minFreeDisk int64) {
 	defer fs.lock.Unlock()
 	fs.minFreeDisk = minFreeDisk
 }
+
+// SetMaxBuckets - set total number of buckets supported, default is 100.
+func (fs *Filesystem) SetMaxBuckets(maxBuckets int) {
+	fs.lock.Lock()
+	defer fs.lock.Unlock()
+	if maxBuckets == 0 {
+		maxBuckets = 100
+	}
+	fs.maxBuckets = maxBuckets
+}
diff --git a/pkg/ioutils/ioutils.go b/pkg/ioutils/ioutils.go
new file mode 100644
index 000000000..a81e59047
--- /dev/null
+++ b/pkg/ioutils/ioutils.go
@@ -0,0 +1,45 @@
+package ioutils
+
+import (
+	"os"
+	"sort"
+)
+
+// byName implements sort.Interface for sorting os.FileInfo list.
+type byName []os.FileInfo
+
+func (f byName) Len() int           { return len(f) }
+func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() }
+func (f byName) Swap(i, j int)      { f[i], f[j] = f[j], f[i] }
+
+// ReadDirN reads the directory named by dirname and returns
+// a list of sorted directory entries of size 'n'.
+func ReadDirN(dirname string, n int) ([]os.FileInfo, error) {
+	f, err := os.Open(dirname)
+	if err != nil {
+		return nil, err
+	}
+	list, err := f.Readdir(n)
+	f.Close()
+	if err != nil {
+		return nil, err
+	}
+	sort.Sort(byName(list))
+	return list, nil
+}
+
+// ReadDirNamesN reads the directory named by dirname and returns
+// a list of sorted directory names of size 'n'.
+func ReadDirNamesN(dirname string, n int) ([]string, error) {
+	f, err := os.Open(dirname)
+	if err != nil {
+		return nil, err
+	}
+	names, err := f.Readdirnames(n)
+	f.Close()
+	if err != nil {
+		return nil, err
+	}
+	sort.Strings(names)
+	return names, nil
+}
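Finally, a minimal sketch of the new ioutils helpers as ListBuckets and denormalizeBucket use them: read at most n entries, sorted by name. The directory path and the 100-entry cap (the fs.maxBuckets default set in New) are illustrative.

package main

import (
	"fmt"
	"io"
	"log"

	"github.com/minio/minio/pkg/ioutils"
)

func main() {
	// Cap the listing at 100 names, matching the default configured in fs.New.
	names, err := ioutils.ReadDirNamesN("/export", 100)
	if err != nil && err != io.EOF {
		log.Fatalln(err)
	}
	for _, name := range names {
		fmt.Println(name)
	}
}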