Merge pull request #1199 from brendanashworth/improvements-fs

pkg/fs improvements
Branch: master
Harshavardhana committed 9 years ago
commit 59fbb6d081
  1. pkg/fs/fs-bucket-acl.go (12 changes)
  2. pkg/fs/fs-bucket.go (53 changes)
  3. pkg/fs/fs-bucket_test.go (145 changes)
  4. pkg/fs/fs-multipart.go (11 changes)

@@ -18,8 +18,8 @@ package fs
 // IsPrivateBucket - is private bucket
 func (fs Filesystem) IsPrivateBucket(bucket string) bool {
-	fs.rwLock.Lock()
-	defer fs.rwLock.Unlock()
+	fs.rwLock.RLock()
+	defer fs.rwLock.RUnlock()
 	bucketMetadata, ok := fs.buckets.Metadata[bucket]
 	if !ok {
 		return true
@@ -29,8 +29,8 @@ func (fs Filesystem) IsPrivateBucket(bucket string) bool {
 // IsPublicBucket - is public bucket
 func (fs Filesystem) IsPublicBucket(bucket string) bool {
-	fs.rwLock.Lock()
-	defer fs.rwLock.Unlock()
+	fs.rwLock.RLock()
+	defer fs.rwLock.RUnlock()
 	bucketMetadata, ok := fs.buckets.Metadata[bucket]
 	if !ok {
 		return true
@@ -40,8 +40,8 @@ func (fs Filesystem) IsPublicBucket(bucket string) bool {
 // IsReadOnlyBucket - is read only bucket
 func (fs Filesystem) IsReadOnlyBucket(bucket string) bool {
-	fs.rwLock.Lock()
-	defer fs.rwLock.Unlock()
+	fs.rwLock.RLock()
+	defer fs.rwLock.RUnlock()
 	bucketMetadata, ok := fs.buckets.Metadata[bucket]
 	if !ok {
 		return true
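
Note on the hunks above: the three ACL accessors only read fs.buckets.Metadata, so the exclusive Lock/Unlock pair is swapped for the shared RLock/RUnlock pair of sync.RWMutex, letting concurrent readers proceed in parallel while writers keep exclusive access. A minimal standalone sketch of the pattern (not part of this commit; the metadataStore type and its fields are made up for illustration):

package main

import (
	"fmt"
	"sync"
)

// metadataStore is a hypothetical stand-in for fs.buckets guarded by fs.rwLock.
type metadataStore struct {
	rwLock sync.RWMutex
	acl    map[string]string
}

// IsPrivate only reads the map, so a shared read lock is enough; many
// goroutines may hold RLock at once, while writers still serialize.
func (m *metadataStore) IsPrivate(bucket string) bool {
	m.rwLock.RLock()
	defer m.rwLock.RUnlock()
	acl, ok := m.acl[bucket]
	if !ok {
		return true // unknown buckets default to private, as in the diff above
	}
	return acl == "private"
}

// SetACL mutates the map and therefore takes the exclusive write lock.
func (m *metadataStore) SetACL(bucket, acl string) {
	m.rwLock.Lock()
	defer m.rwLock.Unlock()
	m.acl[bucket] = acl
}

func main() {
	m := &metadataStore{acl: map[string]string{"photos": "public-read"}}
	m.SetACL("docs", "private")
	fmt.Println(m.IsPrivate("photos"), m.IsPrivate("docs"), m.IsPrivate("missing"))
}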

@@ -36,14 +36,11 @@ func (fs Filesystem) DeleteBucket(bucket string) *probe.Error {
 	}
 	bucket = fs.denormalizeBucket(bucket)
 	bucketDir := filepath.Join(fs.path, bucket)
-	// Check if bucket exists.
-	if _, e := os.Stat(bucketDir); e != nil {
+	if e := os.Remove(bucketDir); e != nil {
+		// Error if there was no bucket in the first place.
 		if os.IsNotExist(e) {
 			return probe.NewError(BucketNotFound{Bucket: bucket})
 		}
-		return probe.NewError(e)
-	}
-	if e := os.Remove(bucketDir); e != nil {
 		// On windows the string is slightly different, handle it here.
 		if strings.Contains(e.Error(), "directory is not empty") {
 			return probe.NewError(BucketNotEmpty{Bucket: bucket})
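
Note on the hunk above: instead of an os.Stat existence check followed by os.Remove, DeleteBucket now calls os.Remove directly and classifies its error, which drops a syscall and removes the stat/remove race. A rough standalone sketch of that error-classification shape (not from this commit; removeBucketDir and the error values are hypothetical):

package main

import (
	"errors"
	"fmt"
	"os"
	"strings"
)

var (
	errBucketNotFound = errors.New("bucket not found")
	errBucketNotEmpty = errors.New("bucket not empty")
)

// removeBucketDir mirrors the pattern above: call os.Remove directly and
// inspect its error instead of doing a separate os.Stat first.
func removeBucketDir(bucketDir string) error {
	if e := os.Remove(bucketDir); e != nil {
		if os.IsNotExist(e) {
			return errBucketNotFound
		}
		// os.Remove refuses to delete a non-empty directory; the exact
		// wording differs between platforms, hence the substring check.
		if strings.Contains(e.Error(), "not empty") {
			return errBucketNotEmpty
		}
		return e
	}
	return nil
}

func main() {
	fmt.Println(removeBucketDir("/tmp/no-such-bucket-dir"))
}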
@@ -58,12 +55,12 @@ func (fs Filesystem) DeleteBucket(bucket string) *probe.Error {
 	// Critical region hold write lock.
 	fs.rwLock.Lock()
+	defer fs.rwLock.Unlock()
 	delete(fs.buckets.Metadata, bucket)
 	if err := saveBucketsMetadata(*fs.buckets); err != nil {
-		fs.rwLock.Unlock()
 		return err.Trace(bucket)
 	}
-	fs.rwLock.Unlock()
 	return nil
 }
@@ -171,12 +168,12 @@ func (fs Filesystem) MakeBucket(bucket, acl string) *probe.Error {
 	// Critical region hold a write lock.
 	fs.rwLock.Lock()
+	defer fs.rwLock.Unlock()
 	fs.buckets.Metadata[bucket] = bucketMetadata
 	if err := saveBucketsMetadata(*fs.buckets); err != nil {
-		fs.rwLock.Unlock()
 		return err.Trace(bucket)
 	}
-	fs.rwLock.Unlock()
 	return nil
 }
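
Note on the two hunks above: deferring the Unlock immediately after taking the write lock means every return path, including the early return when saveBucketsMetadata fails, releases the lock exactly once, so the duplicated manual Unlock calls can go. A small self-contained sketch of the shape (not from this commit; store and save are stand-ins):

package main

import (
	"errors"
	"fmt"
	"sync"
)

type store struct {
	rwLock sync.RWMutex
	meta   map[string]string
}

// save is a hypothetical stand-in for saveBucketsMetadata.
func save(meta map[string]string) error {
	if len(meta) == 0 {
		return errors.New("nothing to save")
	}
	return nil
}

// MakeBucket takes the lock once and defers the unlock; both the error
// return and the final return release it without extra bookkeeping.
func (s *store) MakeBucket(bucket string) error {
	s.rwLock.Lock()
	defer s.rwLock.Unlock()
	s.meta[bucket] = "private"
	if err := save(s.meta); err != nil {
		return err // lock is still released by the deferred Unlock
	}
	return nil
}

func main() {
	s := &store{meta: map[string]string{}}
	fmt.Println(s.MakeBucket("bucket"))
}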
@@ -230,47 +227,29 @@ func (fs Filesystem) GetBucketMetadata(bucket string) (BucketMetadata, *probe.Er
 // SetBucketMetadata - set bucket metadata.
 func (fs Filesystem) SetBucketMetadata(bucket string, metadata map[string]string) *probe.Error {
 	// Input validation.
-	if !IsValidBucketName(bucket) {
-		return probe.NewError(BucketNameInvalid{Bucket: bucket})
+	bucketMetadata, err := fs.GetBucketMetadata(bucket)
+	if err != nil {
+		return err
 	}
 	// Save the acl.
 	acl := metadata["acl"]
 	if !IsValidBucketACL(acl) {
 		return probe.NewError(InvalidACL{ACL: acl})
-	}
-	if acl == "" {
+	} else if acl == "" {
 		acl = "private"
 	}
-	bucket = fs.denormalizeBucket(bucket)
-	bucketDir := filepath.Join(fs.path, bucket)
-	fi, e := os.Stat(bucketDir)
-	if e != nil {
-		// Check if bucket exists.
-		if os.IsNotExist(e) {
-			return probe.NewError(BucketNotFound{Bucket: bucket})
-		}
-		return probe.NewError(e)
-	}
-	// Critical region handle read lock.
-	fs.rwLock.RLock()
-	bucketMetadata, ok := fs.buckets.Metadata[bucket]
-	fs.rwLock.RUnlock()
-	if !ok {
-		bucketMetadata = &BucketMetadata{}
-		bucketMetadata.Name = fi.Name()
-		bucketMetadata.Created = fi.ModTime()
-	}
 	bucketMetadata.ACL = BucketACL(acl)
+	bucket = fs.denormalizeBucket(bucket)
 	// Critical region handle write lock.
 	fs.rwLock.Lock()
-	fs.buckets.Metadata[bucket] = bucketMetadata
+	defer fs.rwLock.Unlock()
+	fs.buckets.Metadata[bucket] = &bucketMetadata
 	if err := saveBucketsMetadata(*fs.buckets); err != nil {
-		fs.rwLock.Unlock()
 		return err.Trace(bucket)
 	}
-	fs.rwLock.Unlock()
 	return nil
 }
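
Note on the SetBucketMetadata rewrite above: the duplicated name validation, os.Stat, and metadata lookup are replaced by a call to GetBucketMetadata; the returned copy is mutated and then stored back under the write lock. A compact standalone sketch of that fetch, modify the copy, store it back pattern (not from this commit; BucketMeta, store, Get, and SetACL are illustrative stand-ins):

package main

import (
	"fmt"
	"sync"
)

// BucketMeta is a hypothetical stand-in for BucketMetadata.
type BucketMeta struct {
	Name string
	ACL  string
}

type store struct {
	rwLock sync.RWMutex
	meta   map[string]*BucketMeta
}

// Get returns a copy of the metadata under a read lock, so callers can
// mutate the copy without holding any lock.
func (s *store) Get(bucket string) (BucketMeta, bool) {
	s.rwLock.RLock()
	defer s.rwLock.RUnlock()
	m, ok := s.meta[bucket]
	if !ok {
		return BucketMeta{}, false
	}
	return *m, true
}

// SetACL reuses Get for the existence check, updates the copy, then writes
// a pointer to the copy back under the write lock.
func (s *store) SetACL(bucket, acl string) bool {
	m, ok := s.Get(bucket)
	if !ok {
		return false
	}
	if acl == "" {
		acl = "private"
	}
	m.ACL = acl
	s.rwLock.Lock()
	defer s.rwLock.Unlock()
	s.meta[bucket] = &m
	return true
}

func main() {
	s := &store{meta: map[string]*BucketMeta{"bucket": {Name: "bucket", ACL: "private"}}}
	s.SetACL("bucket", "public-read-write")
	m, _ := s.Get("bucket")
	fmt.Println(m.ACL)
}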

@@ -0,0 +1,145 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package fs

import (
	"io/ioutil"
	"os"
	"strings"
	"testing"
)

func TestDeleteBucket(t *testing.T) {
	// Make a temporary directory to use as the filesystem.
	directory, fserr := ioutil.TempDir("", "minio-benchmark")
	if fserr != nil {
		t.Fatal(fserr)
	}
	defer os.RemoveAll(directory)

	// Create the filesystem.
	filesystem, err := New(directory, 0)
	if err != nil {
		t.Fatal(err)
	}

	// Deleting a bucket that doesn't exist should error.
	err = filesystem.DeleteBucket("bucket")
	if !strings.Contains(err.Cause.Error(), "Bucket not found:") {
		t.Fail()
	}
}

func BenchmarkDeleteBucket(b *testing.B) {
	// Make a temporary directory to use as the filesystem.
	directory, fserr := ioutil.TempDir("", "minio-benchmark")
	if fserr != nil {
		b.Fatal(fserr)
	}
	defer os.RemoveAll(directory)

	// Create the filesystem.
	filesystem, err := New(directory, 0)
	if err != nil {
		b.Fatal(err)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Creating buckets takes time, so stop and start the timer.
		b.StopTimer()

		// Create and delete the bucket over and over.
		err = filesystem.MakeBucket("bucket", "public-read-write")
		if err != nil {
			b.Fatal(err)
		}

		b.StartTimer()
		err = filesystem.DeleteBucket("bucket")
		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkGetBucketMetadata(b *testing.B) {
	// Make a temporary directory to use as the filesystem.
	directory, fserr := ioutil.TempDir("", "minio-benchmark")
	if fserr != nil {
		b.Fatal(fserr)
	}
	defer os.RemoveAll(directory)

	// Create the filesystem.
	filesystem, err := New(directory, 0)
	if err != nil {
		b.Fatal(err)
	}

	// Put up a bucket with some metadata.
	err = filesystem.MakeBucket("bucket", "public-read-write")
	if err != nil {
		b.Fatal(err)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Retrieve the metadata!
		_, err := filesystem.GetBucketMetadata("bucket")
		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkSetBucketMetadata(b *testing.B) {
	// Make a temporary directory to use as the filesystem.
	directory, fserr := ioutil.TempDir("", "minio-benchmark")
	if fserr != nil {
		b.Fatal(fserr)
	}
	defer os.RemoveAll(directory)

	// Create the filesystem.
	filesystem, err := New(directory, 0)
	if err != nil {
		b.Fatal(err)
	}

	// Put up a bucket with some metadata.
	err = filesystem.MakeBucket("bucket", "public-read-write")
	if err != nil {
		b.Fatal(err)
	}

	metadata := make(map[string]string)
	metadata["acl"] = "public-read-write"

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Set all the metadata!
		err = filesystem.SetBucketMetadata("bucket", metadata)
		if err != nil {
			b.Fatal(err)
		}
	}
}
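
Note on the new tests above: the benchmarks use b.ResetTimer to discard one-time setup and b.StopTimer/b.StartTimer to keep per-iteration setup (recreating the bucket) out of the measured time. A toy benchmark showing the same timer pattern on plain directories (not part of this commit; names are illustrative):

package fs_test

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"
)

// BenchmarkRemoveDir measures only os.Remove; directory creation is
// per-iteration setup and is excluded via StopTimer/StartTimer.
func BenchmarkRemoveDir(b *testing.B) {
	parent, err := ioutil.TempDir("", "bench-remove")
	if err != nil {
		b.Fatal(err)
	}
	defer os.RemoveAll(parent)

	b.ResetTimer() // exclude the one-time setup above
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		dir := filepath.Join(parent, "bucket")
		if err := os.Mkdir(dir, 0700); err != nil {
			b.Fatal(err)
		}
		b.StartTimer()

		if err := os.Remove(dir); err != nil {
			b.Fatal(err)
		}
	}
}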

@@ -276,6 +276,7 @@ func (fs Filesystem) NewMultipartUpload(bucket, object string) (string, *probe.E
 	// Critical region requiring write lock.
 	fs.rwLock.Lock()
+	defer fs.rwLock.Unlock()
 	// Initialize multipart session.
 	mpartSession := &MultipartSession{}
 	mpartSession.TotalParts = 0
@@ -288,10 +289,8 @@ func (fs Filesystem) NewMultipartUpload(bucket, object string) (string, *probe.E
 	fs.multiparts.ActiveSession[uploadID] = mpartSession
 	if err := saveMultipartsSession(*fs.multiparts); err != nil {
-		fs.rwLock.Unlock()
 		return "", err.Trace(objectPath)
 	}
-	fs.rwLock.Unlock()
 	return uploadID, nil
 }
@@ -445,12 +444,12 @@ func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum s
 	// Critical region requiring write lock.
 	fs.rwLock.Lock()
+	defer fs.rwLock.Unlock()
 	fs.multiparts.ActiveSession[uploadID] = deserializedMultipartSession
 	if err := saveMultipartsSession(*fs.multiparts); err != nil {
-		fs.rwLock.Unlock()
 		return "", err.Trace(partPathPrefix)
 	}
-	fs.rwLock.Unlock()
 	// Return etag.
 	return partMetadata.ETag, nil
@@ -693,11 +692,11 @@ func (fs Filesystem) AbortMultipartUpload(bucket, object, uploadID string) *prob
 	// Critical region requiring write lock.
 	fs.rwLock.Lock()
+	defer fs.rwLock.Unlock()
 	delete(fs.multiparts.ActiveSession, uploadID)
 	if err := saveMultipartsSession(*fs.multiparts); err != nil {
-		fs.rwLock.Unlock()
 		return err.Trace(partPathPrefix)
 	}
-	fs.rwLock.Unlock()
 	return nil
 }
