Add updateConfig to reload configuration changes, when possible, in every API method

master
Harshavardhana 10 years ago
parent 36835befe6
commit 10b082144e
  1. 10
      pkg/donut/cache/data/data.go
  2. 34
      pkg/donut/config.go
  3. 6
      pkg/donut/donut-v1_test.go
  4. 83
      pkg/donut/donut-v2.go
  5. 8
      pkg/donut/donut-v2_test.go
  6. 39
      pkg/donut/multipart.go
  7. 2
      pkg/server/api/api.go

@ -75,8 +75,18 @@ func NewCache(maxSize uint64) *Cache {
}
}
// SetMaxSize sets a new upper bound on the cache size.
// It takes the cache lock, so it is safe to call concurrently
// with other Cache methods.
func (r *Cache) SetMaxSize(maxSize uint64) {
	r.Lock()
	defer r.Unlock()
	r.maxSize = maxSize
}
// Stats get current cache statistics
func (r *Cache) Stats() Stats {
r.Lock()
defer r.Unlock()
return Stats{
Bytes: r.currentSize,
Items: r.items.Len(),

@ -34,12 +34,20 @@ func getDonutConfigPath() (string, error) {
return donutConfigPath, nil
}
var customConfigPath string
// SaveConfig save donut config
func SaveConfig(a *Config) error {
donutConfigPath, err := getDonutConfigPath()
var donutConfigPath string
var err error
if customConfigPath != "" {
donutConfigPath = customConfigPath
} else {
donutConfigPath, err = getDonutConfigPath()
if err != nil {
return iodine.New(err, nil)
}
}
qc, err := quick.New(a)
if err != nil {
return iodine.New(err, nil)
@ -52,10 +60,16 @@ func SaveConfig(a *Config) error {
// LoadConfig load donut config
func LoadConfig() (*Config, error) {
donutConfigPath, err := getDonutConfigPath()
var donutConfigPath string
var err error
if customConfigPath != "" {
donutConfigPath = customConfigPath
} else {
donutConfigPath, err = getDonutConfigPath()
if err != nil {
return nil, iodine.New(err, nil)
}
}
a := &Config{}
a.Version = "0.0.1"
qc, err := quick.New(a)
@ -67,19 +81,3 @@ func LoadConfig() (*Config, error) {
}
return qc.Data().(*Config), nil
}
// LoadDonut load donut from config.
//
// Reads the on-disk donut config via LoadConfig and constructs a new
// donut Interface from it. If the config cannot be loaded, a built-in
// default config (cache-only, 512 MB max size) is used instead.
// NOTE(review): the LoadConfig error is silently discarded here, so a
// corrupt config file is indistinguishable from a missing one — confirm
// this best-effort fallback is intended.
func LoadDonut() (Interface, error) {
	conf, err := LoadConfig()
	if err != nil {
		// Fall back to defaults rather than failing startup.
		conf = &Config{
			Version: "0.0.1",
			MaxSize: 512000000,
		}
	}
	donut, err := New(conf)
	if err != nil {
		// iodine wraps the error with call-site context.
		return nil, iodine.New(err, nil)
	}
	return donut, nil
}

@ -62,11 +62,15 @@ func (s *MyDonutSuite) SetUpSuite(c *C) {
s.root = root
conf := new(Config)
conf.Version = "0.0.1"
conf.DonutName = "test"
conf.NodeDiskMap = createTestNodeDiskMap(root)
conf.MaxSize = 100000
customConfigPath = filepath.Join(root, "donut.json")
err = SaveConfig(conf)
c.Assert(err, IsNil)
dd, err = New(conf)
dd, err = New()
c.Assert(err, IsNil)
// testing empty donut

@ -25,6 +25,7 @@ import (
"io"
"io/ioutil"
"log"
"reflect"
"runtime/debug"
"sort"
"strconv"
@ -71,11 +72,22 @@ type storedBucket struct {
}
// New instantiate a new donut
func New(c *Config) (Interface, error) {
if err := quick.CheckData(c); err != nil {
func New() (Interface, error) {
var conf *Config
var err error
conf, err = LoadConfig()
if err != nil {
conf = &Config{
Version: "0.0.1",
MaxSize: 512000000,
NodeDiskMap: nil,
DonutName: "",
}
if err := quick.CheckData(conf); err != nil {
return nil, iodine.New(err, nil)
}
a := API{config: c}
}
a := API{config: conf}
a.storedBuckets = metadata.NewCache()
a.nodes = make(map[string]node)
a.buckets = make(map[string]bucket)
@ -107,10 +119,30 @@ func New(c *Config) (Interface, error) {
return a, nil
}
// updateConfig reloads the on-disk donut config, best-effort, and applies
// a changed MaxSize to the object cache. Intended to be called at the top
// of every API method (callers hold donut.lock).
//
// NOTE(review): the receiver is a value (donut API), so the assignment
// donut.config = conf below mutates only this call's copy of the struct;
// only the SetMaxSize side effect persists (donut.objects is presumably a
// pointer/reference) — confirm whether the new config is meant to stick.
func (donut API) updateConfig() {
	// on error loading config's just return do not modify
	conf, err := LoadConfig()
	if err != nil {
		return
	}
	// Loaded config is identical to the current one: nothing to apply.
	if reflect.DeepEqual(donut.config, conf) {
		return
	}
	// NOTE(review): returning when MaxSize is unchanged skips applying any
	// other fields that may differ — verify this early-out is intended.
	if conf.MaxSize == donut.config.MaxSize {
		return
	}
	donut.config = conf
	donut.objects.SetMaxSize(conf.MaxSize)
}
// GetObject - GET object from cache buffer
func (donut API) GetObject(w io.Writer, bucket string, object string) (int64, error) {
donut.lock.Lock()
defer donut.lock.Unlock()
// update Config if possible
donut.updateConfig()
if !IsValidBucket(bucket) {
return 0, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
}
@ -156,6 +188,9 @@ func (donut API) GetObject(w io.Writer, bucket string, object string) (int64, er
func (donut API) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) {
donut.lock.Lock()
defer donut.lock.Unlock()
// update Config if possible
donut.updateConfig()
errParams := map[string]string{
"bucket": bucket,
"object": object,
@ -211,6 +246,9 @@ func (donut API) GetPartialObject(w io.Writer, bucket, object string, start, len
func (donut API) GetBucketMetadata(bucket string) (BucketMetadata, error) {
donut.lock.Lock()
defer donut.lock.Unlock()
// update Config if possible
donut.updateConfig()
if !IsValidBucket(bucket) {
return BucketMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
}
@ -233,6 +271,9 @@ func (donut API) GetBucketMetadata(bucket string) (BucketMetadata, error) {
func (donut API) SetBucketMetadata(bucket string, metadata map[string]string) error {
donut.lock.Lock()
defer donut.lock.Unlock()
// update Config if possible
donut.updateConfig()
if !IsValidBucket(bucket) {
return iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
}
@ -271,25 +312,29 @@ func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) error {
// CreateObject - create an object
func (donut API) CreateObject(bucket, key, expectedMD5Sum string, size int64, data io.Reader, metadata map[string]string) (ObjectMetadata, error) {
if size > int64(donut.config.MaxSize) {
generic := GenericObjectError{Bucket: bucket, Object: key}
return ObjectMetadata{}, iodine.New(EntityTooLarge{
GenericObjectError: generic,
Size: strconv.FormatInt(size, 10),
MaxSize: strconv.FormatUint(donut.config.MaxSize, 10),
}, nil)
}
contentType := metadata["contentType"]
donut.lock.Lock()
defer donut.lock.Unlock()
// update Config if possible
donut.updateConfig()
contentType := metadata["contentType"]
objectMetadata, err := donut.createObject(bucket, key, contentType, expectedMD5Sum, size, data)
donut.lock.Unlock()
// free
debug.FreeOSMemory()
return objectMetadata, iodine.New(err, nil)
}
// createObject - PUT object to cache buffer
func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (ObjectMetadata, error) {
if size > int64(donut.config.MaxSize) {
generic := GenericObjectError{Bucket: bucket, Object: key}
return ObjectMetadata{}, iodine.New(EntityTooLarge{
GenericObjectError: generic,
Size: strconv.FormatInt(size, 10),
MaxSize: strconv.FormatUint(donut.config.MaxSize, 10),
}, nil)
}
if !IsValidBucket(bucket) {
return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
}
@ -384,6 +429,9 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
func (donut API) MakeBucket(bucketName, acl string) error {
donut.lock.Lock()
defer donut.lock.Unlock()
// update Config if possible
donut.updateConfig()
if donut.storedBuckets.Stats().Items == totalBuckets {
return iodine.New(TooManyBuckets{Bucket: bucketName}, nil)
}
@ -422,6 +470,9 @@ func (donut API) MakeBucket(bucketName, acl string) error {
func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, error) {
donut.lock.Lock()
defer donut.lock.Unlock()
// update Config if possible
donut.updateConfig()
if !IsValidBucket(bucket) {
return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
}
@ -515,6 +566,9 @@ func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name }
func (donut API) ListBuckets() ([]BucketMetadata, error) {
donut.lock.Lock()
defer donut.lock.Unlock()
// update Config if possible
donut.updateConfig()
var results []BucketMetadata
if len(donut.config.NodeDiskMap) > 0 {
buckets, err := donut.listBuckets()
@ -538,6 +592,9 @@ func (donut API) ListBuckets() ([]BucketMetadata, error) {
func (donut API) GetObjectMetadata(bucket, key string) (ObjectMetadata, error) {
donut.lock.Lock()
defer donut.lock.Unlock()
// update Config if possible
donut.updateConfig()
// check if bucket exists
if !IsValidBucket(bucket) {
return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)

@ -36,14 +36,8 @@ var _ = Suite(&MyCacheSuite{})
var dc Interface
func (s *MyCacheSuite) SetUpSuite(c *C) {
// test only cache
conf := new(Config)
conf.DonutName = ""
conf.NodeDiskMap = nil
conf.MaxSize = 100000
var err error
dc, err = New(conf)
dc, err = New()
c.Assert(err, IsNil)
// testing empty cache

@ -38,6 +38,9 @@ import (
func (donut API) NewMultipartUpload(bucket, key, contentType string) (string, error) {
donut.lock.Lock()
defer donut.lock.Unlock()
// update Config if possible
donut.updateConfig()
if !IsValidBucket(bucket) {
return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
}
@ -69,6 +72,9 @@ func (donut API) NewMultipartUpload(bucket, key, contentType string) (string, er
func (donut API) AbortMultipartUpload(bucket, key, uploadID string) error {
donut.lock.Lock()
defer donut.lock.Unlock()
// update Config if possible
donut.updateConfig()
if !IsValidBucket(bucket) {
return iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
}
@ -85,25 +91,29 @@ func (donut API) AbortMultipartUpload(bucket, key, uploadID string) error {
// CreateObjectPart - create a part in a multipart session
func (donut API) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
if !IsValidBucket(bucket) {
return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
}
if !IsValidObjectName(key) {
return "", iodine.New(ObjectNameInvalid{Object: key}, nil)
}
if !donut.storedBuckets.Exists(bucket) {
return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
donut.lock.Lock()
defer donut.lock.Unlock()
// update Config if possible
donut.updateConfig()
etag, err := donut.createObjectPart(bucket, key, uploadID, partID, "", expectedMD5Sum, size, data)
donut.lock.Unlock()
// possible free
debug.FreeOSMemory()
return etag, iodine.New(err, nil)
}
// createObject - internal wrapper function called by CreateObjectPart
func (donut API) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
if !IsValidBucket(bucket) {
return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
}
if !IsValidObjectName(key) {
return "", iodine.New(ObjectNameInvalid{Object: key}, nil)
}
if !donut.storedBuckets.Exists(bucket) {
return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
// Verify upload id
if storedBucket.multiPartSession[key].uploadID != uploadID {
@ -191,6 +201,9 @@ func (donut API) cleanupMultipartSession(bucket, key, uploadID string) {
// CompleteMultipartUpload - complete a multipart upload and persist the data
func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (ObjectMetadata, error) {
donut.lock.Lock()
// update Config if possible
donut.updateConfig()
if !IsValidBucket(bucket) {
donut.lock.Unlock()
return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
@ -269,6 +282,9 @@ func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartRe
// TODO handle delimiter
donut.lock.Lock()
defer donut.lock.Unlock()
// update Config if possible
donut.updateConfig()
if !donut.storedBuckets.Exists(bucket) {
return BucketMultipartResourcesMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
@ -331,6 +347,9 @@ func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMe
// Verify upload id
donut.lock.Lock()
defer donut.lock.Unlock()
// update Config if possible
donut.updateConfig()
if !donut.storedBuckets.Exists(bucket) {
return ObjectResourcesMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
}

@ -32,7 +32,7 @@ type Minio struct {
// New instantiate a new minio API
func New() Minio {
// ignore errors for now
d, _ := donut.LoadDonut()
d, _ := donut.New()
return Minio{
OP: make(chan Operation),
Donut: d,

Loading…
Cancel
Save