Implement metadata cache; it is used by top-level donut right now. Rename trove to data cache.

We should use it internally everywhere.
Branch: master
Author: Harshavardhana, 10 years ago
Parent: 7d2609856e
Commit: 0a827305ad
  1. pkg/donut/cache/data/data.go (4 changed lines)
  2. pkg/donut/cache/data/data_test.go (2 changed lines)
  3. pkg/donut/cache/metadata/metadata.go (112 changed lines)
  4. pkg/donut/cache/metadata/metadata_test.go (46 changed lines)
  5. pkg/donut/definitions.go (65 changed lines)
  6. pkg/donut/donut-v2.go (92 changed lines)
  7. pkg/donut/multipart.go (94 changed lines)
  8. pkg/donut/utils.go (124 changed lines)

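In short: pkg/donut/trove becomes pkg/donut/cache/data, a new pkg/donut/cache/metadata package is introduced, and donut's storedBuckets map is replaced by that metadata cache. As a condensed recap (not part of the diff), the API struct in pkg/donut/donut-v2.go ends up as:

type API struct {
	config           *Config
	lock             *sync.Mutex
	objects          *data.Cache     // was *trove.Cache
	multiPartObjects *data.Cache     // was *trove.Cache
	storedBuckets    *metadata.Cache // was map[string]storedBucket
	nodes            map[string]node
	buckets          map[string]bucket
}
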
@@ -14,8 +14,8 @@
* limitations under the License.
*/
// Package trove implements in memory caching methods
package trove
// Package data implements in-memory caching methods for data
package data
import (
"sync"

@@ -14,7 +14,7 @@
* limitations under the License.
*/
package trove
package data
import (
"testing"

@@ -0,0 +1,112 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package metadata implements in-memory caching methods for metadata information
package metadata
import (
"sync"
"time"
)
var noExpiration = time.Duration(0)
// Cache holds the required variables to compose an in-memory cache
// system for metadata; unlike the data cache it has no key expiration
// or maxSize limit
type Cache struct {
// Mutex guards concurrent read/write access to the cache
sync.Mutex
// items hold the cached objects
items map[string]interface{}
// updatedAt holds the time at which each item was last updated
updatedAt map[string]time.Time
}
// Stats represents current cache statistics
type Stats struct {
Items int
}
// NewCache creates an in-memory metadata cache
func NewCache() *Cache {
return &Cache{
items: make(map[string]interface{}),
updatedAt: map[string]time.Time{},
}
}
// Stats returns current cache statistics
func (r *Cache) Stats() Stats {
return Stats{
Items: len(r.items),
}
}
// GetAll returns all the items
func (r *Cache) GetAll() map[string]interface{} {
r.Lock()
defer r.Unlock()
// return a shallow copy so callers cannot mutate the internal map
items := make(map[string]interface{}, len(r.items))
for k, v := range r.items {
items[k] = v
}
return items
}
// Get returns a value of a given key if it exists
func (r *Cache) Get(key string) interface{} {
r.Lock()
defer r.Unlock()
value, ok := r.items[key]
if !ok {
return nil
}
r.updatedAt[key] = time.Now()
return value
}
// Exists returns true if key exists
func (r *Cache) Exists(key string) bool {
r.Lock()
defer r.Unlock()
_, ok := r.items[key]
return ok
}
// Set will persist a value to the cache
func (r *Cache) Set(key string, value interface{}) bool {
r.Lock()
defer r.Unlock()
r.items[key] = value
r.updatedAt[key] = time.Now()
return true
}
// Delete deletes a given key if exists
func (r *Cache) Delete(key string) {
r.Lock()
defer r.Unlock()
r.doDelete(key)
}
func (r *Cache) doDelete(key string) {
if _, ok := r.items[key]; ok {
delete(r.items, key)
delete(r.updatedAt, key)
}
}

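The new package amounts to a small mutex-guarded map. A minimal usage sketch of the API added above (the import path is the one this commit creates; values round-trip through interface{} and must be type-asserted on the way out):

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/donut/cache/metadata"
)

func main() {
	cache := metadata.NewCache()
	cache.Set("bucket1", "some metadata") // any value satisfies interface{}

	if cache.Exists("bucket1") {
		value := cache.Get("bucket1").(string) // type-assert back to the concrete type
		fmt.Println(value)
	}

	fmt.Println(cache.Stats().Items) // 1
	cache.Delete("bucket1")
	fmt.Println(cache.Exists("bucket1")) // false
}
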
@@ -0,0 +1,46 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package metadata
import (
"testing"
. "github.com/minio/check"
)
func Test(t *testing.T) { TestingT(t) }
type MySuite struct{}
var _ = Suite(&MySuite{})
func (s *MySuite) TestCache(c *C) {
cache := NewCache()
data := []byte("Hello, world!")
ok := cache.Set("filename", data)
c.Assert(ok, Equals, true)
storedata := cache.Get("filename")
c.Assert(storedata, NotNil)
c.Assert(data, DeepEquals, storedata)
cache.Delete("filename")
ok = cache.Exists("filename")
c.Assert(ok, Equals, false)
}

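The test above covers the single-goroutine path. Since every Cache method acquires the embedded sync.Mutex, concurrent callers are also safe; a small sketch under that assumption (hypothetical key and values, not from the diff):

package main

import (
	"sync"

	"github.com/minio/minio/pkg/donut/cache/metadata"
)

func main() {
	cache := metadata.NewCache()
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			// Set and Get each acquire the cache's embedded mutex,
			// so these calls do not race with one another.
			cache.Set("shared-key", n)
			_ = cache.Get("shared-key")
		}(i)
	}
	wg.Wait()
}
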
@@ -71,3 +71,68 @@ type ListObjectsResults struct {
CommonPrefixes []string `json:"commonPrefixes"`
IsTruncated bool `json:"isTruncated"`
}
// MultiPartSession multipart session
type MultiPartSession struct {
totalParts int
uploadID string
initiated time.Time
}
// PartMetadata - various types of individual part resources
type PartMetadata struct {
PartNumber int
LastModified time.Time
ETag string
Size int64
}
// ObjectResourcesMetadata - various types of object resources
type ObjectResourcesMetadata struct {
Bucket string
EncodingType string
Key string
UploadID string
StorageClass string
PartNumberMarker int
NextPartNumberMarker int
MaxParts int
IsTruncated bool
Part []*PartMetadata
}
// UploadMetadata container capturing metadata on an in-progress multipart upload in a given bucket
type UploadMetadata struct {
Key string
UploadID string
StorageClass string
Initiated time.Time
}
// BucketMultipartResourcesMetadata - various types of bucket resources for in-progress multipart uploads
type BucketMultipartResourcesMetadata struct {
KeyMarker string
UploadIDMarker string
NextKeyMarker string
NextUploadIDMarker string
EncodingType string
MaxUploads int
IsTruncated bool
Upload []*UploadMetadata
Prefix string
Delimiter string
CommonPrefixes []string
}
// BucketResourcesMetadata - various types of bucket resources
type BucketResourcesMetadata struct {
Prefix string
Marker string
NextMarker string
Maxkeys int
EncodingType string
Delimiter string
IsTruncated bool
CommonPrefixes []string
}

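These definitions move here from utils.go, and MultiPartSession is promoted from the unexported multiPartSession previously defined in donut-v2.go; its fields stay unexported, so only package donut can populate it. A hypothetical in-package helper (not in this commit) showing how a session record is built, mirroring NewMultipartUpload in the multipart.go hunk below:

package donut

import "time"

// newMultiPartSession is a hypothetical helper, not part of this commit,
// showing how a session record is assembled inside package donut.
func newMultiPartSession(uploadID string) MultiPartSession {
	return MultiPartSession{
		uploadID:   uploadID,
		initiated:  time.Now(),
		totalParts: 0,
	}
}
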
@@ -32,7 +32,8 @@ import (
"sync"
"time"
"github.com/minio/minio/pkg/donut/trove"
"github.com/minio/minio/pkg/donut/cache/data"
"github.com/minio/minio/pkg/donut/cache/metadata"
"github.com/minio/minio/pkg/iodine"
"github.com/minio/minio/pkg/quick"
)
@@ -55,9 +56,9 @@ type Config struct {
type API struct {
config *Config
lock *sync.Mutex
objects *trove.Cache
multiPartObjects *trove.Cache
storedBuckets map[string]storedBucket
objects *data.Cache
multiPartObjects *data.Cache
storedBuckets *metadata.Cache
nodes map[string]node
buckets map[string]bucket
}
@@ -67,14 +68,7 @@ type storedBucket struct {
bucketMetadata BucketMetadata
objectMetadata map[string]ObjectMetadata
partMetadata map[string]PartMetadata
multiPartSession map[string]multiPartSession
}
// multiPartSession multipart session
type multiPartSession struct {
totalParts int
uploadID string
initiated time.Time
multiPartSession map[string]MultiPartSession
}
// New instantiates a new donut
@@ -83,11 +77,11 @@ func New(c *Config) (Interface, error) {
return nil, iodine.New(err, nil)
}
a := API{config: c}
a.storedBuckets = make(map[string]storedBucket)
a.storedBuckets = metadata.NewCache()
a.nodes = make(map[string]node)
a.buckets = make(map[string]bucket)
a.objects = trove.NewCache(a.config.MaxSize, a.config.Expiration)
a.multiPartObjects = trove.NewCache(0, time.Duration(0))
a.objects = data.NewCache(a.config.MaxSize, a.config.Expiration)
a.multiPartObjects = data.NewCache(0, time.Duration(0))
a.objects.OnExpired = a.expiredObject
a.multiPartObjects.OnExpired = a.expiredPart
a.lock = new(sync.Mutex)
@@ -111,9 +105,7 @@ func New(c *Config) (Interface, error) {
return nil, iodine.New(err, nil)
}
for k, v := range buckets {
storedBucket := a.storedBuckets[k]
storedBucket.bucketMetadata = v
a.storedBuckets[k] = storedBucket
a.storedBuckets.Set(k, v)
}
}
return a, nil
@@ -129,7 +121,7 @@ func (donut API) GetObject(w io.Writer, bucket string, object string) (int64, er
if !IsValidObjectName(object) {
return 0, iodine.New(ObjectNameInvalid{Object: object}, nil)
}
if _, ok := donut.storedBuckets[bucket]; ok == false {
if !donut.storedBuckets.Exists(bucket) {
return 0, iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
objectKey := bucket + "/" + object
@@ -226,19 +218,19 @@ func (donut API) GetBucketMetadata(bucket string) (BucketMetadata, error) {
if !IsValidBucket(bucket) {
return BucketMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
}
if _, ok := donut.storedBuckets[bucket]; ok == false {
if !donut.storedBuckets.Exists(bucket) {
if len(donut.config.NodeDiskMap) > 0 {
bucketMetadata, err := donut.getBucketMetadata(bucket)
if err != nil {
return BucketMetadata{}, iodine.New(err, nil)
}
storedBucket := donut.storedBuckets[bucket]
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
storedBucket.bucketMetadata = bucketMetadata
donut.storedBuckets[bucket] = storedBucket
donut.storedBuckets.Set(bucket, storedBucket)
}
return BucketMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
return donut.storedBuckets[bucket].bucketMetadata, nil
return donut.storedBuckets.Get(bucket).(storedBucket).bucketMetadata, nil
}
// SetBucketMetadata -
@@ -248,7 +240,7 @@ func (donut API) SetBucketMetadata(bucket string, metadata map[string]string) er
if !IsValidBucket(bucket) {
return iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
}
if _, ok := donut.storedBuckets[bucket]; ok == false {
if !donut.storedBuckets.Exists(bucket) {
return iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
if len(donut.config.NodeDiskMap) > 0 {
@@ -256,9 +248,9 @@ func (donut API) SetBucketMetadata(bucket string, metadata map[string]string) er
return iodine.New(err, nil)
}
}
storedBucket := donut.storedBuckets[bucket]
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
storedBucket.bucketMetadata.ACL = BucketACL(metadata["acl"])
donut.storedBuckets[bucket] = storedBucket
donut.storedBuckets.Set(bucket, storedBucket)
return nil
}
@@ -308,10 +300,10 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
if !IsValidObjectName(key) {
return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil)
}
if _, ok := donut.storedBuckets[bucket]; ok == false {
if !donut.storedBuckets.Exists(bucket) {
return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
storedBucket := donut.storedBuckets[bucket]
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
// get object key
objectKey := bucket + "/" + key
if _, ok := storedBucket.objectMetadata[objectKey]; ok == true {
@@ -337,7 +329,7 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
return ObjectMetadata{}, iodine.New(err, nil)
}
storedBucket.objectMetadata[objectKey] = objMetadata
donut.storedBuckets[bucket] = storedBucket
donut.storedBuckets.Set(bucket, storedBucket)
return objMetadata, nil
}
// calculate md5
@@ -388,7 +380,7 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
}
storedBucket.objectMetadata[objectKey] = newObject
donut.storedBuckets[bucket] = storedBucket
donut.storedBuckets.Set(bucket, storedBucket)
return newObject, nil
}
@@ -396,7 +388,7 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
func (donut API) MakeBucket(bucketName, acl string) error {
donut.lock.Lock()
defer donut.lock.Unlock()
if len(donut.storedBuckets) == totalBuckets {
if donut.storedBuckets.Stats().Items == totalBuckets {
return iodine.New(TooManyBuckets{Bucket: bucketName}, nil)
}
if !IsValidBucket(bucketName) {
@@ -405,7 +397,7 @@ func (donut API) MakeBucket(bucketName, acl string) error {
if !IsValidBucketACL(acl) {
return iodine.New(InvalidACL{ACL: acl}, nil)
}
if _, ok := donut.storedBuckets[bucketName]; ok == true {
if donut.storedBuckets.Exists(bucketName) {
return iodine.New(BucketExists{Bucket: bucketName}, nil)
}
@@ -420,13 +412,13 @@ func (donut API) MakeBucket(bucketName, acl string) error {
}
var newBucket = storedBucket{}
newBucket.objectMetadata = make(map[string]ObjectMetadata)
newBucket.multiPartSession = make(map[string]multiPartSession)
newBucket.multiPartSession = make(map[string]MultiPartSession)
newBucket.partMetadata = make(map[string]PartMetadata)
newBucket.bucketMetadata = BucketMetadata{}
newBucket.bucketMetadata.Name = bucketName
newBucket.bucketMetadata.Created = time.Now().UTC()
newBucket.bucketMetadata.ACL = BucketACL(acl)
donut.storedBuckets[bucketName] = newBucket
donut.storedBuckets.Set(bucketName, newBucket)
return nil
}
@@ -440,7 +432,7 @@ func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata) (
if !IsValidPrefix(resources.Prefix) {
return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(ObjectNameInvalid{Object: resources.Prefix}, nil)
}
if _, ok := donut.storedBuckets[bucket]; ok == false {
if !donut.storedBuckets.Exists(bucket) {
return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
var results []ObjectMetadata
@@ -458,7 +450,7 @@ func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata) (
}
resources.CommonPrefixes = listObjects.CommonPrefixes
resources.IsTruncated = listObjects.IsTruncated
if resources.IsTruncated && resources.IsDelimiterSet() {
if resources.IsTruncated && resources.Delimiter != "" {
resources.NextMarker = results[len(results)-1].Object
}
for key := range listObjects.Objects {
@@ -470,7 +462,7 @@ func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata) (
}
return results, resources, nil
}
storedBucket := donut.storedBuckets[bucket]
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
for key := range storedBucket.objectMetadata {
if strings.HasPrefix(key, bucket+"/") {
key = key[len(bucket)+1:]
@@ -503,7 +495,7 @@ func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata) (
for _, key := range filteredKeys {
if len(results) == resources.Maxkeys {
resources.IsTruncated = true
if resources.IsTruncated && resources.IsDelimiterSet() {
if resources.IsTruncated && resources.Delimiter != "" {
resources.NextMarker = results[len(results)-1].Object
}
return results, resources, nil
@@ -528,8 +520,19 @@ func (donut API) ListBuckets() ([]BucketMetadata, error) {
donut.lock.Lock()
defer donut.lock.Unlock()
var results []BucketMetadata
for _, bucket := range donut.storedBuckets {
results = append(results, bucket.bucketMetadata)
if len(donut.config.NodeDiskMap) > 0 {
buckets, err := donut.listBuckets()
if err != nil {
return nil, iodine.New(err, nil)
}
for _, bucketMetadata := range buckets {
results = append(results, bucketMetadata)
}
sort.Sort(byBucketName(results))
return results, nil
}
for _, bucket := range donut.storedBuckets.GetAll() {
results = append(results, bucket.(storedBucket).bucketMetadata)
}
sort.Sort(byBucketName(results))
return results, nil
@@ -546,10 +549,10 @@ func (donut API) GetObjectMetadata(bucket, key string) (ObjectMetadata, error) {
if !IsValidObjectName(key) {
return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil)
}
if _, ok := donut.storedBuckets[bucket]; ok == false {
if !donut.storedBuckets.Exists(bucket) {
return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
storedBucket := donut.storedBuckets[bucket]
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
objectKey := bucket + "/" + key
if objMetadata, ok := storedBucket.objectMetadata[objectKey]; ok == true {
return objMetadata, nil
@@ -561,6 +564,7 @@ func (donut API) GetObjectMetadata(bucket, key string) (ObjectMetadata, error) {
}
// update
storedBucket.objectMetadata[objectKey] = objMetadata
donut.storedBuckets.Set(bucket, storedBucket)
return objMetadata, nil
}
return ObjectMetadata{}, iodine.New(ObjectNotFound{Object: key}, nil)
@@ -572,8 +576,8 @@ func (donut API) expiredObject(a ...interface{}) {
cacheStats.Bytes, cacheStats.Items, cacheStats.Expired)
key := a[0].(string)
// loop through all buckets
for _, storedBucket := range donut.storedBuckets {
delete(storedBucket.objectMetadata, key)
for _, bucket := range donut.storedBuckets.GetAll() {
delete(bucket.(storedBucket).objectMetadata, key)
}
debug.FreeOSMemory()
}

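A pattern worth noting throughout this file: donut.storedBuckets.Get(bucket).(storedBucket), mutate, then donut.storedBuckets.Set(bucket, storedBucket). Get hands back a copy of the stored struct value, so struct-field changes are invisible until written back with Set (map fields inside the struct still share underlying storage, which is why a few call sites skip the write-back). A stand-alone illustration, with bucketEntry as a hypothetical stand-in for storedBucket:

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/donut/cache/metadata"
)

// bucketEntry is a hypothetical stand-in for donut's storedBucket type.
type bucketEntry struct {
	acl string
}

func main() {
	cache := metadata.NewCache()
	cache.Set("mybucket", bucketEntry{acl: "private"})

	// Get returns interface{} holding a copy of the stored struct,
	// so field updates must be written back with Set to be visible.
	entry := cache.Get("mybucket").(bucketEntry)
	entry.acl = "public-read"
	cache.Set("mybucket", entry)

	fmt.Println(cache.Get("mybucket").(bucketEntry).acl) // public-read
}
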
@@ -44,10 +44,10 @@ func (donut API) NewMultipartUpload(bucket, key, contentType string) (string, er
if !IsValidObjectName(key) {
return "", iodine.New(ObjectNameInvalid{Object: key}, nil)
}
if _, ok := donut.storedBuckets[bucket]; ok == false {
if !donut.storedBuckets.Exists(bucket) {
return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
storedBucket := donut.storedBuckets[bucket]
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
objectKey := bucket + "/" + key
if _, ok := storedBucket.objectMetadata[objectKey]; ok == true {
return "", iodine.New(ObjectExists{Object: key}, nil)
@@ -56,12 +56,12 @@ func (donut API) NewMultipartUpload(bucket, key, contentType string) (string, er
uploadIDSum := sha512.Sum512(id)
uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47]
donut.storedBuckets[bucket].multiPartSession[key] = multiPartSession{
storedBucket.multiPartSession[key] = MultiPartSession{
uploadID: uploadID,
initiated: time.Now(),
totalParts: 0,
}
donut.storedBuckets.Set(bucket, storedBucket)
return uploadID, nil
}
@@ -69,11 +69,16 @@ func (donut API) NewMultipartUpload(bucket, key, contentType string) (string, er
func (donut API) AbortMultipartUpload(bucket, key, uploadID string) error {
donut.lock.Lock()
defer donut.lock.Unlock()
storedBucket := donut.storedBuckets[bucket]
if !IsValidBucket(bucket) {
return iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
}
if !donut.storedBuckets.Exists(bucket) {
return iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
if storedBucket.multiPartSession[key].uploadID != uploadID {
return iodine.New(InvalidUploadID{UploadID: uploadID}, nil)
}
donut.cleanupMultiparts(bucket, key, uploadID)
donut.cleanupMultipartSession(bucket, key, uploadID)
return nil
}
@@ -84,36 +89,38 @@ func getMultipartKey(key string, uploadID string, partNumber int) string {
// CreateObjectPart -
func (donut API) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
// Verify upload id
storedBucket := donut.storedBuckets[bucket]
if storedBucket.multiPartSession[key].uploadID != uploadID {
return "", iodine.New(InvalidUploadID{UploadID: uploadID}, nil)
if !IsValidBucket(bucket) {
return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
}
donut.lock.Lock()
etag, err := donut.createObjectPart(bucket, key, uploadID, partID, "", expectedMD5Sum, size, data)
if err != nil {
return "", iodine.New(err, nil)
if !IsValidObjectName(key) {
return "", iodine.New(ObjectNameInvalid{Object: key}, nil)
}
donut.lock.Unlock()
// free
if !donut.storedBuckets.Exists(bucket) {
return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
var etag string
var err error
{
donut.lock.Lock()
etag, err = donut.createObjectPart(bucket, key, uploadID, partID, "", expectedMD5Sum, size, data)
// unlock before checking err so an early return cannot leak the lock
donut.lock.Unlock()
}
if err != nil {
return "", iodine.New(err, nil)
}
// possible free
debug.FreeOSMemory()
return etag, nil
}
// createObjectPart - PUT object part to cache buffer
func (donut API) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
if !IsValidBucket(bucket) {
return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
}
if !IsValidObjectName(key) {
return "", iodine.New(ObjectNameInvalid{Object: key}, nil)
}
if _, ok := donut.storedBuckets[bucket]; ok == false {
return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
// Verify upload id
if storedBucket.multiPartSession[key].uploadID != uploadID {
return "", iodine.New(InvalidUploadID{UploadID: uploadID}, nil)
}
storedBucket := donut.storedBuckets[bucket]
// get object key
partKey := bucket + "/" + getMultipartKey(key, uploadID, partID)
if _, ok := storedBucket.partMetadata[partKey]; ok == true {
@@ -179,20 +186,18 @@ func (donut API) createObjectPart(bucket, key, uploadID string, partID int, cont
multiPartSession := storedBucket.multiPartSession[key]
multiPartSession.totalParts++
storedBucket.multiPartSession[key] = multiPartSession
donut.storedBuckets[bucket] = storedBucket
donut.storedBuckets.Set(bucket, storedBucket)
return md5Sum, nil
}
func (donut API) cleanupMultipartSession(bucket, key, uploadID string) {
delete(donut.storedBuckets[bucket].multiPartSession, key)
}
func (donut API) cleanupMultiparts(bucket, key, uploadID string) {
for i := 1; i <= donut.storedBuckets[bucket].multiPartSession[key].totalParts; i++ {
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
for i := 1; i <= storedBucket.multiPartSession[key].totalParts; i++ {
objectKey := bucket + "/" + getMultipartKey(key, uploadID, i)
donut.multiPartObjects.Delete(objectKey)
}
delete(storedBucket.multiPartSession, key)
donut.storedBuckets.Set(bucket, storedBucket)
}
// CompleteMultipartUpload -
@@ -205,10 +210,10 @@ func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, parts map
return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil)
}
// Verify upload id
if _, ok := donut.storedBuckets[bucket]; ok == false {
if !donut.storedBuckets.Exists(bucket) {
return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
storedBucket := donut.storedBuckets[bucket]
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
if storedBucket.multiPartSession[key].uploadID != uploadID {
return ObjectMetadata{}, iodine.New(InvalidUploadID{UploadID: uploadID}, nil)
}
@@ -250,8 +255,8 @@ func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, parts map
return ObjectMetadata{}, iodine.New(err, nil)
}
fullObject.Reset()
donut.lock.Lock()
donut.cleanupMultiparts(bucket, key, uploadID)
donut.cleanupMultipartSession(bucket, key, uploadID)
donut.lock.Unlock()
return objectMetadata, nil
@@ -269,10 +274,10 @@ func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartRe
// TODO handle delimiter
donut.lock.Lock()
defer donut.lock.Unlock()
if _, ok := donut.storedBuckets[bucket]; ok == false {
if !donut.storedBuckets.Exists(bucket) {
return BucketMultipartResourcesMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
storedBucket := donut.storedBuckets[bucket]
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
var uploads []*UploadMetadata
for key, session := range storedBucket.multiPartSession {
@@ -331,10 +336,10 @@ func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMe
// Verify upload id
donut.lock.Lock()
defer donut.lock.Unlock()
if _, ok := donut.storedBuckets[bucket]; ok == false {
if !donut.storedBuckets.Exists(bucket) {
return ObjectResourcesMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
storedBucket := donut.storedBuckets[bucket]
storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
if _, ok := storedBucket.multiPartSession[key]; ok == false {
return ObjectResourcesMetadata{}, iodine.New(ObjectNotFound{Object: key}, nil)
}
@@ -374,8 +379,11 @@ func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMe
func (donut API) expiredPart(a ...interface{}) {
key := a[0].(string)
// loop through all buckets
for _, storedBucket := range donut.storedBuckets {
delete(storedBucket.partMetadata, key)
buckets := donut.storedBuckets.GetAll()
for bucketName, bucket := range buckets {
b := bucket.(storedBucket)
delete(b.partMetadata, key)
donut.storedBuckets.Set(bucketName, b)
}
debug.FreeOSMemory()
}

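NewMultipartUpload above derives the upload ID by SHA-512-hashing an id, URL-safe base64-encoding the digest, and truncating to 47 characters. A self-contained sketch of that derivation (how id is generated is not shown in the hunk, so the random source here is an assumption):

package main

import (
	"crypto/rand"
	"crypto/sha512"
	"encoding/base64"
	"fmt"
)

func main() {
	// assumption: id is random bytes; the hunk only shows the hashing step
	id := make([]byte, 32)
	if _, err := rand.Read(id); err != nil {
		panic(err)
	}
	uploadIDSum := sha512.Sum512(id)
	uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47]
	fmt.Println(uploadID) // 47 URL-safe characters
}
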
@@ -1,9 +1,24 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"regexp"
"strings"
"time"
"unicode/utf8"
)
@@ -36,93 +51,6 @@ func (b BucketACL) IsPublicReadWrite() bool {
return b == BucketACL("public-read-write")
}
// FilterMode type
type FilterMode int
// FilterMode list
const (
DelimiterPrefixMode FilterMode = iota
DelimiterMode
PrefixMode
DefaultMode
)
// PartMetadata - various types of individual part resources
type PartMetadata struct {
PartNumber int
LastModified time.Time
ETag string
Size int64
}
// ObjectResourcesMetadata - various types of object resources
type ObjectResourcesMetadata struct {
Bucket string
EncodingType string
Key string
UploadID string
StorageClass string
PartNumberMarker int
NextPartNumberMarker int
MaxParts int
IsTruncated bool
Part []*PartMetadata
}
// UploadMetadata container capturing metadata on in progress multipart upload in a given bucket
type UploadMetadata struct {
Key string
UploadID string
StorageClass string
Initiated time.Time
}
// BucketMultipartResourcesMetadata - various types of bucket resources for inprogress multipart uploads
type BucketMultipartResourcesMetadata struct {
KeyMarker string
UploadIDMarker string
NextKeyMarker string
NextUploadIDMarker string
EncodingType string
MaxUploads int
IsTruncated bool
Upload []*UploadMetadata
Prefix string
Delimiter string
CommonPrefixes []string
}
// BucketResourcesMetadata - various types of bucket resources
type BucketResourcesMetadata struct {
Prefix string
Marker string
NextMarker string
Maxkeys int
EncodingType string
Delimiter string
IsTruncated bool
CommonPrefixes []string
Mode FilterMode
}
// GetMode - Populate filter mode
func GetMode(resources BucketResourcesMetadata) FilterMode {
var f FilterMode
switch true {
case resources.Delimiter != "" && resources.Prefix != "":
f = DelimiterPrefixMode
case resources.Delimiter != "" && resources.Prefix == "":
f = DelimiterMode
case resources.Delimiter == "" && resources.Prefix != "":
f = PrefixMode
case resources.Delimiter == "" && resources.Prefix == "":
f = DefaultMode
}
return f
}
// IsValidBucketACL - is provided acl string supported
func IsValidBucketACL(acl string) bool {
switch acl {
@@ -140,26 +68,6 @@ func IsValidBucketACL(acl string) bool {
}
}
// IsDelimiterPrefixSet Delimiter and Prefix set
func (b BucketResourcesMetadata) IsDelimiterPrefixSet() bool {
return b.Mode == DelimiterPrefixMode
}
// IsDelimiterSet Delimiter set
func (b BucketResourcesMetadata) IsDelimiterSet() bool {
return b.Mode == DelimiterMode
}
// IsPrefixSet Prefix set
func (b BucketResourcesMetadata) IsPrefixSet() bool {
return b.Mode == PrefixMode
}
// IsDefault No query values
func (b BucketResourcesMetadata) IsDefault() bool {
return b.Mode == DefaultMode
}
// IsValidBucket - verify bucket name in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
func IsValidBucket(bucket string) bool {

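With FilterMode, GetMode, and the Is*Set helpers removed here, callers test resources.Delimiter and resources.Prefix directly, as the ListObjects changes above already do. A small stand-alone sketch of the simplified dispatch:

package main

import "fmt"

// resources is a trimmed stand-in for donut's BucketResourcesMetadata.
type resources struct {
	Prefix    string
	Delimiter string
}

func main() {
	r := resources{Prefix: "photos/", Delimiter: "/"}
	// what used to be GetMode plus the mode helpers is now just:
	switch {
	case r.Delimiter != "" && r.Prefix != "":
		fmt.Println("delimiter+prefix listing")
	case r.Delimiter != "":
		fmt.Println("delimiter-only listing")
	case r.Prefix != "":
		fmt.Println("prefix-only listing")
	default:
		fmt.Println("default listing")
	}
}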