Add Append() method to trove cache for appending data to an existing key

This largely avoids a large buffer copy that would otherwise accumulate inside proxyReader{}.

This patch also implements an initialize() function that initializes and
populates data for all the existing buckets, avoiding the redundant
ListBuckets() otherwise invoked by every API call.
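
For illustration, here is a minimal, self-contained sketch of the streaming-append pattern this commit moves to (hypothetical names; the real types live in pkg/storage/drivers/donut and pkg/storage/trove). Instead of accumulating every chunk into a private readBytes slice and copying the whole buffer into the cache once the upload finishes, the proxy reader appends each chunk to the cache entry as it streams through:

package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

// Cache is a hypothetical stand-in for the trove cache; only the
// Append path relevant to this commit is sketched.
type Cache struct {
	sync.Mutex
	items map[string][]byte
}

func NewCache() *Cache { return &Cache{items: make(map[string][]byte)} }

// Append adds data under key, creating the entry if it is absent.
func (c *Cache) Append(key string, p []byte) bool {
	c.Lock()
	defer c.Unlock()
	c.items[key] = append(c.items[key], p...)
	return true
}

// proxyReader forwards reads and appends every chunk straight into the
// cache, so no second full-object buffer builds up alongside the stream.
type proxyReader struct {
	reader io.Reader
	cache  *Cache
	key    string
}

func (r *proxyReader) Read(p []byte) (int, error) {
	n, err := r.reader.Read(p)
	if n > 0 {
		if ok := r.cache.Append(r.key, p[:n]); !ok {
			return n, io.ErrShortBuffer
		}
	}
	return n, err
}

func main() {
	cache := NewCache()
	pr := &proxyReader{reader: strings.NewReader("hello world"), cache: cache, key: "bucket/object"}
	if _, err := io.Copy(io.Discard, pr); err != nil {
		panic(err)
	}
	fmt.Printf("cached: %q\n", cache.items["bucket/object"])
}

The real patch handles io.EOF and io.ErrUnexpectedEOF explicitly because Read may return data together with an EOF error; appending whenever n > 0 covers the same cases in this sketch.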
master
Harshavardhana 10 years ago
parent 762aae7c32
commit 05f8654e3d
3 changed files:
  pkg/storage/drivers/donut/donut.go (168 lines changed)
  pkg/storage/drivers/donut/multipart.go (148 lines changed)
  pkg/storage/trove/trove.go (37 lines changed)

@@ -69,15 +69,14 @@ type donutDriver struct {
}
// This is a dummy nodeDiskMap which is going to be deprecated soon
// once the Management API is standardized, this map is useful for now
// to show multi disk API correctness and parity calculation
//
// Ideally this should be obtained from per node configuration file
func createNodeDiskMap(p string) map[string][]string {
// once the Management API is standardized, and we have a way of adding
// and removing disks. This is useful for now to take inputs from CLI
func createNodeDiskMap(paths []string) map[string][]string {
if len(paths) == 1 {
nodes := make(map[string][]string)
nodes["localhost"] = make([]string, 16)
for i := 0; i < len(nodes["localhost"]); i++ {
diskPath := filepath.Join(p, strconv.Itoa(i))
diskPath := filepath.Join(paths[0], strconv.Itoa(i))
if _, err := os.Stat(diskPath); err != nil {
if os.IsNotExist(err) {
os.MkdirAll(diskPath, 0700)
@@ -86,12 +85,7 @@ func createNodeDiskMap(p string) map[string][]string {
nodes["localhost"][i] = diskPath
}
return nodes
}
// This is a dummy nodeDiskMap which is going to be deprecated soon
// once the Management API is standardized, and we have a way of adding
// and removing disks. This is useful for now to take inputs from CLI
func createNodeDiskMapFromSlice(paths []string) map[string][]string {
}
diskPaths := make([]string, len(paths))
nodes := make(map[string][]string)
for i, p := range paths {
@@ -107,6 +101,43 @@ func createNodeDiskMapFromSlice(paths []string) map[string][]string {
return nodes
}
func initialize(d *donutDriver) {
// Soon to be user configurable, when the Management API is available
// we should replace "default" with something which is passed down
// from configuration parameters
var err error
d.donut, err = donut.NewDonut("default", createNodeDiskMap(d.paths))
if err != nil {
panic(iodine.New(err, nil))
}
buckets, err := d.donut.ListBuckets()
if err != nil {
panic(iodine.New(err, nil))
}
for bucketName, metadata := range buckets {
d.lock.RLock()
storedBucket := d.storedBuckets[bucketName]
d.lock.RUnlock()
if len(storedBucket.multiPartSession) == 0 {
storedBucket.multiPartSession = make(map[string]multiPartSession)
}
if len(storedBucket.objectMetadata) == 0 {
storedBucket.objectMetadata = make(map[string]drivers.ObjectMetadata)
}
if len(storedBucket.partMetadata) == 0 {
storedBucket.partMetadata = make(map[string]drivers.PartMetadata)
}
storedBucket.bucketMetadata = drivers.BucketMetadata{
Name: metadata.Name,
Created: metadata.Created,
ACL: drivers.BucketACL(metadata.ACL),
}
d.lock.Lock()
d.storedBuckets[bucketName] = storedBucket
d.lock.Unlock()
}
}
// Start a single disk subsystem
func Start(paths []string, maxSize uint64, expiration time.Duration) (chan<- string, <-chan error, drivers.Driver) {
ctrlChannel := make(chan string)
@@ -126,34 +157,17 @@ func Start(paths []string, maxSize uint64, expiration time.Duration) (chan<- str
// set up memory expiration
driver.objects.ExpireObjects(time.Second * 5)
// Soon to be user configurable, when the Management API is available
// we should replace "default" with something which is passed down
// from configuration parameters
switch {
case len(paths) == 1:
d, err := donut.NewDonut("default", createNodeDiskMap(paths[0]))
if err != nil {
err = iodine.New(err, nil)
log.Error.Println(err)
}
driver.donut = d
default:
d, err := donut.NewDonut("default", createNodeDiskMapFromSlice(paths))
if err != nil {
err = iodine.New(err, nil)
log.Error.Println(err)
}
driver.donut = d
}
driver.paths = paths
driver.lock = new(sync.RWMutex)
initialize(driver)
go start(ctrlChannel, errorChannel, driver)
return ctrlChannel, errorChannel, driver
}
func start(ctrlChannel <-chan string, errorChannel chan<- error, driver *donutDriver) {
close(errorChannel)
defer close(errorChannel)
}
func (d donutDriver) expiredObject(a ...interface{}) {
@@ -186,31 +200,8 @@ func (d donutDriver) ListBuckets() (results []drivers.BucketMetadata, err error)
if d.donut == nil {
return nil, iodine.New(drivers.InternalError{}, nil)
}
buckets, err := d.donut.ListBuckets()
if err != nil {
return nil, iodine.New(err, nil)
}
for bucketName, metadata := range buckets {
result := drivers.BucketMetadata{
Name: metadata.Name,
Created: metadata.Created,
ACL: drivers.BucketACL(metadata.ACL),
}
d.lock.Lock()
storedBucket := d.storedBuckets[bucketName]
if len(storedBucket.multiPartSession) == 0 {
storedBucket.multiPartSession = make(map[string]multiPartSession)
}
if len(storedBucket.objectMetadata) == 0 {
storedBucket.objectMetadata = make(map[string]drivers.ObjectMetadata)
}
if len(storedBucket.partMetadata) == 0 {
storedBucket.partMetadata = make(map[string]drivers.PartMetadata)
}
storedBucket.bucketMetadata = result
d.storedBuckets[bucketName] = storedBucket
d.lock.Unlock()
results = append(results, result)
for _, storedBucket := range d.storedBuckets {
results = append(results, storedBucket.bucketMetadata)
}
sort.Sort(byBucketName(results))
return results, nil
@@ -229,7 +220,7 @@ func (d donutDriver) CreateBucket(bucketName, acl string) error {
if !drivers.IsValidBucketACL(acl) {
return iodine.New(drivers.InvalidACL{ACL: acl}, nil)
}
if drivers.IsValidBucket(bucketName) && !strings.Contains(bucketName, ".") {
if drivers.IsValidBucket(bucketName) {
if strings.TrimSpace(acl) == "" {
acl = "private"
}
@@ -244,6 +235,15 @@ func (d donutDriver) CreateBucket(bucketName, acl string) error {
newBucket.objectMetadata = make(map[string]drivers.ObjectMetadata)
newBucket.multiPartSession = make(map[string]multiPartSession)
newBucket.partMetadata = make(map[string]drivers.PartMetadata)
metadata, err := d.donut.GetBucketMetadata(bucketName)
if err != nil {
return iodine.New(err, nil)
}
newBucket.bucketMetadata = drivers.BucketMetadata{
Name: metadata.Name,
Created: metadata.Created,
ACL: drivers.BucketACL(metadata.ACL),
}
d.storedBuckets[bucketName] = newBucket
return nil
}
@@ -268,7 +268,7 @@ func (d donutDriver) GetBucketMetadata(bucketName string) (drivers.BucketMetadat
return drivers.BucketMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucketName}, nil)
}
bucketMetadata := drivers.BucketMetadata{
Name: bucketName,
Name: metadata.Name,
Created: metadata.Created,
ACL: drivers.BucketACL(metadata.ACL),
}
@@ -302,8 +302,6 @@ func (d donutDriver) SetBucketMetadata(bucketName, acl string) error {
// GetObject retrieves an object and writes it to a writer
func (d donutDriver) GetObject(w io.Writer, bucketName, objectName string) (int64, error) {
d.lock.RLock()
defer d.lock.RUnlock()
if d.donut == nil {
return 0, iodine.New(drivers.InternalError{}, nil)
}
@@ -316,15 +314,24 @@ func (d donutDriver) GetObject(w io.Writer, bucketName, objectName string) (int6
if _, ok := d.storedBuckets[bucketName]; ok == false {
return 0, iodine.New(drivers.BucketNotFound{Bucket: bucketName}, nil)
}
d.lock.RLock()
defer d.lock.RUnlock()
objectKey := bucketName + "/" + objectName
data, ok := d.objects.Get(objectKey)
if !ok {
reader, size, err := d.donut.GetObject(bucketName, objectName)
if err != nil {
switch iodine.ToError(err).(type) {
case donut.BucketNotFound:
return 0, iodine.New(drivers.BucketNotFound{Bucket: bucketName}, nil)
case donut.ObjectNotFound:
return 0, iodine.New(drivers.ObjectNotFound{
Bucket: bucketName,
Object: objectName,
}, nil)
default:
return 0, iodine.New(drivers.InternalError{}, nil)
}
}
n, err := io.CopyN(w, reader, size)
if err != nil {
@@ -372,10 +379,17 @@ func (d donutDriver) GetPartialObject(w io.Writer, bucketName, objectName string
if !ok {
reader, size, err := d.donut.GetObject(bucketName, objectName)
if err != nil {
switch iodine.ToError(err).(type) {
case donut.BucketNotFound:
return 0, iodine.New(drivers.BucketNotFound{Bucket: bucketName}, nil)
case donut.ObjectNotFound:
return 0, iodine.New(drivers.ObjectNotFound{
Bucket: bucketName,
Object: objectName,
}, nil)
default:
return 0, iodine.New(drivers.InternalError{}, nil)
}
}
defer reader.Close()
if start > size || (start+length-1) > size {
@@ -486,25 +500,32 @@ func (d donutDriver) ListObjects(bucketName string, resources drivers.BucketReso
}
type proxyReader struct {
io.Reader
readBytes []byte
reader io.Reader
driver donutDriver
object string
}
func (r *proxyReader) Read(p []byte) (n int, err error) {
n, err = r.Reader.Read(p)
n, err = r.reader.Read(p)
if err == io.EOF || err == io.ErrUnexpectedEOF {
r.readBytes = append(r.readBytes, p[0:n]...)
ok := r.driver.objects.Append(r.object, p[0:n])
if !ok {
return n, io.ErrShortBuffer
}
return
}
if err != nil {
return
}
r.readBytes = append(r.readBytes, p[0:n]...)
ok := r.driver.objects.Append(r.object, p[0:n])
if !ok {
return n, io.ErrShortBuffer
}
return
}
func newProxyReader(r io.Reader) *proxyReader {
return &proxyReader{r, nil}
func newProxyReader(r io.Reader, d donutDriver, k string) *proxyReader {
return &proxyReader{reader: r, driver: d, object: k}
}
// CreateObject creates a new object
@@ -552,7 +573,7 @@ func (d donutDriver) CreateObject(bucketName, objectName, contentType, expectedM
}
expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
}
newReader := newProxyReader(reader)
newReader := newProxyReader(reader, d, objectKey)
calculatedMD5Sum, err := d.donut.PutObject(bucketName, objectName, expectedMD5Sum, newReader, metadata)
if err != nil {
switch iodine.ToError(err).(type) {
@@ -561,14 +582,9 @@ func (d donutDriver) CreateObject(bucketName, objectName, contentType, expectedM
}
return "", iodine.New(err, errParams)
}
// get object key
ok := d.objects.Set(objectKey, newReader.readBytes)
// setting up for de-allocation
newReader.readBytes = nil
// free up
go debug.FreeOSMemory()
if !ok {
return "", iodine.New(drivers.InternalError{}, nil)
}
objectMetadata, err := d.donut.GetObjectMetadata(bucketName, objectName)
if err != nil {
return "", iodine.New(err, nil)
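
The initialize() change above trades a backend ListBuckets() on every API call for a single population pass at startup. A hedged sketch of that idea, with hypothetical types rather than the driver's actual API:

package main

import (
	"fmt"
	"sync"
)

type bucketMetadata struct {
	Name string
	ACL  string
}

type driver struct {
	lock    sync.RWMutex
	buckets map[string]bucketMetadata
}

// initialize walks the backend once at startup and caches per-bucket
// metadata in memory.
func (d *driver) initialize(backend map[string]bucketMetadata) {
	d.lock.Lock()
	defer d.lock.Unlock()
	for name, meta := range backend {
		d.buckets[name] = meta
	}
}

// listBuckets serves entirely from the in-memory map, never touching
// the backend on the request path.
func (d *driver) listBuckets() []bucketMetadata {
	d.lock.RLock()
	defer d.lock.RUnlock()
	results := make([]bucketMetadata, 0, len(d.buckets))
	for _, meta := range d.buckets {
		results = append(results, meta)
	}
	return results
}

func main() {
	d := &driver{buckets: make(map[string]bucketMetadata)}
	d.initialize(map[string]bucketMetadata{"photos": {Name: "photos", ACL: "private"}})
	fmt.Println(d.listBuckets())
}

CreateBucket keeps the map current by storing the new bucket's metadata at creation time, which is why ListBuckets can rely on the cached map alone.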

@@ -64,33 +64,6 @@ func (d donutDriver) NewMultipartUpload(bucketName, objectName, contentType stri
d.lock.RUnlock()
return "", iodine.New(drivers.ObjectNameInvalid{Object: objectName}, nil)
}
d.lock.RUnlock()
buckets, err := d.donut.ListBuckets()
if err != nil {
return "", iodine.New(err, nil)
}
for bucketName, metadata := range buckets {
result := drivers.BucketMetadata{
Name: metadata.Name,
Created: metadata.Created,
ACL: drivers.BucketACL(metadata.ACL),
}
d.lock.Lock()
storedBucket := d.storedBuckets[bucketName]
storedBucket.bucketMetadata = result
if len(storedBucket.multiPartSession) == 0 {
storedBucket.multiPartSession = make(map[string]multiPartSession)
}
if len(storedBucket.objectMetadata) == 0 {
storedBucket.objectMetadata = make(map[string]drivers.ObjectMetadata)
}
if len(storedBucket.partMetadata) == 0 {
storedBucket.partMetadata = make(map[string]drivers.PartMetadata)
}
d.storedBuckets[bucketName] = storedBucket
d.lock.Unlock()
}
d.lock.RLock()
if _, ok := d.storedBuckets[bucketName]; ok == false {
d.lock.RUnlock()
return "", iodine.New(drivers.BucketNotFound{Bucket: bucketName}, nil)
@@ -117,17 +90,17 @@ func (d donutDriver) NewMultipartUpload(bucketName, objectName, contentType stri
return uploadID, nil
}
func (d donutDriver) AbortMultipartUpload(bucket, key, uploadID string) error {
func (d donutDriver) AbortMultipartUpload(bucketName, objectName, uploadID string) error {
d.lock.RLock()
storedBucket := d.storedBuckets[bucket]
if storedBucket.multiPartSession[key].uploadID != uploadID {
storedBucket := d.storedBuckets[bucketName]
if storedBucket.multiPartSession[objectName].uploadID != uploadID {
d.lock.RUnlock()
return iodine.New(drivers.InvalidUploadID{UploadID: uploadID}, nil)
}
d.lock.RUnlock()
d.cleanupMultiparts(bucket, key, uploadID)
d.cleanupMultipartSession(bucket, key, uploadID)
d.cleanupMultiparts(bucketName, objectName, uploadID)
d.cleanupMultipartSession(bucketName, objectName, uploadID)
return nil
}
@@ -135,17 +108,17 @@ func getMultipartKey(key string, uploadID string, partNumber int) string {
return key + "?uploadId=" + uploadID + "&partNumber=" + strconv.Itoa(partNumber)
}
func (d donutDriver) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
func (d donutDriver) CreateObjectPart(bucketName, objectName, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
// Verify upload id
d.lock.RLock()
storedBucket := d.storedBuckets[bucket]
if storedBucket.multiPartSession[key].uploadID != uploadID {
storedBucket := d.storedBuckets[bucketName]
if storedBucket.multiPartSession[objectName].uploadID != uploadID {
d.lock.RUnlock()
return "", iodine.New(drivers.InvalidUploadID{UploadID: uploadID}, nil)
}
d.lock.RUnlock()
etag, err := d.createObjectPart(bucket, key, uploadID, partID, "", expectedMD5Sum, size, data)
etag, err := d.createObjectPart(bucketName, objectName, uploadID, partID, "", expectedMD5Sum, size, data)
if err != nil {
return "", iodine.New(err, nil)
}
@@ -155,23 +128,23 @@ func (d donutDriver) createObjectPart(bucket, key, uploadID string, partID int,
}
// createObject - PUT object to memory buffer
func (d donutDriver) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
func (d donutDriver) createObjectPart(bucketName, objectName, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
d.lock.RLock()
if !drivers.IsValidBucket(bucket) {
if !drivers.IsValidBucket(bucketName) {
d.lock.RUnlock()
return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, nil)
}
if !drivers.IsValidObjectName(key) {
if !drivers.IsValidObjectName(objectName) {
d.lock.RUnlock()
return "", iodine.New(drivers.ObjectNameInvalid{Object: key}, nil)
return "", iodine.New(drivers.ObjectNameInvalid{Object: objectName}, nil)
}
if _, ok := d.storedBuckets[bucket]; ok == false {
if _, ok := d.storedBuckets[bucketName]; ok == false {
d.lock.RUnlock()
return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
return "", iodine.New(drivers.BucketNotFound{Bucket: bucketName}, nil)
}
storedBucket := d.storedBuckets[bucket]
storedBucket := d.storedBuckets[bucketName]
// get object key
partKey := bucket + "/" + getMultipartKey(key, uploadID, partID)
partKey := bucketName + "/" + getMultipartKey(objectName, uploadID, partID)
if _, ok := storedBucket.partMetadata[partKey]; ok == true {
d.lock.RUnlock()
return storedBucket.partMetadata[partKey].ETag, nil
@@ -225,7 +198,11 @@ func (d donutDriver) createObjectPart(bucket, key, uploadID string, partID int,
// Verify if the written object is equal to what is expected, only if it is requested as such
if strings.TrimSpace(expectedMD5Sum) != "" {
if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil {
return "", iodine.New(drivers.BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Key: key}, nil)
return "", iodine.New(drivers.BadDigest{
Md5: expectedMD5Sum,
Bucket: bucketName,
Key: objectName,
}, nil)
}
}
newPart := drivers.PartMetadata{
@@ -237,43 +214,43 @@ func (d donutDriver) createObjectPart(bucket, key, uploadID string, partID int,
d.lock.Lock()
storedBucket.partMetadata[partKey] = newPart
multiPartSession := storedBucket.multiPartSession[key]
multiPartSession := storedBucket.multiPartSession[objectName]
multiPartSession.totalParts++
storedBucket.multiPartSession[key] = multiPartSession
d.storedBuckets[bucket] = storedBucket
storedBucket.multiPartSession[objectName] = multiPartSession
d.storedBuckets[bucketName] = storedBucket
d.lock.Unlock()
return md5Sum, nil
}
func (d donutDriver) cleanupMultipartSession(bucket, key, uploadID string) {
func (d donutDriver) cleanupMultipartSession(bucketName, objectName, uploadID string) {
d.lock.Lock()
defer d.lock.Unlock()
delete(d.storedBuckets[bucket].multiPartSession, key)
delete(d.storedBuckets[bucketName].multiPartSession, objectName)
}
func (d donutDriver) cleanupMultiparts(bucket, key, uploadID string) {
for i := 1; i <= d.storedBuckets[bucket].multiPartSession[key].totalParts; i++ {
objectKey := bucket + "/" + getMultipartKey(key, uploadID, i)
func (d donutDriver) cleanupMultiparts(bucketName, objectName, uploadID string) {
for i := 1; i <= d.storedBuckets[bucketName].multiPartSession[objectName].totalParts; i++ {
objectKey := bucketName + "/" + getMultipartKey(objectName, uploadID, i)
d.multiPartObjects.Delete(objectKey)
}
}
func (d donutDriver) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) {
if !drivers.IsValidBucket(bucket) {
return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
func (d donutDriver) CompleteMultipartUpload(bucketName, objectName, uploadID string, parts map[int]string) (string, error) {
if !drivers.IsValidBucket(bucketName) {
return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, nil)
}
if !drivers.IsValidObjectName(key) {
return "", iodine.New(drivers.ObjectNameInvalid{Object: key}, nil)
if !drivers.IsValidObjectName(objectName) {
return "", iodine.New(drivers.ObjectNameInvalid{Object: objectName}, nil)
}
// Verify upload id
d.lock.RLock()
if _, ok := d.storedBuckets[bucket]; ok == false {
if _, ok := d.storedBuckets[bucketName]; ok == false {
d.lock.RUnlock()
return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
return "", iodine.New(drivers.BucketNotFound{Bucket: bucketName}, nil)
}
storedBucket := d.storedBuckets[bucket]
if storedBucket.multiPartSession[key].uploadID != uploadID {
storedBucket := d.storedBuckets[bucketName]
if storedBucket.multiPartSession[objectName].uploadID != uploadID {
d.lock.RUnlock()
return "", iodine.New(drivers.InvalidUploadID{UploadID: uploadID}, nil)
}
@@ -284,7 +261,7 @@ func (d donutDriver) CompleteMultipartUpload(bucket, key, uploadID string, parts
var fullObject bytes.Buffer
for i := 1; i <= len(parts); i++ {
recvMD5 := parts[i]
object, ok := d.multiPartObjects.Get(bucket + "/" + getMultipartKey(key, uploadID, i))
object, ok := d.multiPartObjects.Get(bucketName + "/" + getMultipartKey(objectName, uploadID, i))
if ok == false {
d.lock.Unlock()
return "", iodine.New(errors.New("missing part: "+strconv.Itoa(i)), nil)
@@ -297,7 +274,11 @@ func (d donutDriver) CompleteMultipartUpload(bucket, key, uploadID string, parts
return "", iodine.New(drivers.InvalidDigest{Md5: recvMD5}, nil)
}
if !bytes.Equal(recvMD5Bytes, calcMD5Bytes[:]) {
return "", iodine.New(drivers.BadDigest{Md5: recvMD5, Bucket: bucket, Key: getMultipartKey(key, uploadID, i)}, nil)
return "", iodine.New(drivers.BadDigest{
Md5: recvMD5,
Bucket: bucketName,
Key: getMultipartKey(objectName, uploadID, i),
}, nil)
}
_, err = io.Copy(&fullObject, bytes.NewBuffer(object))
if err != nil {
@@ -311,15 +292,15 @@ func (d donutDriver) CompleteMultipartUpload(bucket, key, uploadID string, parts
md5sumSlice := md5.Sum(fullObject.Bytes())
// this is needed for final verification inside CreateObject, do not convert this to hex
md5sum := base64.StdEncoding.EncodeToString(md5sumSlice[:])
etag, err := d.CreateObject(bucket, key, "", md5sum, size, &fullObject)
etag, err := d.CreateObject(bucketName, objectName, "", md5sum, size, &fullObject)
if err != nil {
// No need to call internal cleanup functions here, caller will call AbortMultipartUpload()
// which would in-turn cleanup properly in accordance with S3 Spec
return "", iodine.New(err, nil)
}
fullObject.Reset()
d.cleanupMultiparts(bucket, key, uploadID)
d.cleanupMultipartSession(bucket, key, uploadID)
d.cleanupMultiparts(bucketName, objectName, uploadID)
d.cleanupMultipartSession(bucketName, objectName, uploadID)
return etag, nil
}
@@ -330,14 +311,13 @@ func (a byKey) Len() int { return len(a) }
func (a byKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byKey) Less(i, j int) bool { return a[i].Key < a[j].Key }
func (d donutDriver) ListMultipartUploads(bucket string, resources drivers.BucketMultipartResourcesMetadata) (drivers.BucketMultipartResourcesMetadata, error) {
// TODO handle delimiter
func (d donutDriver) ListMultipartUploads(bucketName string, resources drivers.BucketMultipartResourcesMetadata) (drivers.BucketMultipartResourcesMetadata, error) {
d.lock.RLock()
defer d.lock.RUnlock()
if _, ok := d.storedBuckets[bucket]; ok == false {
return drivers.BucketMultipartResourcesMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
if _, ok := d.storedBuckets[bucketName]; ok == false {
return drivers.BucketMultipartResourcesMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucketName}, nil)
}
storedBucket := d.storedBuckets[bucket]
storedBucket := d.storedBuckets[bucketName]
var uploads []*drivers.UploadMetadata
for key, session := range storedBucket.multiPartSession {
@@ -391,23 +371,23 @@ func (a partNumber) Len() int { return len(a) }
func (a partNumber) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
func (d donutDriver) ListObjectParts(bucket, key string, resources drivers.ObjectResourcesMetadata) (drivers.ObjectResourcesMetadata, error) {
func (d donutDriver) ListObjectParts(bucketName, objectName string, resources drivers.ObjectResourcesMetadata) (drivers.ObjectResourcesMetadata, error) {
// Verify upload id
d.lock.RLock()
defer d.lock.RUnlock()
if _, ok := d.storedBuckets[bucket]; ok == false {
return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
if _, ok := d.storedBuckets[bucketName]; ok == false {
return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucketName}, nil)
}
storedBucket := d.storedBuckets[bucket]
if _, ok := storedBucket.multiPartSession[key]; ok == false {
return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: key}, nil)
storedBucket := d.storedBuckets[bucketName]
if _, ok := storedBucket.multiPartSession[objectName]; ok == false {
return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.ObjectNotFound{Bucket: bucketName, Object: objectName}, nil)
}
if storedBucket.multiPartSession[key].uploadID != resources.UploadID {
if storedBucket.multiPartSession[objectName].uploadID != resources.UploadID {
return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.InvalidUploadID{UploadID: resources.UploadID}, nil)
}
objectResourcesMetadata := resources
objectResourcesMetadata.Bucket = bucket
objectResourcesMetadata.Key = key
objectResourcesMetadata.Bucket = bucketName
objectResourcesMetadata.Key = objectName
var parts []*drivers.PartMetadata
var startPartNumber int
switch {
@@ -416,7 +396,7 @@ func (d donutDriver) ListObjectParts(bucket, key string, resources drivers.Objec
default:
startPartNumber = objectResourcesMetadata.PartNumberMarker
}
for i := startPartNumber; i <= storedBucket.multiPartSession[key].totalParts; i++ {
for i := startPartNumber; i <= storedBucket.multiPartSession[objectName].totalParts; i++ {
if len(parts) > objectResourcesMetadata.MaxParts {
sort.Sort(partNumber(parts))
objectResourcesMetadata.IsTruncated = true
@@ -424,7 +404,7 @@ func (d donutDriver) ListObjectParts(bucket, key string, resources drivers.Objec
objectResourcesMetadata.NextPartNumberMarker = i
return objectResourcesMetadata, nil
}
part, ok := storedBucket.partMetadata[bucket+"/"+getMultipartKey(key, resources.UploadID, i)]
part, ok := storedBucket.partMetadata[bucketName+"/"+getMultipartKey(objectName, resources.UploadID, i)]
if !ok {
return drivers.ObjectResourcesMetadata{}, iodine.New(errors.New("missing part: "+strconv.Itoa(i)), nil)
}
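
CompleteMultipartUpload above verifies each part against its client-supplied MD5 before concatenating the parts in order. A simplified, hypothetical version of that verification loop:

package main

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// verifyAndAssemble checks every part against its expected hex MD5 and
// concatenates the parts in part-number order, mirroring the loop in
// CompleteMultipartUpload (simplified: no locking, no cache lookups).
func verifyAndAssemble(parts map[int][]byte, md5s map[int]string) ([]byte, error) {
	var full bytes.Buffer
	for i := 1; i <= len(parts); i++ {
		data, ok := parts[i]
		if !ok {
			return nil, fmt.Errorf("missing part: %d", i)
		}
		expected, err := hex.DecodeString(md5s[i])
		if err != nil {
			return nil, fmt.Errorf("invalid digest for part %d", i)
		}
		actual := md5.Sum(data)
		if !bytes.Equal(expected, actual[:]) {
			return nil, fmt.Errorf("bad digest for part %d", i)
		}
		full.Write(data)
	}
	return full.Bytes(), nil
}

func main() {
	p1, p2 := []byte("hello "), []byte("world")
	s1, s2 := md5.Sum(p1), md5.Sum(p2)
	object, err := verifyAndAssemble(
		map[int][]byte{1: p1, 2: p2},
		map[int]string{1: hex.EncodeToString(s1[:]), 2: hex.EncodeToString(s2[:])},
	)
	fmt.Println(string(object), err)
}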

@@ -115,6 +115,39 @@ func (r *Cache) Get(key string) ([]byte, bool) {
return value, true
}
// Append appends new data to an existing key;
// if the key doesn't exist it behaves like Set()
func (r *Cache) Append(key string, value []byte) bool {
r.Lock()
defer r.Unlock()
valueLen := uint64(len(value))
if r.maxSize > 0 {
// check if the size of the object is not bigger than the
// capacity of the cache
if valueLen > r.maxSize {
return false
}
// remove a random key only if we reach the maxSize threshold
for (r.currentSize + valueLen) > r.maxSize {
for randomKey := range r.items {
r.doDelete(randomKey)
break
}
}
}
_, ok := r.items[key]
if !ok {
r.items[key] = value
r.currentSize += valueLen
r.updatedAt[key] = time.Now()
return true
}
r.items[key] = append(r.items[key], value...)
r.currentSize += valueLen
r.updatedAt[key] = time.Now()
return true
}
// Set will persist a value to the cache
func (r *Cache) Set(key string, value []byte) bool {
r.Lock()
@@ -128,8 +161,8 @@ func (r *Cache) Set(key string, value []byte) bool {
}
// remove a random key only if we reach the maxSize threshold
for (r.currentSize + valueLen) > r.maxSize {
for key := range r.items {
r.doDelete(key)
for randomKey := range r.items {
r.doDelete(randomKey)
break
}
}
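
Putting the trove changes together, intended usage looks roughly like this (the NewCache constructor and its argument are assumed here; check pkg/storage/trove for the actual signature):

cache := trove.NewCache(10 * 1024 * 1024)       // assumed constructor taking a maxSize in bytes
cache.Append("bucket/object", []byte("part-1")) // key absent: behaves like Set()
cache.Append("bucket/object", []byte("part-2")) // key present: bytes are appended in place
data, ok := cache.Get("bucket/object")          // data is "part-1part-2", ok is true

Like Set(), Append() rejects any single value larger than maxSize and evicts random keys until the new bytes fit (the eviction path is skipped entirely when maxSize is 0), so one oversized stream cannot wedge the cache.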
