Merge pull request #625 from harshavardhana/pr_out_make_caching_a_package_trove_and_use_it_inside_memory_driver

master
Harshavardhana 10 years ago
commit e1270bcdf6
  1. 13
      pkg/storage/drivers/memory/memory.go
  2. 6
      pkg/storage/drivers/memory/memory_multipart.go
  3. 19
      pkg/storage/trove/trove.go
  4. 45
      pkg/storage/trove/trove_test.go

@ -34,14 +34,15 @@ import (
"github.com/minio/minio/pkg/iodine" "github.com/minio/minio/pkg/iodine"
"github.com/minio/minio/pkg/storage/drivers" "github.com/minio/minio/pkg/storage/drivers"
"github.com/minio/minio/pkg/storage/trove"
) )
// memoryDriver - local variables // memoryDriver - local variables
type memoryDriver struct { type memoryDriver struct {
storedBuckets map[string]storedBucket storedBuckets map[string]storedBucket
lock *sync.RWMutex lock *sync.RWMutex
objects *Cache objects *trove.Cache
multiPartObjects *Cache multiPartObjects *trove.Cache
} }
type storedBucket struct { type storedBucket struct {
@ -69,8 +70,8 @@ func Start(maxSize uint64, expiration time.Duration) (chan<- string, <-chan erro
var memory *memoryDriver var memory *memoryDriver
memory = new(memoryDriver) memory = new(memoryDriver)
memory.storedBuckets = make(map[string]storedBucket) memory.storedBuckets = make(map[string]storedBucket)
memory.objects = NewCache(maxSize, expiration) memory.objects = trove.NewCache(maxSize, expiration)
memory.multiPartObjects = NewCache(0, time.Duration(0)) memory.multiPartObjects = trove.NewCache(0, time.Duration(0))
memory.lock = new(sync.RWMutex) memory.lock = new(sync.RWMutex)
memory.objects.OnExpired = memory.expiredObject memory.objects.OnExpired = memory.expiredObject
@ -108,7 +109,7 @@ func (memory *memoryDriver) GetObject(w io.Writer, bucket string, object string)
memory.lock.RUnlock() memory.lock.RUnlock()
return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil) return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil)
} }
written, err := io.Copy(w, bytes.NewBuffer(data.([]byte))) written, err := io.Copy(w, bytes.NewBuffer(data))
memory.lock.RUnlock() memory.lock.RUnlock()
return written, iodine.New(err, nil) return written, iodine.New(err, nil)
} }
@ -142,7 +143,7 @@ func (memory *memoryDriver) GetPartialObject(w io.Writer, bucket, object string,
memory.lock.RUnlock() memory.lock.RUnlock()
return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, errParams) return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, errParams)
} }
written, err := io.CopyN(w, bytes.NewBuffer(data.([]byte)[start:]), length) written, err := io.CopyN(w, bytes.NewBuffer(data[start:]), length)
memory.lock.RUnlock() memory.lock.RUnlock()
return written, iodine.New(err, nil) return written, iodine.New(err, nil)
} }

@ -208,11 +208,9 @@ func (memory *memoryDriver) cleanupMultipartSession(bucket, key, uploadID string
} }
func (memory *memoryDriver) cleanupMultiparts(bucket, key, uploadID string) { func (memory *memoryDriver) cleanupMultiparts(bucket, key, uploadID string) {
memory.lock.Lock()
defer memory.lock.Unlock()
for i := 1; i <= memory.storedBuckets[bucket].multiPartSession[key].totalParts; i++ { for i := 1; i <= memory.storedBuckets[bucket].multiPartSession[key].totalParts; i++ {
objectKey := bucket + "/" + getMultipartKey(key, uploadID, i) objectKey := bucket + "/" + getMultipartKey(key, uploadID, i)
memory.multiPartObjects.doDelete(objectKey) memory.multiPartObjects.Delete(objectKey)
} }
} }
@ -246,7 +244,7 @@ func (memory *memoryDriver) CompleteMultipartUpload(bucket, key, uploadID string
memory.lock.Unlock() memory.lock.Unlock()
return "", iodine.New(errors.New("missing part: "+strconv.Itoa(i)), nil) return "", iodine.New(errors.New("missing part: "+strconv.Itoa(i)), nil)
} }
obj := object.([]byte) obj := object
size += int64(len(obj)) size += int64(len(obj))
calcMD5Bytes := md5.Sum(obj) calcMD5Bytes := md5.Sum(obj)
// complete multi part request header md5sum per part is hex encoded // complete multi part request header md5sum per part is hex encoded

@ -14,14 +14,15 @@
* limitations under the License. * limitations under the License.
*/ */
package memory // Package trove implements in memory caching methods
package trove
import ( import (
"sync" "sync"
"time" "time"
) )
var zeroExpiration = time.Duration(0) var noExpiration = time.Duration(0)
// Cache holds the required variables to compose an in memory cache system // Cache holds the required variables to compose an in memory cache system
// which also provides expiring key mechanism and also maxSize // which also provides expiring key mechanism and also maxSize
@ -31,7 +32,7 @@ type Cache struct {
sync.Mutex sync.Mutex
// items hold the cached objects // items hold the cached objects
items map[string]interface{} items map[string][]byte
// updatedAt holds the time that related item's updated at // updatedAt holds the time that related item's updated at
updatedAt map[string]time.Time updatedAt map[string]time.Time
@ -68,7 +69,7 @@ type Stats struct {
// expiration is used for expiration of a key from cache // expiration is used for expiration of a key from cache
func NewCache(maxSize uint64, expiration time.Duration) *Cache { func NewCache(maxSize uint64, expiration time.Duration) *Cache {
return &Cache{ return &Cache{
items: map[string]interface{}{}, items: make(map[string][]byte),
updatedAt: map[string]time.Time{}, updatedAt: map[string]time.Time{},
expiration: expiration, expiration: expiration,
maxSize: maxSize, maxSize: maxSize,
@ -103,7 +104,7 @@ func (r *Cache) ExpireObjects(gcInterval time.Duration) {
} }
// Get returns a value of a given key if it exists // Get returns a value of a given key if it exists
func (r *Cache) Get(key string) (interface{}, bool) { func (r *Cache) Get(key string) ([]byte, bool) {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()
value, ok := r.items[key] value, ok := r.items[key]
@ -115,10 +116,10 @@ func (r *Cache) Get(key string) (interface{}, bool) {
} }
// Set will persist a value to the cache // Set will persist a value to the cache
func (r *Cache) Set(key string, value interface{}) bool { func (r *Cache) Set(key string, value []byte) bool {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()
valueLen := uint64(len(value.([]byte))) valueLen := uint64(len(value))
if r.maxSize > 0 { if r.maxSize > 0 {
// check if the size of the object is not bigger than the // check if the size of the object is not bigger than the
// capacity of the cache // capacity of the cache
@ -159,7 +160,7 @@ func (r *Cache) Delete(key string) {
func (r *Cache) doDelete(key string) { func (r *Cache) doDelete(key string) {
if _, ok := r.items[key]; ok { if _, ok := r.items[key]; ok {
r.currentSize -= uint64(len(r.items[key].([]byte))) r.currentSize -= uint64(len(r.items[key]))
delete(r.items, key) delete(r.items, key)
delete(r.updatedAt, key) delete(r.updatedAt, key)
r.totalExpired++ r.totalExpired++
@ -174,7 +175,7 @@ func (r *Cache) isValid(key string) bool {
if !ok { if !ok {
return false return false
} }
if r.expiration == zeroExpiration { if r.expiration == noExpiration {
return true return true
} }
return updatedAt.Add(r.expiration).After(time.Now()) return updatedAt.Add(r.expiration).After(time.Now())

@ -0,0 +1,45 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package trove
import (
"testing"
. "github.com/minio/check"
)
// Test hooks the gocheck suite runner into the standard "go test" entry point.
func Test(t *testing.T) {
	TestingT(t)
}
// MySuite is an empty gocheck suite; all cache tests hang off it as methods.
type MySuite struct{}

// Register the suite with gocheck so TestingT discovers and runs it.
var _ = Suite(&MySuite{})
// TestCache exercises the basic Cache lifecycle: Set stores a value,
// Get retrieves it intact, and Delete removes it so a subsequent Get misses.
func (s *MySuite) TestCache(c *C) {
	// 1000-byte capacity, zero duration means entries never expire.
	cache := NewCache(1000, 0)
	data := []byte("Hello, world!")

	// Set should succeed: the payload is well under the cache's max size.
	ok := cache.Set("filename", data)
	c.Assert(ok, Equals, true)

	// Get must return the exact bytes that were stored.
	storedata, ok := cache.Get("filename")
	c.Assert(ok, Equals, true)
	// gocheck convention: obtained value first, expected value last.
	c.Assert(storedata, DeepEquals, data)

	// After Delete the key must no longer be retrievable.
	cache.Delete("filename")
	_, ok = cache.Get("filename")
	c.Assert(ok, Equals, false)
}
Loading…
Cancel
Save