From 7765081db7eeb63a4803f64c950146eb3238fd04 Mon Sep 17 00:00:00 2001
From: Harshavardhana
Date: Sat, 15 Apr 2017 02:16:49 -0700
Subject: [PATCH] cache: Increasing caching GC percent from 20 to 50. (#4041)

The previous value was set to avoid large cache value build up, but it
clearly causes frequent GC pauses, which can lead to a significant drop
in performance. Change this value to 50%, and decrease it to 25% once
75% of the cache size is in use, to allow a larger window between GC
pauses.

Another change is to only allow caching if a server has at least 24GB
of RAM, instead of 8GB.
---
 cmd/server-rlimit_test.go | 10 ++++----
 cmd/xl-v1.go              |  5 ++--
 pkg/objcache/objcache.go  | 48 ++++++++++++++++++++++++---------------
 3 files changed, 37 insertions(+), 26 deletions(-)

diff --git a/cmd/server-rlimit_test.go b/cmd/server-rlimit_test.go
index 57b9d83e4..b2201f6d4 100644
--- a/cmd/server-rlimit_test.go
+++ b/cmd/server-rlimit_test.go
@@ -29,15 +29,15 @@ func TestGetMaxCacheSize(t *testing.T) {
 		{uint64(0), minRAMSize, uint64(0)},
 		{uint64(18446744073709551615), uint64(8115998720), uint64(0)},
 		{uint64(8115998720), uint64(16115998720), uint64(0)},
-		{minRAMSize, minRAMSize, uint64(4294967296)},
-		{minRAMSize, uint64(16115998720), uint64(4294967296)},
-		{uint64(18446744073709551615), uint64(10115998720), uint64(5057999360)},
+		{minRAMSize, minRAMSize, uint64(12884901888)},
+		{minRAMSize, uint64(16115998720), uint64(0)},
+		{uint64(18446744073709551615), uint64(10115998720), uint64(0)},
 	}
-	for _, testCase := range testCases {
+	for i, testCase := range testCases {
 		cacheSize := getMaxCacheSize(testCase.curLimit, testCase.totalRAM)
 		if testCase.expectedResult != cacheSize {
-			t.Fatalf("expected: %v, got: %v", testCase.expectedResult, cacheSize)
+			t.Fatalf("Test %d, Expected: %v, Got: %v", i+1, testCase.expectedResult, cacheSize)
 		}
 	}
 }
diff --git a/cmd/xl-v1.go b/cmd/xl-v1.go
index 83b319181..f9d24417e 100644
--- a/cmd/xl-v1.go
+++ b/cmd/xl-v1.go
@@ -41,9 +41,8 @@ const (
 	// Uploads metadata file carries per multipart object metadata.
 	uploadsJSONFile = "uploads.json"
 
-	// Represents the minimum required RAM size before
-	// we enable caching.
-	minRAMSize = 8 * humanize.GiByte
+	// Represents the minimum required RAM size to enable caching.
+	minRAMSize = 24 * humanize.GiByte
 
 	// Maximum erasure blocks.
 	maxErasureBlocks = 16
diff --git a/pkg/objcache/objcache.go b/pkg/objcache/objcache.go
index 43eb31dc2..59a4aac1e 100644
--- a/pkg/objcache/objcache.go
+++ b/pkg/objcache/objcache.go
@@ -39,7 +39,18 @@ const (
 	defaultBufferRatio = uint64(10)
 
 	// defaultGCPercent represents default garbage collection target percentage.
-	defaultGCPercent = 20
+	defaultGCPercent = 50
+)
+
+var (
+	// ErrKeyNotFoundInCache - key not found in cache.
+	ErrKeyNotFoundInCache = errors.New("Key not found in cache")
+
+	// ErrCacheFull - cache is full.
+	ErrCacheFull = errors.New("Not enough space in cache")
+
+	// ErrExcessData - excess data was attempted to be written on cache.
+	ErrExcessData = errors.New("Attempted excess write on cache")
 )
 
 // buffer represents the in memory cache of a single entry.
@@ -108,7 +119,7 @@ func New(maxSize uint64, expiry time.Duration) (c *Cache, err error) {
 	// If gcpercent=100 and we're using 4M, we'll gc again
 	// when we get to 8M.
 	//
-	// Set this value to 20% if caching is enabled.
+	// Set this value to 50% if caching is enabled.
 	debug.SetGCPercent(defaultGCPercent)
 
 	// Max cache entry size - indicates the
@@ -122,6 +133,7 @@ func New(maxSize uint64, expiry time.Duration) (c *Cache, err error) {
 		}
 		return i
 	}()
+
 	c = &Cache{
 		onceGC:  sync.Once{},
 		maxSize: maxSize,
@@ -129,6 +141,7 @@ func New(maxSize uint64, expiry time.Duration) (c *Cache, err error) {
 		entries: make(map[string]*buffer),
 		expiry:  expiry,
 	}
+
 	// We have expiry start the janitor routine.
 	if expiry > 0 {
 		// Initialize a new stop GC channel.
@@ -137,18 +150,10 @@ func New(maxSize uint64, expiry time.Duration) (c *Cache, err error) {
 		// Start garbage collection routine to expire objects.
 		c.StartGC()
 	}
+
 	return c, nil
 }
 
-// ErrKeyNotFoundInCache - key not found in cache.
-var ErrKeyNotFoundInCache = errors.New("Key not found in cache")
-
-// ErrCacheFull - cache is full.
-var ErrCacheFull = errors.New("Not enough space in cache")
-
-// ErrExcessData - excess data was attempted to be written on cache.
-var ErrExcessData = errors.New("Attempted excess write on cache")
-
 // Create - validates if object size fits with in cache size limit and returns a io.WriteCloser
 // to which object contents can be written and finally Close()'d. During Close() we
 // checks if the amount of data written is equal to the size of the object, in which
@@ -169,18 +174,19 @@ func (c *Cache) Create(key string, size int64) (w io.WriteCloser, err error) {
 		return nil, ErrCacheFull
 	}
 
-	// Check if the incoming size is going to exceed
-	// the effective cache size, if yes return error
-	// instead.
 	c.mutex.Lock()
+	// Check if the incoming size is going to exceed the
+	// effective cache size, if yes return error instead.
 	if c.currentSize+valueLen > c.maxSize {
 		c.mutex.Unlock()
 		return nil, ErrCacheFull
 	}
-	// Change GC percent if the current cache usage
-	// is already 75% of the maximum allowed usage.
-	if c.currentSize > (75 * c.maxSize / 100) {
-		c.onceGC.Do(func() { debug.SetGCPercent(defaultGCPercent - 10) })
+
+	// Lower the GC percent if the incoming write would
+	// push the current cache usage past 75% of the
+	// maximum allowed usage.
+	if c.currentSize+valueLen > (75 * c.maxSize / 100) {
+		c.onceGC.Do(func() { debug.SetGCPercent(defaultGCPercent - 25) })
 	}
 	c.mutex.Unlock()
@@ -200,11 +206,13 @@ func (c *Cache) Create(key string, size int64) (w io.WriteCloser, err error) {
 			// Full object not available hence do not save buf to object cache.
 			return io.ErrShortBuffer
 		}
+
 		// Full object available in buf, save it to cache.
 		c.entries[key] = &buffer{
 			value:        cbuf.buffer,
 			lastAccessed: time.Now().UTC(), // Save last accessed time.
 		}
+
 		// Account for the memory allocated above.
 		c.currentSize += uint64(size)
 		return nil
@@ -213,6 +221,8 @@ func (c *Cache) Create(key string, size int64) (w io.WriteCloser, err error) {
 	// Object contents that is written - cappedWriter.Write(data)
 	// will be accumulated in buf which implements io.Writer.
 	cbuf.onClose = onClose
+
+	// Return the capped writer.
 	return cbuf, nil
 }
@@ -228,11 +238,13 @@ func (c *Cache) Open(key string, objModTime time.Time) (io.ReaderAt, error) {
 	if !ok {
 		return nil, ErrKeyNotFoundInCache
 	}
+
 	// Check if buf is recent copy of the object on disk.
 	if buf.lastAccessed.Before(objModTime) {
 		c.delete(key)
 		return nil, ErrKeyNotFoundInCache
 	}
+
 	buf.lastAccessed = time.Now().UTC()
 	return bytes.NewReader(buf.value), nil
 }
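
Note: the sketch below illustrates, outside the objcache code, the GC tuning pattern this patch applies: set a relaxed GC target of 50% at startup, then tighten it to 25% exactly once when cache usage is about to cross 75% of its budget. It is a minimal, hypothetical example; gcTuner, newGCTuner, and account are invented names for illustration only and are not part of the MinIO codebase.

package main

import (
	"fmt"
	"runtime/debug"
	"sync"
)

const defaultGCPercent = 50 // matches the new value introduced by this patch

// gcTuner is a hypothetical, simplified stand-in for objcache.Cache,
// tracking only the byte counts needed to drive the GC target.
type gcTuner struct {
	onceGC      sync.Once
	maxSize     uint64
	currentSize uint64
}

func newGCTuner(maxSize uint64) *gcTuner {
	// Relaxed GC target while the cache is mostly empty: the collector
	// now runs when the heap grows 50% past the live set, instead of 20%.
	debug.SetGCPercent(defaultGCPercent)
	return &gcTuner{maxSize: maxSize}
}

func (t *gcTuner) account(incoming uint64) {
	// Once an incoming write would push usage past 75% of the budget,
	// tighten the GC target to 25%, exactly once, via sync.Once.
	if t.currentSize+incoming > 75*t.maxSize/100 {
		t.onceGC.Do(func() { debug.SetGCPercent(defaultGCPercent - 25) })
	}
	t.currentSize += incoming
}

func main() {
	t := newGCTuner(100)
	t.account(80) // crosses the 75% threshold: GC percent drops to 25
	fmt.Println("currentSize:", t.currentSize)
}

Wrapping debug.SetGCPercent in sync.Once.Do mirrors the patch's intent: the tighter 25% target is applied at most once per cache, so the global GC setting is not reconfigured on every write after the threshold is crossed.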