cache: Increasing caching GC percent from 20 to 50. (#4041)

The previous value was set to avoid a large build-up of
cached values, but we can clearly see that it causes
frequent GC pauses, leading to a significant drop in
performance.

Change this value to 50%, and decrease it to 25% once
75% of the cache size is in use, to allow a larger
window between GC pauses.

Another change is to allow caching only if a server has
more than 24 GB of RAM instead of 8 GB.
master
Harshavardhana 8 years ago committed by GitHub
parent 18bfe5cba6
commit 7765081db7
  1. 10
      cmd/server-rlimit_test.go
  2. 5
      cmd/xl-v1.go
  3. 48
      pkg/objcache/objcache.go

@ -29,15 +29,15 @@ func TestGetMaxCacheSize(t *testing.T) {
{uint64(0), minRAMSize, uint64(0)},
{uint64(18446744073709551615), uint64(8115998720), uint64(0)},
{uint64(8115998720), uint64(16115998720), uint64(0)},
{minRAMSize, minRAMSize, uint64(4294967296)},
{minRAMSize, uint64(16115998720), uint64(4294967296)},
{uint64(18446744073709551615), uint64(10115998720), uint64(5057999360)},
{minRAMSize, minRAMSize, uint64(12884901888)},
{minRAMSize, uint64(16115998720), uint64(0)},
{uint64(18446744073709551615), uint64(10115998720), uint64(0)},
}
for _, testCase := range testCases {
for i, testCase := range testCases {
cacheSize := getMaxCacheSize(testCase.curLimit, testCase.totalRAM)
if testCase.expectedResult != cacheSize {
t.Fatalf("expected: %v, got: %v", testCase.expectedResult, cacheSize)
t.Fatalf("Test %d, Expected: %v, Got: %v", i+1, testCase.expectedResult, cacheSize)
}
}
}

@ -41,9 +41,8 @@ const (
// Uploads metadata file carries per multipart object metadata.
uploadsJSONFile = "uploads.json"
// Represents the minimum required RAM size before
// we enable caching.
minRAMSize = 8 * humanize.GiByte
// Represents the minimum required RAM size to enable caching.
minRAMSize = 24 * humanize.GiByte
// Maximum erasure blocks.
maxErasureBlocks = 16

@ -39,7 +39,18 @@ const (
defaultBufferRatio = uint64(10)
// defaultGCPercent represents default garbage collection target percentage.
defaultGCPercent = 20
defaultGCPercent = 50
)
var (
// ErrKeyNotFoundInCache - key not found in cache.
ErrKeyNotFoundInCache = errors.New("Key not found in cache")
// ErrCacheFull - cache is full.
ErrCacheFull = errors.New("Not enough space in cache")
// ErrExcessData - excess data was attempted to be written on cache.
ErrExcessData = errors.New("Attempted excess write on cache")
)
// buffer represents the in memory cache of a single entry.
@ -108,7 +119,7 @@ func New(maxSize uint64, expiry time.Duration) (c *Cache, err error) {
// If gcpercent=100 and we're using 4M, we'll gc again
// when we get to 8M.
//
// Set this value to 20% if caching is enabled.
// Set this value to 50% if caching is enabled.
debug.SetGCPercent(defaultGCPercent)
// Max cache entry size - indicates the
@ -122,6 +133,7 @@ func New(maxSize uint64, expiry time.Duration) (c *Cache, err error) {
}
return i
}()
c = &Cache{
onceGC: sync.Once{},
maxSize: maxSize,
@ -129,6 +141,7 @@ func New(maxSize uint64, expiry time.Duration) (c *Cache, err error) {
entries: make(map[string]*buffer),
expiry: expiry,
}
// We have expiry start the janitor routine.
if expiry > 0 {
// Initialize a new stop GC channel.
@ -137,18 +150,10 @@ func New(maxSize uint64, expiry time.Duration) (c *Cache, err error) {
// Start garbage collection routine to expire objects.
c.StartGC()
}
return c, nil
}
// ErrKeyNotFoundInCache - key not found in cache.
var ErrKeyNotFoundInCache = errors.New("Key not found in cache")
// ErrCacheFull - cache is full.
var ErrCacheFull = errors.New("Not enough space in cache")
// ErrExcessData - excess data was attempted to be written on cache.
var ErrExcessData = errors.New("Attempted excess write on cache")
// Create - validates if object size fits with in cache size limit and returns a io.WriteCloser
// to which object contents can be written and finally Close()'d. During Close() we
// checks if the amount of data written is equal to the size of the object, in which
@ -169,18 +174,19 @@ func (c *Cache) Create(key string, size int64) (w io.WriteCloser, err error) {
return nil, ErrCacheFull
}
// Check if the incoming size is going to exceed
// the effective cache size, if yes return error
// instead.
c.mutex.Lock()
// Check if the incoming size is going to exceed the
// effective cache size, if yes return error instead.
if c.currentSize+valueLen > c.maxSize {
c.mutex.Unlock()
return nil, ErrCacheFull
}
// Change GC percent if the current cache usage
// is already 75% of the maximum allowed usage.
if c.currentSize > (75 * c.maxSize / 100) {
c.onceGC.Do(func() { debug.SetGCPercent(defaultGCPercent - 10) })
// Change GC percent if the current cache usage might
// become 75% of the maximum allowed usage, change
// the GC percent.
if c.currentSize+valueLen > (75 * c.maxSize / 100) {
c.onceGC.Do(func() { debug.SetGCPercent(defaultGCPercent - 25) })
}
c.mutex.Unlock()
@ -200,11 +206,13 @@ func (c *Cache) Create(key string, size int64) (w io.WriteCloser, err error) {
// Full object not available hence do not save buf to object cache.
return io.ErrShortBuffer
}
// Full object available in buf, save it to cache.
c.entries[key] = &buffer{
value: cbuf.buffer,
lastAccessed: time.Now().UTC(), // Save last accessed time.
}
// Account for the memory allocated above.
c.currentSize += uint64(size)
return nil
@ -213,6 +221,8 @@ func (c *Cache) Create(key string, size int64) (w io.WriteCloser, err error) {
// Object contents that is written - cappedWriter.Write(data)
// will be accumulated in buf which implements io.Writer.
cbuf.onClose = onClose
// Capped writer.
return cbuf, nil
}
@ -228,11 +238,13 @@ func (c *Cache) Open(key string, objModTime time.Time) (io.ReaderAt, error) {
if !ok {
return nil, ErrKeyNotFoundInCache
}
// Check if buf is recent copy of the object on disk.
if buf.lastAccessed.Before(objModTime) {
c.delete(key)
return nil, ErrKeyNotFoundInCache
}
buf.lastAccessed = time.Now().UTC()
return bytes.NewReader(buf.value), nil
}

Loading…
Cancel
Save