cache: Increasing caching GC percent from 20 to 50. (#4041)

The previous value was set to avoid a large build-up of cached
values, but we can clearly see that it causes frequent GC
pauses, which can lead to a significant drop in performance.

Change this value to 50%, and decrease it to 25% once 75% of
the maximum cache size is in use, to provide a larger window
between GC pauses.

Another change is to only enable caching if a server has
more than 24GB of RAM instead of 8GB.
master
Harshavardhana 8 years ago committed by GitHub
parent 18bfe5cba6
commit 7765081db7
  1. 10
      cmd/server-rlimit_test.go
  2. 5
      cmd/xl-v1.go
  3. 48
      pkg/objcache/objcache.go

@ -29,15 +29,15 @@ func TestGetMaxCacheSize(t *testing.T) {
{uint64(0), minRAMSize, uint64(0)}, {uint64(0), minRAMSize, uint64(0)},
{uint64(18446744073709551615), uint64(8115998720), uint64(0)}, {uint64(18446744073709551615), uint64(8115998720), uint64(0)},
{uint64(8115998720), uint64(16115998720), uint64(0)}, {uint64(8115998720), uint64(16115998720), uint64(0)},
{minRAMSize, minRAMSize, uint64(4294967296)}, {minRAMSize, minRAMSize, uint64(12884901888)},
{minRAMSize, uint64(16115998720), uint64(4294967296)}, {minRAMSize, uint64(16115998720), uint64(0)},
{uint64(18446744073709551615), uint64(10115998720), uint64(5057999360)}, {uint64(18446744073709551615), uint64(10115998720), uint64(0)},
} }
for _, testCase := range testCases { for i, testCase := range testCases {
cacheSize := getMaxCacheSize(testCase.curLimit, testCase.totalRAM) cacheSize := getMaxCacheSize(testCase.curLimit, testCase.totalRAM)
if testCase.expectedResult != cacheSize { if testCase.expectedResult != cacheSize {
t.Fatalf("expected: %v, got: %v", testCase.expectedResult, cacheSize) t.Fatalf("Test %d, Expected: %v, Got: %v", i+1, testCase.expectedResult, cacheSize)
} }
} }
} }

@ -41,9 +41,8 @@ const (
// Uploads metadata file carries per multipart object metadata. // Uploads metadata file carries per multipart object metadata.
uploadsJSONFile = "uploads.json" uploadsJSONFile = "uploads.json"
// Represents the minimum required RAM size before // Represents the minimum required RAM size to enable caching.
// we enable caching. minRAMSize = 24 * humanize.GiByte
minRAMSize = 8 * humanize.GiByte
// Maximum erasure blocks. // Maximum erasure blocks.
maxErasureBlocks = 16 maxErasureBlocks = 16

@ -39,7 +39,18 @@ const (
defaultBufferRatio = uint64(10) defaultBufferRatio = uint64(10)
// defaultGCPercent represents default garbage collection target percentage. // defaultGCPercent represents default garbage collection target percentage.
defaultGCPercent = 20 defaultGCPercent = 50
)
var (
// ErrKeyNotFoundInCache - key not found in cache.
ErrKeyNotFoundInCache = errors.New("Key not found in cache")
// ErrCacheFull - cache is full.
ErrCacheFull = errors.New("Not enough space in cache")
// ErrExcessData - excess data was attempted to be written on cache.
ErrExcessData = errors.New("Attempted excess write on cache")
) )
// buffer represents the in memory cache of a single entry. // buffer represents the in memory cache of a single entry.
@ -108,7 +119,7 @@ func New(maxSize uint64, expiry time.Duration) (c *Cache, err error) {
// If gcpercent=100 and we're using 4M, we'll gc again // If gcpercent=100 and we're using 4M, we'll gc again
// when we get to 8M. // when we get to 8M.
// //
// Set this value to 20% if caching is enabled. // Set this value to 40% if caching is enabled.
debug.SetGCPercent(defaultGCPercent) debug.SetGCPercent(defaultGCPercent)
// Max cache entry size - indicates the // Max cache entry size - indicates the
@ -122,6 +133,7 @@ func New(maxSize uint64, expiry time.Duration) (c *Cache, err error) {
} }
return i return i
}() }()
c = &Cache{ c = &Cache{
onceGC: sync.Once{}, onceGC: sync.Once{},
maxSize: maxSize, maxSize: maxSize,
@ -129,6 +141,7 @@ func New(maxSize uint64, expiry time.Duration) (c *Cache, err error) {
entries: make(map[string]*buffer), entries: make(map[string]*buffer),
expiry: expiry, expiry: expiry,
} }
// We have expiry start the janitor routine. // We have expiry start the janitor routine.
if expiry > 0 { if expiry > 0 {
// Initialize a new stop GC channel. // Initialize a new stop GC channel.
@ -137,18 +150,10 @@ func New(maxSize uint64, expiry time.Duration) (c *Cache, err error) {
// Start garbage collection routine to expire objects. // Start garbage collection routine to expire objects.
c.StartGC() c.StartGC()
} }
return c, nil return c, nil
} }
// ErrKeyNotFoundInCache - key not found in cache.
var ErrKeyNotFoundInCache = errors.New("Key not found in cache")
// ErrCacheFull - cache is full.
var ErrCacheFull = errors.New("Not enough space in cache")
// ErrExcessData - excess data was attempted to be written on cache.
var ErrExcessData = errors.New("Attempted excess write on cache")
// Create - validates if object size fits with in cache size limit and returns a io.WriteCloser // Create - validates if object size fits with in cache size limit and returns a io.WriteCloser
// to which object contents can be written and finally Close()'d. During Close() we // to which object contents can be written and finally Close()'d. During Close() we
// checks if the amount of data written is equal to the size of the object, in which // checks if the amount of data written is equal to the size of the object, in which
@ -169,18 +174,19 @@ func (c *Cache) Create(key string, size int64) (w io.WriteCloser, err error) {
return nil, ErrCacheFull return nil, ErrCacheFull
} }
// Check if the incoming size is going to exceed
// the effective cache size, if yes return error
// instead.
c.mutex.Lock() c.mutex.Lock()
// Check if the incoming size is going to exceed the
// effective cache size, if yes return error instead.
if c.currentSize+valueLen > c.maxSize { if c.currentSize+valueLen > c.maxSize {
c.mutex.Unlock() c.mutex.Unlock()
return nil, ErrCacheFull return nil, ErrCacheFull
} }
// Change GC percent if the current cache usage
// is already 75% of the maximum allowed usage. // Change GC percent if the current cache usage might
if c.currentSize > (75 * c.maxSize / 100) { // become 75% of the maximum allowed usage, change
c.onceGC.Do(func() { debug.SetGCPercent(defaultGCPercent - 10) }) // the GC percent.
if c.currentSize+valueLen > (75 * c.maxSize / 100) {
c.onceGC.Do(func() { debug.SetGCPercent(defaultGCPercent - 25) })
} }
c.mutex.Unlock() c.mutex.Unlock()
@ -200,11 +206,13 @@ func (c *Cache) Create(key string, size int64) (w io.WriteCloser, err error) {
// Full object not available hence do not save buf to object cache. // Full object not available hence do not save buf to object cache.
return io.ErrShortBuffer return io.ErrShortBuffer
} }
// Full object available in buf, save it to cache. // Full object available in buf, save it to cache.
c.entries[key] = &buffer{ c.entries[key] = &buffer{
value: cbuf.buffer, value: cbuf.buffer,
lastAccessed: time.Now().UTC(), // Save last accessed time. lastAccessed: time.Now().UTC(), // Save last accessed time.
} }
// Account for the memory allocated above. // Account for the memory allocated above.
c.currentSize += uint64(size) c.currentSize += uint64(size)
return nil return nil
@ -213,6 +221,8 @@ func (c *Cache) Create(key string, size int64) (w io.WriteCloser, err error) {
// Object contents that is written - cappedWriter.Write(data) // Object contents that is written - cappedWriter.Write(data)
// will be accumulated in buf which implements io.Writer. // will be accumulated in buf which implements io.Writer.
cbuf.onClose = onClose cbuf.onClose = onClose
// Capped writer.
return cbuf, nil return cbuf, nil
} }
@ -228,11 +238,13 @@ func (c *Cache) Open(key string, objModTime time.Time) (io.ReaderAt, error) {
if !ok { if !ok {
return nil, ErrKeyNotFoundInCache return nil, ErrKeyNotFoundInCache
} }
// Check if buf is recent copy of the object on disk. // Check if buf is recent copy of the object on disk.
if buf.lastAccessed.Before(objModTime) { if buf.lastAccessed.Before(objModTime) {
c.delete(key) c.delete(key)
return nil, ErrKeyNotFoundInCache return nil, ErrKeyNotFoundInCache
} }
buf.lastAccessed = time.Now().UTC() buf.lastAccessed = time.Now().UTC()
return bytes.NewReader(buf.value), nil return bytes.NewReader(buf.value), nil
} }

Loading…
Cancel
Save