Remove brittle tests for cache (#9570)

master
poornas authored 5 years ago, committed by GitHub
parent f8edc233ab
commit a8e5a86fa0
1 changed file with 183 deletions:
cmd/disk-cache_test.go

@@ -17,31 +17,9 @@
package cmd

import (
	"bytes"
	"io"
	"testing"

	"github.com/minio/minio/pkg/hash"
)
// Initialize cache objects.
func initCacheObjects(disk string, cacheMaxUse, cacheAfter, cacheWatermarkLow, cacheWatermarkHigh int) (*diskCache, error) {
	return newDiskCache(disk, cacheMaxUse, cacheAfter, cacheWatermarkLow, cacheWatermarkHigh)
}
// inits diskCache structs for nDisks
func initDiskCaches(drives []string, cacheMaxUse, cacheAfter, cacheWatermarkLow, cacheWatermarkHigh int, t *testing.T) ([]*diskCache, error) {
	var cb []*diskCache
	for _, d := range drives {
		obj, err := initCacheObjects(d, cacheMaxUse, cacheAfter, cacheWatermarkLow, cacheWatermarkHigh)
		if err != nil {
			return nil, err
		}
		cb = append(cb, obj)
	}
	return cb, nil
}
// Tests ToObjectInfo function.
func TestCacheMetadataObjInfo(t *testing.T) {
	m := cacheMeta{Meta: nil}
@@ -60,85 +38,6 @@ func TestCacheMetadataObjInfo(t *testing.T) {
	}
}
// test whether a drive being offline causes
// getCacheLoc to fetch the next online drive
func TestGetCachedLoc(t *testing.T) {
	for n := 1; n < 10; n++ {
		fsDirs, err := getRandomDisks(n)
		if err != nil {
			t.Fatal(err)
		}
		d, err := initDiskCaches(fsDirs, 100, 1, 80, 90, t)
		if err != nil {
			t.Fatal(err)
		}
		c := cacheObjects{cache: d}
		bucketName := "testbucket"
		objectName := "testobject"
		// find cache drive where object would be hashed
		index := c.hashIndex(bucketName, objectName)
		// turn off drive by setting online status to false
		c.cache[index].setOffline()
		cfs, err := c.getCacheLoc(bucketName, objectName)
		if n == 1 && err == errDiskNotFound {
			continue
		}
		if err != nil {
			t.Fatal(err)
		}
		i := -1
		for j, f := range c.cache {
			if f == cfs {
				i = j
				break
			}
		}
		if i != (index+1)%n {
			t.Fatalf("expected next cache location to be picked")
		}
	}
}
// test whether a drive being offline causes
// getCacheLoc to fetch the next online drive,
// with a max-use limit set on the cache drives
func TestGetCacheMaxUse(t *testing.T) {
	for n := 1; n < 10; n++ {
		fsDirs, err := getRandomDisks(n)
		if err != nil {
			t.Fatal(err)
		}
		d, err := initDiskCaches(fsDirs, 80, 1, 80, 90, t)
		if err != nil {
			t.Fatal(err)
		}
		c := cacheObjects{cache: d}
		bucketName := "testbucket"
		objectName := "testobject"
		// find cache drive where object would be hashed
		index := c.hashIndex(bucketName, objectName)
		// turn off drive by setting online status to false
		c.cache[index].setOffline()
		cb, err := c.getCacheLoc(bucketName, objectName)
		if n == 1 && err == errDiskNotFound {
			continue
		}
		if err != nil {
			t.Fatal(err)
		}
		i := -1
		for j, f := range d {
			if f == cb {
				i = j
				break
			}
		}
		if i != (index+1)%n {
			t.Fatalf("expected next cache location to be picked")
		}
	}
}
// test wildcard patterns for excluding entries from cache
func TestCacheExclusion(t *testing.T) {
	cobjects := &cacheObjects{
@@ -171,85 +70,3 @@ func TestCacheExclusion(t *testing.T) {
		}
	}
}
// Test diskCache with upper bound on max cache use.
func TestDiskCacheMaxUse(t *testing.T) {
	fsDirs, err := getRandomDisks(1)
	if err != nil {
		t.Fatal(err)
	}
	d, err := initDiskCaches(fsDirs, 90, 0, 90, 99, t)
	if err != nil {
		t.Fatal(err)
	}
	cache := d[0]
	ctx := GlobalContext
	bucketName := "testbucket"
	objectName := "testobject"
	content := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
	etag := "061208c10af71a30c6dcd6cf5d89f0fe"
	contentType := "application/zip"
	size := len(content)
	httpMeta := make(map[string]string)
	httpMeta["etag"] = etag
	httpMeta["content-type"] = contentType
	objInfo := ObjectInfo{}
	objInfo.Bucket = bucketName
	objInfo.Name = objectName
	objInfo.Size = int64(size)
	objInfo.ContentType = contentType
	objInfo.ETag = etag
	objInfo.UserDefined = httpMeta
	opts := ObjectOptions{}
	byteReader := bytes.NewReader([]byte(content))
	hashReader, err := hash.NewReader(byteReader, int64(size), "", "", int64(size), globalCLIContext.StrictS3Compat)
	if err != nil {
		t.Fatal(err)
	}
	if !cache.diskAvailable(int64(size)) {
		err = cache.Put(ctx, bucketName, objectName, hashReader, hashReader.Size(), nil, ObjectOptions{UserDefined: httpMeta}, false)
		if err != errDiskFull {
			t.Fatal("Cache max-use limit violated.")
		}
	} else {
		err = cache.Put(ctx, bucketName, objectName, hashReader, hashReader.Size(), nil, ObjectOptions{UserDefined: httpMeta}, false)
		if err != nil {
			t.Fatal(err)
		}
		cReader, _, err := cache.Get(ctx, bucketName, objectName, nil, nil, opts)
		if err != nil {
			t.Fatal(err)
		}
		cachedObjInfo := cReader.ObjInfo
		if !cache.Exists(ctx, bucketName, objectName) {
			t.Fatal("Expected object to exist on cache")
		}
		if cachedObjInfo.ETag != objInfo.ETag {
			t.Fatal("Expected ETag to match")
		}
		if cachedObjInfo.Size != objInfo.Size {
			t.Fatal("Size mismatch")
		}
		if cachedObjInfo.ContentType != objInfo.ContentType {
			t.Fatal("Cached content-type does not match")
		}
		writer := bytes.NewBuffer(nil)
		_, err = io.Copy(writer, cReader)
		if err != nil {
			t.Fatal(err)
		}
		if ccontent := writer.Bytes(); !bytes.Equal([]byte(content), ccontent) {
			t.Errorf("wrong cached file content")
		}
		cReader.Close()
		cache.Delete(ctx, bucketName, objectName)
		online := cache.IsOnline()
		if !online {
			t.Errorf("expected cache drive to be online")
		}
	}
}
