Rewrite cache implementation to cache only on GET (#7694)
Fixes #7458 Fixes #7573 Fixes #7938 Fixes #6934 Fixes #6265 Fixes #6630

This allows the cache to work consistently for both server and gateway deployments. Range GET requests are cached in the background after the request has been served from the backend.

- All cached content is automatically bitrot protected.
- ETag verification against the backend is avoided if a cache-control header is set and the cached content is still valid.
- This PR changes the cache backend format, and all existing content will be migrated to the new format. Until the data is migrated completely, all content will be served from the backend.
branch master
parent 1ce8d2c476
commit 3385bf3da8
@@ -0,0 +1,573 @@
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bytes"
	"context"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"path"
	"reflect"
	"sync"
	"time"

	"github.com/djherbis/atime"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/disk"
	"github.com/ncw/directio"
)

const (
	// cache.json object metadata for cached objects.
	cacheMetaJSONFile = "cache.json"
	cacheDataFile     = "part.1"
	cacheMetaVersion  = "1.0.0"

	cacheEnvDelimiter = ";"
)

// CacheChecksumInfoV1 - carries checksums of individual blocks on disk.
type CacheChecksumInfoV1 struct {
	Algorithm string `json:"algorithm"`
	Blocksize int64  `json:"blocksize"`
}

// Represents the cache metadata struct
type cacheMeta struct {
	Version string   `json:"version"`
	Stat    statInfo `json:"stat"` // Stat of the current object `cache.json`.

	// checksums of blocks on disk.
	Checksum CacheChecksumInfoV1 `json:"checksum,omitempty"`
	// Metadata map for current object.
	Meta map[string]string `json:"meta,omitempty"`
}

func (m *cacheMeta) ToObjectInfo(bucket, object string) (o ObjectInfo) {
	if len(m.Meta) == 0 {
		m.Meta = make(map[string]string)
		m.Stat.ModTime = timeSentinel
	}

	o = ObjectInfo{
		Bucket: bucket,
		Name:   object,
	}

	// We set file info only if it's valid.
	o.ModTime = m.Stat.ModTime
	o.Size = m.Stat.Size
	o.ETag = extractETag(m.Meta)
	o.ContentType = m.Meta["content-type"]
	o.ContentEncoding = m.Meta["content-encoding"]
	if storageClass, ok := m.Meta[amzStorageClass]; ok {
		o.StorageClass = storageClass
	} else {
		o.StorageClass = globalMinioDefaultStorageClass
	}
	var (
		t time.Time
		e error
	)
	if exp, ok := m.Meta["expires"]; ok {
		if t, e = time.Parse(http.TimeFormat, exp); e == nil {
			o.Expires = t.UTC()
		}
	}
	// etag/md5Sum has already been extracted. We need to
	// remove it so it does not appear as part of the user-defined metadata.
	o.UserDefined = cleanMetadata(m.Meta)
	return o
}

// represents disk cache struct
type diskCache struct {
	dir             string // caching directory
	maxDiskUsagePct int    // max usage in %
	expiry          int    // cache expiry in days
	// mark false if drive is offline
	online bool
	// mutex to protect updates to online variable
	onlineMutex *sync.RWMutex
	// purge() listens on this channel to start the cache-purge process
	purgeChan chan struct{}
	pool      sync.Pool
}

// Inits the disk cache dir if it is not initialized already.
func newdiskCache(dir string, expiry int, maxDiskUsagePct int) (*diskCache, error) {
	if err := os.MkdirAll(dir, 0777); err != nil {
		return nil, fmt.Errorf("Unable to initialize '%s' dir, %s", dir, err)
	}

	if expiry == 0 {
		expiry = globalCacheExpiry
	}
	cache := diskCache{
		dir:             dir,
		expiry:          expiry,
		maxDiskUsagePct: maxDiskUsagePct,
		purgeChan:       make(chan struct{}),
		online:          true,
		onlineMutex:     &sync.RWMutex{},
		pool: sync.Pool{
			New: func() interface{} {
				b := directio.AlignedBlock(int(cacheBlkSize))
				return &b
			},
		},
	}
	return &cache, nil
}

// Returns if the disk usage is low.
// Disk usage is low if usage is < 80% of cacheMaxDiskUsagePct
// Ex. for a 100GB disk, if maxUsage is configured as 70% then cacheMaxDiskUsagePct is 70G
// hence disk usage is low if the disk usage is less than 56G (because 80% of 70G is 56G)
func (c *diskCache) diskUsageLow() bool {
	minUsage := c.maxDiskUsagePct * 80 / 100
	di, err := disk.GetInfo(c.dir)
	if err != nil {
		reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", c.dir)
		ctx := logger.SetReqInfo(context.Background(), reqInfo)
		logger.LogIf(ctx, err)
		return false
	}
	usedPercent := (di.Total - di.Free) * 100 / di.Total
	return int(usedPercent) < minUsage
}

// Return if the disk usage is high.
// Disk usage is high if disk used is > cacheMaxDiskUsagePct
func (c *diskCache) diskUsageHigh() bool {
	di, err := disk.GetInfo(c.dir)
	if err != nil {
		reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", c.dir)
		ctx := logger.SetReqInfo(context.Background(), reqInfo)
		logger.LogIf(ctx, err)
		return true
	}
	usedPercent := (di.Total - di.Free) * 100 / di.Total
	return int(usedPercent) > c.maxDiskUsagePct
}

// Returns if size space can be allocated without exceeding
// max disk usable for caching
func (c *diskCache) diskAvailable(size int64) bool {
	di, err := disk.GetInfo(c.dir)
	if err != nil {
		reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", c.dir)
		ctx := logger.SetReqInfo(context.Background(), reqInfo)
		logger.LogIf(ctx, err)
		return false
	}
	usedPercent := (di.Total - (di.Free - uint64(size))) * 100 / di.Total
	return int(usedPercent) < c.maxDiskUsagePct
}
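
To make the watermarks above concrete, here is a minimal, self-contained sketch of the same integer arithmetic (not part of this commit; the 100 GiB drive and 70% limit are assumed example values):

package main

import "fmt"

func main() {
	var total, free uint64 = 100 << 30, 50 << 30 // hypothetical 100 GiB drive, 50 GiB free
	maxDiskUsagePct := 70
	minUsage := maxDiskUsagePct * 80 / 100 // 56: the "low usage" watermark (80% of 70)
	usedPercent := (total - free) * 100 / total
	fmt.Println(int(usedPercent) < minUsage)        // true: 50% used counts as "low"
	fmt.Println(int(usedPercent) > maxDiskUsagePct) // false: 50% used is not "high"
}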

// Purge cache entries that were not accessed.
func (c *diskCache) purge() {
	ctx := context.Background()
	for {
		olderThan := c.expiry
		for !c.diskUsageLow() {
			// delete unaccessed objects older than expiry duration
			expiry := UTCNow().AddDate(0, 0, -1*olderThan)
			olderThan /= 2
			if olderThan < 1 {
				break
			}
			deletedCount := 0

			objDirs, err := ioutil.ReadDir(c.dir)
			if err != nil {
				log.Fatal(err)
			}

			for _, obj := range objDirs {
				if obj.Name() == minioMetaBucket {
					continue
				}
				// stat entry to get atime
				var fi os.FileInfo
				fi, err := os.Stat(pathJoin(c.dir, obj.Name(), cacheDataFile))
				if err != nil {
					continue
				}

				objInfo, err := c.statCache(ctx, pathJoin(c.dir, obj.Name()))
				if err != nil {
					// delete any partially filled cache entry left behind.
					removeAll(pathJoin(c.dir, obj.Name()))
					continue
				}
				cc := cacheControlOpts(objInfo)
				if atime.Get(fi).Before(expiry) ||
					cc.isStale(objInfo.ModTime) {
					if err = removeAll(pathJoin(c.dir, obj.Name())); err != nil {
						logger.LogIf(ctx, err)
					}
					deletedCount++
					// break early if sufficient disk space reclaimed.
					if !c.diskUsageLow() {
						break
					}
				}
			}
			if deletedCount == 0 {
				break
			}
		}
		lastRunTime := time.Now()
		for {
			<-c.purgeChan
			timeElapsed := time.Since(lastRunTime)
			if timeElapsed > time.Hour {
				break
			}
		}
	}
}

// sets cache drive status
func (c *diskCache) setOnline(status bool) {
	c.onlineMutex.Lock()
	c.online = status
	c.onlineMutex.Unlock()
}

// returns true if cache drive is online
func (c *diskCache) IsOnline() bool {
	c.onlineMutex.RLock()
	defer c.onlineMutex.RUnlock()
	return c.online
}

// Stat returns ObjectInfo from disk cache
func (c *diskCache) Stat(ctx context.Context, bucket, object string) (oi ObjectInfo, err error) {
	cacheObjPath := getCacheSHADir(c.dir, bucket, object)
	oi, err = c.statCache(ctx, cacheObjPath)
	if err != nil {
		return
	}
	oi.Bucket = bucket
	oi.Name = object
	return
}

// statCache is a convenience function for purge() to get ObjectInfo for cached object
func (c *diskCache) statCache(ctx context.Context, cacheObjPath string) (oi ObjectInfo, e error) {
	// Stat the file to get file size.
	metaPath := path.Join(cacheObjPath, cacheMetaJSONFile)
	f, err := os.Open(metaPath)
	if err != nil {
		return oi, err
	}
	defer f.Close()

	meta := &cacheMeta{Version: cacheMetaVersion}
	if err := jsonLoad(f, meta); err != nil {
		return oi, err
	}
	fi, err := os.Stat(pathJoin(cacheObjPath, cacheDataFile))
	if err != nil {
		return oi, err
	}
	meta.Stat.ModTime = atime.Get(fi)
	return meta.ToObjectInfo("", ""), nil
}

// saves object metadata to disk cache
func (c *diskCache) saveMetadata(ctx context.Context, bucket, object string, meta map[string]string, actualSize int64) error {
	fileName := getCacheSHADir(c.dir, bucket, object)
	metaPath := pathJoin(fileName, cacheMetaJSONFile)

	f, err := os.Create(metaPath)
	if err != nil {
		return err
	}
	defer f.Close()

	m := cacheMeta{Meta: meta, Version: cacheMetaVersion}
	m.Stat.Size = actualSize
	m.Stat.ModTime = UTCNow()
	m.Checksum = CacheChecksumInfoV1{Algorithm: HighwayHash256S.String(), Blocksize: cacheBlkSize}
	jsonData, err := json.Marshal(m)
	if err != nil {
		return err
	}
	_, err = f.Write(jsonData)
	return err
}

// Backend metadata could have changed through server side copy - reset cache metadata if that is the case
func (c *diskCache) updateMetadataIfChanged(ctx context.Context, bucket, object string, bkObjectInfo, cacheObjInfo ObjectInfo) error {
	if !reflect.DeepEqual(bkObjectInfo.UserDefined, cacheObjInfo.UserDefined) ||
		bkObjectInfo.ETag != cacheObjInfo.ETag ||
		bkObjectInfo.ContentType != cacheObjInfo.ContentType ||
		bkObjectInfo.Expires != cacheObjInfo.Expires {
		return c.saveMetadata(ctx, bucket, object, getMetadata(bkObjectInfo), bkObjectInfo.Size)
	}
	return nil
}

func getCacheSHADir(dir, bucket, object string) string {
	return path.Join(dir, getSHA256Hash([]byte(path.Join(bucket, object))))
}
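
As a standalone illustration of the mapping above (the cache mount and object names are hypothetical, and this assumes getSHA256Hash returns a hex-encoded SHA-256): each bucket/object pair is cached under a directory named by the hash of its joined path, and that directory holds cache.json and part.1.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"path"
)

func main() {
	sum := sha256.Sum256([]byte(path.Join("mybucket", "photos/a.jpg")))
	// prints e.g. /mnt/cache1/<64 hex chars>; that dir holds cache.json and part.1
	fmt.Println(path.Join("/mnt/cache1", hex.EncodeToString(sum[:])))
}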

// Cache data to disk with bitrot checksum added for each block of 1MB
func (c *diskCache) bitrotWriteToCache(ctx context.Context, cachePath string, reader io.Reader, size int64) (int64, error) {
	if err := os.MkdirAll(cachePath, 0777); err != nil {
		return 0, err
	}
	bufSize := int64(readSizeV1)
	if size > 0 && bufSize > size {
		bufSize = size
	}
	filePath := path.Join(cachePath, cacheDataFile)

	if filePath == "" || reader == nil {
		return 0, errInvalidArgument
	}

	if err := checkPathLength(filePath); err != nil {
		return 0, err
	}
	f, err := os.Create(filePath)
	if err != nil {
		return 0, osErrToFSFileErr(err)
	}
	defer f.Close()

	var bytesWritten int64

	h := HighwayHash256S.New()

	bufp := c.pool.Get().(*[]byte)
	defer c.pool.Put(bufp)

	for {
		n, err := io.ReadFull(reader, *bufp)
		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF && err != io.ErrClosedPipe {
			return 0, err
		}
		eof := err == io.EOF || err == io.ErrUnexpectedEOF || err == io.ErrClosedPipe
		if n == 0 && size != 0 {
			// Reached EOF, nothing more to be done.
			break
		}
		h.Reset()
		if _, err := h.Write((*bufp)[:n]); err != nil {
			return 0, err
		}
		hashBytes := h.Sum(nil)
		if _, err = f.Write(hashBytes); err != nil {
			return 0, err
		}
		if _, err = f.Write((*bufp)[:n]); err != nil {
			return 0, err
		}
		bytesWritten += int64(n)
		if eof {
			break
		}
	}
	return bytesWritten, nil
}
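
The on-disk format written above interleaves a checksum before every block: [hash][block][hash][block]... A minimal sketch of that framing follows, using SHA-256 as a stand-in for the keyed HighwayHash256S algorithm the cache actually uses:

package main

import (
	"bytes"
	"crypto/sha256"
	"io"
)

// frameBlocks writes each fixed-size block prefixed by its checksum,
// mirroring the [hash][block]... layout of part.1 (sha256 stands in
// for the cache's keyed HighwayHash256S).
func frameBlocks(w io.Writer, r io.Reader, blkSize int) error {
	buf := make([]byte, blkSize)
	for {
		n, err := io.ReadFull(r, buf)
		if n > 0 {
			sum := sha256.Sum256(buf[:n])
			if _, werr := w.Write(sum[:]); werr != nil {
				return werr
			}
			if _, werr := w.Write(buf[:n]); werr != nil {
				return werr
			}
		}
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			return nil // short or empty final read: done
		}
		if err != nil {
			return err
		}
	}
}

func main() {
	var out bytes.Buffer
	// 3000 bytes with 1024-byte blocks => three framed blocks (1024, 1024, 952).
	_ = frameBlocks(&out, bytes.NewReader(make([]byte, 3000)), 1024)
}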

// Caches the object to disk
func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Reader, size int64, opts ObjectOptions) error {
	if c.diskUsageHigh() {
		select {
		case c.purgeChan <- struct{}{}:
		default:
		}
		return errDiskFull
	}
	if !c.diskAvailable(size) {
		return errDiskFull
	}
	cachePath := getCacheSHADir(c.dir, bucket, object)
	if err := os.MkdirAll(cachePath, 0777); err != nil {
		return err
	}
	bufSize := int64(readSizeV1)
	if size > 0 && bufSize > size {
		bufSize = size
	}

	n, err := c.bitrotWriteToCache(ctx, cachePath, data, size)
	if IsErr(err, baseErrs...) {
		c.setOnline(false)
	}
	if err != nil {
		return err
	}
	return c.saveMetadata(ctx, bucket, object, opts.UserDefined, n)
}

// checks streaming bitrot checksum of cached object before returning data
func (c *diskCache) bitrotReadFromCache(ctx context.Context, filePath string, offset, length int64, writer io.Writer) error {
	h := HighwayHash256S.New()

	checksumHash := make([]byte, h.Size())

	startBlock := offset / cacheBlkSize
	endBlock := (offset + length) / cacheBlkSize

	// get block start offset
	var blockStartOffset int64
	if startBlock > 0 {
		blockStartOffset = (cacheBlkSize + int64(h.Size())) * startBlock
	}

	tillLength := (cacheBlkSize + int64(h.Size())) * (endBlock - startBlock + 1)

	// Start offset cannot be negative.
	if offset < 0 {
		logger.LogIf(ctx, errUnexpected)
		return errUnexpected
	}

	// Writer cannot be nil.
	if writer == nil {
		logger.LogIf(ctx, errUnexpected)
		return errUnexpected
	}
	var blockOffset, blockLength int64
	rc, err := readCacheFileStream(filePath, blockStartOffset, tillLength)
	if err != nil {
		return err
	}
	bufp := c.pool.Get().(*[]byte)
	defer c.pool.Put(bufp)

	for block := startBlock; block <= endBlock; block++ {
		switch {
		case startBlock == endBlock:
			blockOffset = offset % cacheBlkSize
			blockLength = length
		case block == startBlock:
			blockOffset = offset % cacheBlkSize
			blockLength = cacheBlkSize - blockOffset
		case block == endBlock:
			blockOffset = 0
			blockLength = (offset + length) % cacheBlkSize
		default:
			blockOffset = 0
			blockLength = cacheBlkSize
		}
		if blockLength == 0 {
			break
		}
		if _, err := io.ReadFull(rc, checksumHash); err != nil {
			return err
		}

		h.Reset()
		n, err := io.ReadFull(rc, *bufp)
		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
			logger.LogIf(ctx, err)
			return err
		}
		eof := err == io.EOF || err == io.ErrUnexpectedEOF
		if n == 0 && length != 0 {
			// Reached EOF, nothing more to be done.
			break
		}

		if _, e := h.Write((*bufp)[:n]); e != nil {
			return e
		}
		hashBytes := h.Sum(nil)

		if !bytes.Equal(hashBytes, checksumHash) {
			err = HashMismatchError{hex.EncodeToString(checksumHash), hex.EncodeToString(hashBytes)}
			logger.LogIf(context.Background(), err)
			return err
		}

		if _, err := io.Copy(writer, bytes.NewReader((*bufp)[blockOffset:blockOffset+blockLength])); err != nil {
			if err != io.ErrClosedPipe {
				logger.LogIf(ctx, err)
			}
			return err
		}
		if eof {
			break
		}
	}

	return nil
}
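
A worked example of the block arithmetic above, with assumed values (1 MiB blocks, 32-byte checksums): a 1 MiB read starting at offset 1.5 MiB spans blocks 1 and 2, begins at file offset (1 MiB + 32) * 1 inside part.1, and consumes the first block from an in-block offset of 0.5 MiB.

package main

import "fmt"

func main() {
	const blk, hashSize = int64(1 << 20), int64(32) // assumed 1 MiB blocks, 32-byte hash
	offset, length := int64(3<<19), int64(1<<20)    // read 1 MiB starting at 1.5 MiB
	startBlock, endBlock := offset/blk, (offset+length)/blk
	fmt.Println(startBlock, endBlock)                           // 1 2
	fmt.Println((blk + hashSize) * startBlock)                  // 1048608: where reading starts in part.1
	fmt.Println((blk + hashSize) * (endBlock - startBlock + 1)) // 2097216: bytes to read through
	fmt.Println(offset % blk)                                   // 524288: offset within the first block
}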

// Get returns ObjectInfo and reader for object from disk cache
func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions) (gr *GetObjectReader, err error) {
	var objInfo ObjectInfo
	cacheObjPath := getCacheSHADir(c.dir, bucket, object)

	if objInfo, err = c.statCache(ctx, cacheObjPath); err != nil {
		return nil, toObjectErr(err, bucket, object)
	}

	var nsUnlocker = func() {}
	// For a directory, we need to send a reader that returns no bytes.
	if hasSuffix(object, SlashSeparator) {
		// The lock taken above is released when
		// objReader.Close() is called by the caller.
		return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts.CheckCopyPrecondFn, nsUnlocker)
	}

	fn, off, length, nErr := NewGetObjectReader(rs, objInfo, opts.CheckCopyPrecondFn, nsUnlocker)
	if nErr != nil {
		return nil, nErr
	}

	filePath := path.Join(cacheObjPath, cacheDataFile)
	pr, pw := io.Pipe()
	go func() {
		pw.CloseWithError(c.bitrotReadFromCache(ctx, filePath, off, length, pw))
	}()
	// Cleanup function to cause the go routine above to exit, in
	// case of incomplete read.
	pipeCloser := func() { pr.Close() }

	return fn(pr, h, opts.CheckCopyPrecondFn, pipeCloser)
}

// Deletes the cached object
func (c *diskCache) Delete(ctx context.Context, bucket, object string) (err error) {
	cachePath := getCacheSHADir(c.dir, bucket, object)
	return removeAll(cachePath)
}

// convenience function to check if object is cached on this diskCache
func (c *diskCache) Exists(ctx context.Context, bucket, object string) bool {
	if _, err := os.Stat(getCacheSHADir(c.dir, bucket, object)); err != nil {
		return false
	}
	return true
}
@@ -1,537 +0,0 @@
/*
 * MinIO Cloud Storage, (C) 2018 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"
	"sync"
	"time"

	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/disk"
	"github.com/minio/minio/pkg/lock"
)

const (
	// cache.json object metadata for cached objects.
	cacheMetaJSONFile = "cache.json"

	cacheEnvDelimiter = ";"
)

// cacheFSObjects implements the cache backend operations.
type cacheFSObjects struct {
	*FSObjects
	// caching drive path (from cache "drives" in config.json)
	dir string
	// expiry in days specified in config.json
	expiry int
	// max disk usage pct
	maxDiskUsagePct int
	// purge() listens on this channel to start the cache-purge process
	purgeChan chan struct{}
	// mark false if drive is offline
	online bool
	// mutex to protect updates to online variable
	onlineMutex *sync.RWMutex
}

// Inits the cache directory if it is not init'ed already.
// Initializing implies creation of new FS Object layer.
func newCacheFSObjects(dir string, expiry int, maxDiskUsagePct int) (*cacheFSObjects, error) {
	// Assign a new UUID for FS minio mode. Each server instance
	// gets its own UUID for temporary file transaction.
	fsUUID := mustGetUUID()

	// Initialize meta volume, if volume already exists ignores it.
	if err := initMetaVolumeFS(dir, fsUUID); err != nil {
		return nil, fmt.Errorf("Unable to initialize '.minio.sys' meta volume, %s", err)
	}

	trashPath := pathJoin(dir, minioMetaBucket, cacheTrashDir)
	if err := os.MkdirAll(trashPath, 0777); err != nil {
		return nil, err
	}

	if expiry == 0 {
		expiry = globalCacheExpiry
	}

	// Initialize fs objects.
	fsObjects := &FSObjects{
		fsPath:       dir,
		metaJSONFile: cacheMetaJSONFile,
		fsUUID:       fsUUID,
		rwPool: &fsIOPool{
			readersMap: make(map[string]*lock.RLockedFile),
		},
		nsMutex:       newNSLock(false),
		listPool:      NewTreeWalkPool(globalLookupTimeout),
		appendFileMap: make(map[string]*fsAppendFile),
	}

	go fsObjects.cleanupStaleMultipartUploads(context.Background(), GlobalMultipartCleanupInterval, GlobalMultipartExpiry, GlobalServiceDoneCh)

	cacheFS := cacheFSObjects{
		FSObjects:       fsObjects,
		dir:             dir,
		expiry:          expiry,
		maxDiskUsagePct: maxDiskUsagePct,
		purgeChan:       make(chan struct{}),
		online:          true,
		onlineMutex:     &sync.RWMutex{},
	}
	return &cacheFS, nil
}

// Returns if the disk usage is low.
// Disk usage is low if usage is < 80% of cacheMaxDiskUsagePct
// Ex. for a 100GB disk, if maxUsage is configured as 70% then cacheMaxDiskUsagePct is 70G
// hence disk usage is low if the disk usage is less than 56G (because 80% of 70G is 56G)
func (cfs *cacheFSObjects) diskUsageLow() bool {
	minUsage := cfs.maxDiskUsagePct * 80 / 100
	di, err := disk.GetInfo(cfs.dir)
	if err != nil {
		reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", cfs.dir)
		ctx := logger.SetReqInfo(context.Background(), reqInfo)
		logger.LogIf(ctx, err)
		return false
	}
	usedPercent := (di.Total - di.Free) * 100 / di.Total
	return int(usedPercent) < minUsage
}

// Return if the disk usage is high.
// Disk usage is high if disk used is > cacheMaxDiskUsagePct
func (cfs *cacheFSObjects) diskUsageHigh() bool {
	di, err := disk.GetInfo(cfs.dir)
	if err != nil {
		reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", cfs.dir)
		ctx := logger.SetReqInfo(context.Background(), reqInfo)
		logger.LogIf(ctx, err)
		return true
	}
	usedPercent := (di.Total - di.Free) * 100 / di.Total
	return int(usedPercent) > cfs.maxDiskUsagePct
}

// Returns if size space can be allocated without exceeding
// max disk usable for caching
func (cfs *cacheFSObjects) diskAvailable(size int64) bool {
	di, err := disk.GetInfo(cfs.dir)
	if err != nil {
		reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", cfs.dir)
		ctx := logger.SetReqInfo(context.Background(), reqInfo)
		logger.LogIf(ctx, err)
		return false
	}
	usedPercent := (di.Total - (di.Free - uint64(size))) * 100 / di.Total
	return int(usedPercent) < cfs.maxDiskUsagePct
}

// purges all content marked trash from the cache.
func (cfs *cacheFSObjects) purgeTrash() {
	ticker := time.NewTicker(time.Minute * cacheCleanupInterval)
	defer ticker.Stop()

	for {
		select {
		case <-GlobalServiceDoneCh:
			return
		case <-ticker.C:
			trashPath := path.Join(cfs.fsPath, minioMetaBucket, cacheTrashDir)
			entries, err := readDir(trashPath)
			if err != nil {
				return
			}
			for _, entry := range entries {
				ctx := logger.SetReqInfo(context.Background(), &logger.ReqInfo{})
				fi, err := fsStatVolume(ctx, pathJoin(trashPath, entry))
				if err != nil {
					continue
				}
				dir := path.Join(trashPath, fi.Name())

				// Delete all expired cache content.
				fsRemoveAll(ctx, dir)
			}
		}
	}
}

// Purge cache entries that were not accessed.
func (cfs *cacheFSObjects) purge() {
	delimiter := SlashSeparator
	maxKeys := 1000
	ctx := context.Background()
	for {
		olderThan := cfs.expiry
		for !cfs.diskUsageLow() {
			// delete unaccessed objects older than expiry duration
			expiry := UTCNow().AddDate(0, 0, -1*olderThan)
			olderThan /= 2
			if olderThan < 1 {
				break
			}
			deletedCount := 0
			buckets, err := cfs.ListBuckets(ctx)
			if err != nil {
				logger.LogIf(ctx, err)
			}
			// Reset cache online status if drive was offline earlier.
			if !cfs.IsOnline() {
				cfs.setOnline(true)
			}
			for _, bucket := range buckets {
				var continuationToken string
				var marker string
				for {
					objects, err := cfs.ListObjects(ctx, bucket.Name, marker, continuationToken, delimiter, maxKeys)
					if err != nil {
						break
					}

					if !objects.IsTruncated {
						break
					}
					marker = objects.NextMarker
					for _, object := range objects.Objects {
						// purge objects that qualify because of cache-control directives or
						// past cache expiry duration.
						if !filterFromCache(object.UserDefined) ||
							!isStaleCache(object) ||
							object.AccTime.After(expiry) {
							continue
						}
						if err = cfs.DeleteObject(ctx, bucket.Name, object.Name); err != nil {
							logger.LogIf(ctx, err)
							continue
						}
						deletedCount++
					}
				}
			}
			if deletedCount == 0 {
				// to avoid a busy loop
				time.Sleep(time.Minute * 30)
			}
		}
		<-cfs.purgeChan
	}
}

// sets cache drive status
func (cfs *cacheFSObjects) setOnline(status bool) {
	cfs.onlineMutex.Lock()
	cfs.online = status
	cfs.onlineMutex.Unlock()
}

// returns true if cache drive is online
func (cfs *cacheFSObjects) IsOnline() bool {
	cfs.onlineMutex.RLock()
	defer cfs.onlineMutex.RUnlock()
	return cfs.online
}

// Caches the object to disk
func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) error {
	if cfs.diskUsageHigh() {
		select {
		case cfs.purgeChan <- struct{}{}:
		default:
		}
		return errDiskFull
	}
	if !cfs.diskAvailable(data.Size()) {
		return errDiskFull
	}
	if _, err := cfs.GetBucketInfo(ctx, bucket); err != nil {
		pErr := cfs.MakeBucketWithLocation(ctx, bucket, "")
		if pErr != nil {
			return pErr
		}
	}
	_, err := cfs.PutObject(ctx, bucket, object, data, opts)
	// if err is due to disk being offline, mark cache drive as offline
	if IsErr(err, baseErrs...) {
		cfs.setOnline(false)
	}
	return err
}

// Returns the handle for the cached object
func (cfs *cacheFSObjects) Get(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
	return cfs.GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts)
}

// Deletes the cached object
func (cfs *cacheFSObjects) Delete(ctx context.Context, bucket, object string) (err error) {
	return cfs.DeleteObject(ctx, bucket, object)
}

// convenience function to check if object is cached on this cacheFSObjects
func (cfs *cacheFSObjects) Exists(ctx context.Context, bucket, object string) bool {
	_, err := cfs.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
	return err == nil
}

// Identical to fs PutObject operation except that it uses ETag in metadata
// headers.
func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, retErr error) {
	data := r.Reader
	fs := cfs.FSObjects
	// Lock the object.
	objectLock := fs.nsMutex.NewNSLock(ctx, bucket, object)
	if err := objectLock.GetLock(globalObjectTimeout); err != nil {
		return objInfo, err
	}
	defer objectLock.Unlock()

	// No metadata is set, allocate a new one.
	meta := make(map[string]string)
	for k, v := range opts.UserDefined {
		meta[k] = v
	}

	var err error

	// Validate if bucket name is valid and exists.
	if _, err = fs.statBucketDir(ctx, bucket); err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket)
	}

	fsMeta := newFSMetaV1()
	fsMeta.Meta = meta

	// This is a special case with size as '0' and object ends
	// with a slash separator, we treat it like a valid operation
	// and return success.
	if isObjectDir(object, data.Size()) {
		// Check if an object is present as one of the parent dir.
		if fs.parentDirIsObject(ctx, bucket, path.Dir(object)) {
			return ObjectInfo{}, toObjectErr(errFileParentIsFile, bucket, object)
		}
		if err = mkdirAll(pathJoin(fs.fsPath, bucket, object), 0777); err != nil {
			return ObjectInfo{}, toObjectErr(err, bucket, object)
		}
		var fi os.FileInfo
		if fi, err = fsStatDir(ctx, pathJoin(fs.fsPath, bucket, object)); err != nil {
			return ObjectInfo{}, toObjectErr(err, bucket, object)
		}
		return fsMeta.ToObjectInfo(bucket, object, fi), nil
	}

	if err = checkPutObjectArgs(ctx, bucket, object, fs, data.Size()); err != nil {
		return ObjectInfo{}, err
	}

	// Check if an object is present as one of the parent dir.
	if fs.parentDirIsObject(ctx, bucket, path.Dir(object)) {
		return ObjectInfo{}, toObjectErr(errFileParentIsFile, bucket, object)
	}

	// Validate input data size and it can never be less than zero.
	if data.Size() < -1 {
		logger.LogIf(ctx, errInvalidArgument)
		return ObjectInfo{}, errInvalidArgument
	}

	var wlk *lock.LockedFile
	if bucket != minioMetaBucket {
		bucketMetaDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix)
		fsMetaPath := pathJoin(bucketMetaDir, bucket, object, fs.metaJSONFile)

		wlk, err = fs.rwPool.Create(fsMetaPath)
		if err != nil {
			logger.LogIf(ctx, err)
			return ObjectInfo{}, toObjectErr(err, bucket, object)
		}
		// This close will allow for locks to be synchronized on `fs.json`.
		defer wlk.Close()
		defer func() {
			// Remove meta file when PutObject encounters any error
			if retErr != nil {
				tmpDir := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID)
				fsRemoveMeta(ctx, bucketMetaDir, fsMetaPath, tmpDir)
			}
		}()
	}

	// Uploaded object will first be written to the temporary location which will eventually
	// be renamed to the actual location. It is first written to the temporary location
	// so that cleaning it up will be easy if the server goes down.
	tempObj := mustGetUUID()

	// Allocate a buffer to Read() from request body
	bufSize := int64(readSizeV1)
	if size := data.Size(); size > 0 && bufSize > size {
		bufSize = size
	}

	buf := make([]byte, int(bufSize))
	fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, tempObj)
	bytesWritten, err := fsCreateFile(ctx, fsTmpObjPath, data, buf, data.Size())
	if err != nil {
		fsRemoveFile(ctx, fsTmpObjPath)
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}
	if fsMeta.Meta["etag"] == "" {
		fsMeta.Meta["etag"] = r.MD5CurrentHexString()
	}
	// Should return IncompleteBody{} error when reader has fewer
	// bytes than specified in request header.
	if bytesWritten < data.Size() {
		fsRemoveFile(ctx, fsTmpObjPath)
		return ObjectInfo{}, IncompleteBody{}
	}

	// Delete the temporary object in the case of a
	// failure. If PutObject succeeds, then there would be
	// nothing to delete.
	defer fsRemoveFile(ctx, fsTmpObjPath)

	// Entire object was written to the temp location, now it's safe to rename it to the actual location.
	fsNSObjPath := pathJoin(fs.fsPath, bucket, object)
	if err = fsRenameFile(ctx, fsTmpObjPath, fsNSObjPath); err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	if bucket != minioMetaBucket {
		// Write FS metadata after a successful namespace operation.
		if _, err = fsMeta.WriteTo(wlk); err != nil {
			return ObjectInfo{}, toObjectErr(err, bucket, object)
		}
	}

	// Stat the file to fetch timestamp, size.
	fi, err := fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object))
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}
	// Success.
	return fsMeta.ToObjectInfo(bucket, object, fi), nil
}

// Implements S3 compatible initiate multipart API. Operation here is identical
// to fs backend implementation - with the exception that cache FS uses the uploadID
// generated on the backend
func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, uploadID string, opts ObjectOptions) (string, error) {
	if cfs.diskUsageHigh() {
		select {
		case cfs.purgeChan <- struct{}{}:
		default:
		}
		return "", errDiskFull
	}

	if _, err := cfs.GetBucketInfo(ctx, bucket); err != nil {
		pErr := cfs.MakeBucketWithLocation(ctx, bucket, "")
		if pErr != nil {
			return "", pErr
		}
	}
	fs := cfs.FSObjects
	if err := checkNewMultipartArgs(ctx, bucket, object, fs); err != nil {
		return "", toObjectErr(err, bucket)
	}

	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return "", toObjectErr(err, bucket)
	}

	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)

	err := mkdirAll(uploadIDDir, 0755)
	if err != nil {
		logger.LogIf(ctx, err)
		return "", err
	}

	// Initialize fs.json values.
	fsMeta := newFSMetaV1()
	fsMeta.Meta = opts.UserDefined

	fsMetaBytes, err := json.Marshal(fsMeta)
	if err != nil {
		logger.LogIf(ctx, err)
		return "", err
	}

	if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0644); err != nil {
		logger.LogIf(ctx, err)
		return "", err
	}
	return uploadID, nil
}

// moveBucketToTrash clears cacheFSObjects of bucket contents and moves it to trash folder.
func (cfs *cacheFSObjects) moveBucketToTrash(ctx context.Context, bucket string) (err error) {
	fs := cfs.FSObjects
	bucketLock := fs.nsMutex.NewNSLock(ctx, bucket, "")
	if err = bucketLock.GetLock(globalObjectTimeout); err != nil {
		return err
	}
	defer bucketLock.Unlock()
	bucketDir, err := fs.getBucketDir(ctx, bucket)
	if err != nil {
		return toObjectErr(err, bucket)
	}
	trashPath := pathJoin(cfs.fsPath, minioMetaBucket, cacheTrashDir)
	expiredDir := path.Join(trashPath, bucket)
	// Attempt to move regular bucket to expired directory.
	if err = fsRenameDir(bucketDir, expiredDir); err != nil {
		logger.LogIf(ctx, err)
		return toObjectErr(err, bucket)
	}
	// Cleanup all the bucket metadata.
	ominioMetadataBucketDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket)
	nminioMetadataBucketDir := pathJoin(trashPath, MustGetUUID())
	logger.LogIf(ctx, fsRenameDir(ominioMetadataBucketDir, nminioMetadataBucketDir))
	return nil
}

// Renames a directory to a new path, handling long
// paths for windows automatically.
func fsRenameDir(dirPath, newPath string) (err error) {
	if dirPath == "" || newPath == "" {
		return errInvalidArgument
	}

	if err = checkPathLength(dirPath); err != nil {
		return err
	}
	if err = checkPathLength(newPath); err != nil {
		return err
	}
	if err = os.Rename(dirPath, newPath); err != nil {
		if os.IsNotExist(err) {
			return errVolumeNotFound
		} else if isSysErrNotEmpty(err) {
			return errVolumeNotEmpty
		}
		return err
	}
	return nil
}
@@ -0,0 +1,170 @@
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"io"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/minio/minio/cmd/crypto"
)

type cacheControl struct {
	expiry   time.Time
	maxAge   int
	sMaxAge  int
	minFresh int
	maxStale int
}

func (c cacheControl) isEmpty() bool {
	return c == cacheControl{}
}

func (c cacheControl) isStale(modTime time.Time) bool {
	if c.isEmpty() {
		return false
	}
	now := time.Now()

	if c.sMaxAge > 0 && c.sMaxAge < int(now.Sub(modTime).Seconds()) {
		return true
	}
	if c.maxAge > 0 && c.maxAge < int(now.Sub(modTime).Seconds()) {
		return true
	}

	if !c.expiry.Equal(time.Time{}) && c.expiry.Before(time.Now().Add(time.Duration(c.maxStale))) {
		return true
	}

	if c.minFresh > 0 && c.minFresh <= int(now.Sub(modTime).Seconds()) {
		return true
	}

	return false
}
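
For example (values assumed, not from the commit): an entry cached 700 seconds ago with Cache-Control: max-age=600 has outlived its freshness lifetime, so purge() evicts it and GETs revalidate against the backend. A minimal sketch of the max-age check above:

package main

import (
	"fmt"
	"time"
)

func main() {
	maxAge := 600                                 // from Cache-Control: max-age=600
	modTime := time.Now().Add(-700 * time.Second) // entry cached 700s ago
	stale := maxAge > 0 && maxAge < int(time.Since(modTime).Seconds())
	fmt.Println(stale) // true: 700s elapsed exceeds the 600s freshness lifetime
}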

// returns struct with cache-control settings from user metadata.
func cacheControlOpts(o ObjectInfo) (c cacheControl) {
	m := o.UserDefined
	if o.Expires != timeSentinel {
		c.expiry = o.Expires
	}

	var headerVal string
	for k, v := range m {
		if strings.ToLower(k) == "cache-control" {
			headerVal = v
		}
	}
	if headerVal == "" {
		return
	}
	headerVal = strings.ToLower(headerVal)
	headerVal = strings.TrimSpace(headerVal)

	vals := strings.Split(headerVal, ",")
	for _, val := range vals {
		val = strings.TrimSpace(val)
		p := strings.Split(val, "=")

		if len(p) != 2 {
			continue
		}
		if p[0] == "max-age" ||
			p[0] == "s-maxage" ||
			p[0] == "min-fresh" ||
			p[0] == "max-stale" {
			i, err := strconv.Atoi(p[1])
			if err != nil {
				return cacheControl{}
			}
			if p[0] == "max-age" {
				c.maxAge = i
			}
			if p[0] == "s-maxage" {
				c.sMaxAge = i
			}
			if p[0] == "min-fresh" {
				c.minFresh = i
			}
			if p[0] == "max-stale" {
				c.maxStale = i
			}
		}
	}
	return c
}

// backendDownError returns true if err is due to backend failure or faulty disk if in server mode
func backendDownError(err error) bool {
	_, backendDown := err.(BackendDown)
	return backendDown || IsErr(err, baseErrs...)
}

// IsCacheable returns if the object should be saved in the cache.
func (o ObjectInfo) IsCacheable() bool {
	return !crypto.IsEncrypted(o.UserDefined)
}

// reads file cached on disk from offset up to length
func readCacheFileStream(filePath string, offset, length int64) (io.ReadCloser, error) {
	if filePath == "" || offset < 0 {
		return nil, errInvalidArgument
	}
	if err := checkPathLength(filePath); err != nil {
		return nil, err
	}

	fr, err := os.Open(filePath)
	if err != nil {
		return nil, osErrToFSFileErr(err)
	}
	// Stat to get the size of the file at path.
	st, err := fr.Stat()
	if err != nil {
		err = osErrToFSFileErr(err)
		return nil, err
	}

	// Verify that it's a regular file, since subsequent Seek is undefined otherwise.
	if !st.Mode().IsRegular() {
		return nil, errIsNotRegular
	}

	if err = os.Chtimes(filePath, time.Now(), st.ModTime()); err != nil {
		return nil, err
	}

	// Seek to the requested offset.
	if offset > 0 {
		_, err = fr.Seek(offset, io.SeekStart)
		if err != nil {
			return nil, err
		}
	}
	return struct {
		io.Reader
		io.Closer
	}{Reader: io.LimitReader(fr, length), Closer: fr}, nil
}
@@ -0,0 +1,60 @@
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"net/http"
	"reflect"
	"testing"
	"time"
)

func TestGetCacheControlOpts(t *testing.T) {
	expiry, _ := time.Parse(http.TimeFormat, "Wed, 21 Oct 2015 07:28:00 GMT")

	testCases := []struct {
		cacheControlHeaderVal string
		expiryHeaderVal       time.Time
		expectedCacheControl  cacheControl
		expectedErr           bool
	}{
		{"", timeSentinel, cacheControl{}, false},
		{"max-age=2592000, public", timeSentinel, cacheControl{maxAge: 2592000, sMaxAge: 0, minFresh: 0, expiry: time.Time{}}, false},
		{"max-age=2592000, no-store", timeSentinel, cacheControl{maxAge: 2592000, sMaxAge: 0, minFresh: 0, expiry: time.Time{}}, false},
		{"must-revalidate, max-age=600", timeSentinel, cacheControl{maxAge: 600, sMaxAge: 0, minFresh: 0, expiry: time.Time{}}, false},
		{"s-maxAge=2500, max-age=600", timeSentinel, cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Time{}}, false},
		{"s-maxAge=2500, max-age=600", expiry, cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Date(2015, time.October, 21, 07, 28, 00, 00, time.UTC)}, false},
		{"s-maxAge=2500, max-age=600s", timeSentinel, cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Time{}}, true},
	}
	var m map[string]string

	for i, testCase := range testCases {
		m = make(map[string]string)
		m["cache-control"] = testCase.cacheControlHeaderVal
		if testCase.expiryHeaderVal != timeSentinel {
			m["expires"] = testCase.expiryHeaderVal.String()
		}
		c := cacheControlOpts(ObjectInfo{UserDefined: m, Expires: testCase.expiryHeaderVal})
		if testCase.expectedErr && (c != cacheControl{}) {
			t.Errorf("expected err for case %d", i)
		}
		if !testCase.expectedErr && !reflect.DeepEqual(c, testCase.expectedCacheControl) {
			t.Errorf("expected %v got %v for case %d", testCase.expectedCacheControl, c, i)
		}
	}
}
File diff suppressed because it is too large