fix: when discarding results, do not attempt the in-memory metacache writer (#11163)

Optimizations include

- do not write the metacache block if the size of the
  block is '0' and it is the first block — when listing
  is attempted for a transient prefix, this helps to
  avoid creating lots of empty metacache entries for
  `minioMetaBucket`

- avoid the entire initialization sequence of cacheCh
  and metacacheBlockWriter if we are simply going to skip
  them when discardResults is set to true.

- no need to hold write locks while writing metacache
  blocks — each block is unique per bucket and per prefix,
  and is written by a single node.
master
Harshavardhana 4 years ago committed by GitHub
parent 45ea161f8d
commit 027e17468a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 14
      cmd/data-crawler.go
  2. 10
      cmd/erasure-object.go
  3. 12
      cmd/global-heal.go
  4. 127
      cmd/metacache-set.go
  5. 1
      cmd/object-api-interface.go

@ -570,8 +570,13 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
bucket: bucket, bucket: bucket,
object: entry.name, object: entry.name,
versionID: "", versionID: "",
opts: &madmin.HealOpts{
Remove: true,
},
}, madmin.HealItemObject) }, madmin.HealItemObject)
logger.LogIf(ctx, err) if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
logger.LogIf(ctx, err)
}
foundObjs = foundObjs || err == nil foundObjs = foundObjs || err == nil
return return
} }
@ -583,8 +588,13 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
bucket: bucket, bucket: bucket,
object: fiv.Name, object: fiv.Name,
versionID: ver.VersionID, versionID: ver.VersionID,
opts: &madmin.HealOpts{
Remove: true,
},
}, madmin.HealItemObject) }, madmin.HealItemObject)
logger.LogIf(ctx, err) if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
logger.LogIf(ctx, err)
}
foundObjs = foundObjs || err == nil foundObjs = foundObjs || err == nil
} }
}, },

@ -674,11 +674,13 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
return ObjectInfo{}, IncompleteBody{Bucket: bucket, Object: object} return ObjectInfo{}, IncompleteBody{Bucket: bucket, Object: object}
} }
lk := er.NewNSLock(bucket, object) if !opts.NoLock {
if err := lk.GetLock(ctx, globalOperationTimeout); err != nil { lk := er.NewNSLock(bucket, object)
return ObjectInfo{}, err if err := lk.GetLock(ctx, globalOperationTimeout); err != nil {
return ObjectInfo{}, err
}
defer lk.Unlock()
} }
defer lk.Unlock()
for i, w := range writers { for i, w := range writers {
if w == nil { if w == nil {

@ -130,7 +130,9 @@ func healErasureSet(ctx context.Context, setIndex int, buckets []BucketInfo, dis
bucket: minioMetaBucket, bucket: minioMetaBucket,
object: backendEncryptedFile, object: backendEncryptedFile,
}, madmin.HealItemMetadata); err != nil { }, madmin.HealItemMetadata); err != nil {
logger.LogIf(ctx, err) if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
logger.LogIf(ctx, err)
}
} }
// Heal all buckets with all objects // Heal all buckets with all objects
@ -139,7 +141,9 @@ func healErasureSet(ctx context.Context, setIndex int, buckets []BucketInfo, dis
if err := bgSeq.queueHealTask(healSource{ if err := bgSeq.queueHealTask(healSource{
bucket: bucket.Name, bucket: bucket.Name,
}, madmin.HealItemBucket); err != nil { }, madmin.HealItemBucket); err != nil {
logger.LogIf(ctx, err) if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
logger.LogIf(ctx, err)
}
} }
var entryChs []FileInfoVersionsCh var entryChs []FileInfoVersionsCh
@ -179,7 +183,9 @@ func healErasureSet(ctx context.Context, setIndex int, buckets []BucketInfo, dis
object: version.Name, object: version.Name,
versionID: version.VersionID, versionID: version.VersionID,
}, madmin.HealItemObject); err != nil { }, madmin.HealItemObject); err != nil {
logger.LogIf(ctx, err) if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
logger.LogIf(ctx, err)
}
} }
} }
} }

@ -615,13 +615,18 @@ func (er *erasureObjects) listPath(ctx context.Context, o listPathOptions) (entr
} }
// Create output for our results. // Create output for our results.
cacheCh := make(chan metaCacheEntry, metacacheBlockSize) var cacheCh chan metaCacheEntry
if !o.discardResult {
cacheCh = make(chan metaCacheEntry, metacacheBlockSize)
}
// Create filter for results. // Create filter for results.
filterCh := make(chan metaCacheEntry, 100) filterCh := make(chan metaCacheEntry, 100)
filteredResults := o.gatherResults(filterCh) filteredResults := o.gatherResults(filterCh)
closeChannels := func() { closeChannels := func() {
close(cacheCh) if !o.discardResult {
close(cacheCh)
}
close(filterCh) close(filterCh)
} }
@ -657,54 +662,62 @@ func (er *erasureObjects) listPath(ctx context.Context, o listPathOptions) (entr
}() }()
const retryDelay = 200 * time.Millisecond const retryDelay = 200 * time.Millisecond
const maxTries = 10 const maxTries = 5
// Write results to disk. var bw *metacacheBlockWriter
bw := newMetacacheBlockWriter(cacheCh, func(b *metacacheBlock) error { // Don't save single object listings.
if o.discardResult { if !o.discardResult {
// Don't save single object listings. // Write results to disk.
return nil bw = newMetacacheBlockWriter(cacheCh, func(b *metacacheBlock) error {
} // if the block is 0 bytes and its a first block skip it.
o.debugln("listPath: saving block", b.n, "to", o.objectPath(b.n)) // skip only this for Transient caches.
r, err := hash.NewReader(bytes.NewBuffer(b.data), int64(len(b.data)), "", "", int64(len(b.data)), false) if len(b.data) == 0 && b.n == 0 && o.Transient {
logger.LogIf(ctx, err) return nil
custom := b.headerKV()
_, err = er.putObject(ctx, minioMetaBucket, o.objectPath(b.n), NewPutObjReader(r, nil, nil), ObjectOptions{UserDefined: custom})
if err != nil {
metaMu.Lock()
if meta.error != "" {
meta.status = scanStateError
meta.error = err.Error()
}
metaMu.Unlock()
cancel()
return err
}
if b.n == 0 {
return nil
}
// Update block 0 metadata.
var retries int
for {
err := er.updateObjectMeta(ctx, minioMetaBucket, o.objectPath(0), b.headerKV(), ObjectOptions{})
if err == nil {
break
} }
switch err.(type) { o.debugln("listPath: saving block", b.n, "to", o.objectPath(b.n))
case ObjectNotFound: r, err := hash.NewReader(bytes.NewReader(b.data), int64(len(b.data)), "", "", int64(len(b.data)), false)
logger.LogIf(ctx, err)
custom := b.headerKV()
_, err = er.putObject(ctx, minioMetaBucket, o.objectPath(b.n), NewPutObjReader(r, nil, nil), ObjectOptions{
UserDefined: custom,
NoLock: true, // No need to hold namespace lock, each prefix caches uniquely.
})
if err != nil {
metaMu.Lock()
if meta.error != "" {
meta.status = scanStateError
meta.error = err.Error()
}
metaMu.Unlock()
cancel()
return err return err
case InsufficientReadQuorum:
default:
logger.LogIf(ctx, err)
} }
if retries >= maxTries { if b.n == 0 {
return err return nil
} }
retries++ // Update block 0 metadata.
time.Sleep(retryDelay) var retries int
} for {
return nil err := er.updateObjectMeta(ctx, minioMetaBucket, o.objectPath(0), b.headerKV(), ObjectOptions{})
}) if err == nil {
break
}
switch err.(type) {
case ObjectNotFound:
return err
case InsufficientReadQuorum:
default:
logger.LogIf(ctx, err)
}
if retries >= maxTries {
return err
}
retries++
time.Sleep(retryDelay)
}
return nil
})
}
// How to resolve results. // How to resolve results.
resolver := metadataResolutionParams{ resolver := metadataResolutionParams{
@ -721,14 +734,18 @@ func (er *erasureObjects) listPath(ctx context.Context, o listPathOptions) (entr
filterPrefix: o.FilterPrefix, filterPrefix: o.FilterPrefix,
minDisks: listingQuorum, minDisks: listingQuorum,
agreed: func(entry metaCacheEntry) { agreed: func(entry metaCacheEntry) {
cacheCh <- entry if !o.discardResult {
cacheCh <- entry
}
filterCh <- entry filterCh <- entry
}, },
partial: func(entries metaCacheEntries, nAgreed int, errs []error) { partial: func(entries metaCacheEntries, nAgreed int, errs []error) {
// Results Disagree :-( // Results Disagree :-(
entry, ok := entries.resolve(&resolver) entry, ok := entries.resolve(&resolver)
if ok { if ok {
cacheCh <- *entry if !o.discardResult {
cacheCh <- *entry
}
filterCh <- *entry filterCh <- *entry
} }
}, },
@ -749,12 +766,14 @@ func (er *erasureObjects) listPath(ctx context.Context, o listPathOptions) (entr
metaMu.Unlock() metaMu.Unlock()
closeChannels() closeChannels()
if err := bw.Close(); err != nil { if !o.discardResult {
metaMu.Lock() if err := bw.Close(); err != nil {
meta.error = err.Error() metaMu.Lock()
meta.status = scanStateError meta.error = err.Error()
meta, err = o.updateMetacacheListing(meta, rpc) meta.status = scanStateError
metaMu.Unlock() meta, err = o.updateMetacacheListing(meta, rpc)
metaMu.Unlock()
}
} }
}() }()

@ -51,6 +51,7 @@ type ObjectOptions struct {
DeleteMarkerReplicationStatus string // Is only set in DELETE operations DeleteMarkerReplicationStatus string // Is only set in DELETE operations
VersionPurgeStatus VersionPurgeStatusType // Is only set in DELETE operations for delete marker version to be permanently deleted. VersionPurgeStatus VersionPurgeStatusType // Is only set in DELETE operations for delete marker version to be permanently deleted.
TransitionStatus string // status of the transition TransitionStatus string // status of the transition
NoLock bool // indicates to lower layers if the caller is expecting to hold locks.
} }
// BucketOptions represents bucket options for ObjectLayer bucket operations // BucketOptions represents bucket options for ObjectLayer bucket operations

Loading…
Cancel
Save