fix: issues with handling delete markers in metacache (#11150)

Additional cases handled

- fix situations where healing is not
  triggered on failed writes and deletes.

- consider object exists during listing when
  metadata can be successfully decoded.
master
Harshavardhana 4 years ago committed by GitHub
parent 274bbad5cb
commit 35fafb837b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 20
      cmd/erasure-multipart.go
  2. 42
      cmd/erasure-object.go
  3. 15
      cmd/metacache-entries.go
  4. 2
      cmd/metacache-server-pool.go
  5. 8
      cmd/metacache-set.go
  6. 2
      cmd/metacache-stream.go

@@ -846,21 +846,21 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
}
// Check if there is any offline disk and add it to the MRF list
for i, disk := range onlineDisks {
if disk == nil || storageDisks[i] == nil {
er.addPartial(bucket, object, fi.VersionID)
break
for _, disk := range onlineDisks {
if disk != nil && disk.IsOnline() {
continue
}
er.addPartial(bucket, object, fi.VersionID)
break
}
for i := 0; i < len(onlineDisks); i++ {
if onlineDisks[i] == nil {
continue
if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
// Object info is the same in all disks, so we can pick
// the first meta from online disk
fi = partsMetadata[i]
break
}
// Object info is the same in all disks, so we can pick
// the first meta from online disk
fi = partsMetadata[i]
break
}
// Success, return object info.

@@ -727,20 +727,20 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
// Whether a disk was initially or becomes offline
// during this upload, send it to the MRF list.
for i := 0; i < len(onlineDisks); i++ {
if onlineDisks[i] == nil || storageDisks[i] == nil {
er.addPartial(bucket, object, fi.VersionID)
break
if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
continue
}
er.addPartial(bucket, object, fi.VersionID)
break
}
for i := 0; i < len(onlineDisks); i++ {
if onlineDisks[i] == nil {
continue
if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
// Object info is the same in all disks, so we can pick
// the first meta from online disk
fi = partsMetadata[i]
break
}
// Object info is the same in all disks, so we can pick
// the first meta from online disk
fi = partsMetadata[i]
break
}
return fi.ToObjectInfo(bucket, object), nil
@@ -922,16 +922,15 @@ func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objec
for _, version := range versions {
// Check if there is any offline disk and add it to the MRF list
for _, disk := range storageDisks {
if disk == nil {
// ignore delete markers for quorum
if version.Deleted {
continue
}
// all other direct versionId references we should
// ensure no dangling file is left over.
er.addPartial(bucket, version.Name, version.VersionID)
break
if disk != nil && disk.IsOnline() {
// Skip attempted heal on online disks.
continue
}
// all other direct versionId references we should
// ensure no dangling file is left over.
er.addPartial(bucket, version.Name, version.VersionID)
break
}
}
@@ -1042,10 +1041,11 @@ func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string
}
for _, disk := range storageDisks {
if disk == nil {
er.addPartial(bucket, object, opts.VersionID)
break
if disk != nil && disk.IsOnline() {
continue
}
er.addPartial(bucket, object, opts.VersionID)
break
}
return ObjectInfo{

@@ -73,11 +73,13 @@ func (e *metaCacheEntry) matches(other *metaCacheEntry, bucket string) bool {
if len(e.metadata) != len(other.metadata) || e.name != other.name {
return false
}
eFi, eErr := e.fileInfo(bucket)
oFi, oErr := e.fileInfo(bucket)
oFi, oErr := other.fileInfo(bucket)
if eErr != nil || oErr != nil {
return eErr == oErr
}
return eFi.ModTime.Equal(oFi.ModTime) && eFi.Size == oFi.Size && eFi.VersionID == oFi.VersionID
}
@@ -213,11 +215,12 @@ func (m metaCacheEntries) resolve(r *metadataResolutionParams) (selected *metaCa
}
// Get new entry metadata
objExists++
fiv, err := entry.fileInfo(r.bucket)
if err != nil {
continue
}
objExists++
if selFIV == nil {
selected = entry
selFIV = fiv
@@ -227,14 +230,8 @@ func (m metaCacheEntries) resolve(r *metadataResolutionParams) (selected *metaCa
if selected.matches(entry, r.bucket) {
continue
}
// Select latest modtime.
if fiv.ModTime.After(selFIV.ModTime) {
selected = entry
selFIV = fiv
continue
}
}
// If directory, we need quorum.
if dirExists > 0 && dirExists < r.dirQuorum {
return nil, false

@@ -147,13 +147,11 @@ func (z *erasureServerPools) listPath(ctx context.Context, o listPathOptions) (e
var wg sync.WaitGroup
var errs []error
allAtEOF := true
asked := 0
mu.Lock()
// Ask all sets and merge entries.
for _, zone := range z.serverPools {
for _, set := range zone.sets {
wg.Add(1)
asked++
go func(i int, set *erasureObjects) {
defer wg.Done()
e, err := set.listPath(ctx, o)

@@ -168,11 +168,9 @@ func (o *listPathOptions) gatherResults(in <-chan metaCacheEntry) func() (metaCa
o.debugln("not in dir", o.Prefix, o.Separator)
continue
}
if !o.InclDeleted && entry.isObject() {
if entry.isLatestDeletemarker() {
o.debugln("latest delete")
continue
}
if !o.InclDeleted && entry.isObject() && entry.isLatestDeletemarker() {
o.debugln("latest is delete marker")
continue
}
if o.Limit > 0 && results.len() >= o.Limit {
// We have enough and we have more.

@@ -519,7 +519,7 @@ func (r *metacacheReader) readN(n int, inclDeleted, inclDirs bool, prefix string
if !inclDirs && meta.isDir() {
continue
}
if meta.isDir() && !inclDeleted && meta.isLatestDeletemarker() {
if !inclDeleted && meta.isLatestDeletemarker() {
continue
}
res = append(res, meta)

Loading…
Cancel
Save