fix: remove the requirement for healing buckets in ListBucketsHeal (#11098)

With the new refactor of bucket healing, a bucket is healed
automatically, including its metadata, so there is no need to
redundantly heal buckets in ListBucketsHeal as well; remove
it.
Harshavardhana 4 years ago committed by GitHub
parent 3e83643320
commit 8368ab76aa
12 changed files (changed line counts in parentheses):
  1. cmd/admin-heal-ops.go (2)
  2. cmd/background-newdisks-heal-ops.go (2)
  3. cmd/bucket-metadata-sys.go (10)
  4. cmd/erasure-bucket.go (33)
  5. cmd/erasure-server-sets.go (52)
  6. cmd/erasure-sets.go (63)
  7. cmd/fs-v1.go (5)
  8. cmd/gateway-unsupported.go (10)
  9. cmd/metacache-set.go (4)
  10. cmd/object-api-interface.go (2)
  11. cmd/prepare-storage.go (11)
  12. cmd/server-main.go (2)
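
To illustrate the shape of the change: callers that previously asked the object layer for a dedicated heal listing now simply list buckets and heal each one, since healing a bucket also covers its metadata. The sketch below is not code from this commit; BucketInfo, HealOpts and fakeLayer are trimmed-down stand-ins for MinIO's ObjectLayer, BucketInfo and madmin.HealOpts.

package main

import (
    "context"
    "fmt"
    "sort"
)

// Trimmed-down stand-ins for the real MinIO types (illustration only).
type BucketInfo struct{ Name string }
type HealOpts struct{ DeepScan bool }

// fakeLayer plays the role of an ObjectLayer implementation.
type fakeLayer struct{ buckets []string }

// ListBuckets returns all known buckets, sorted by name.
func (f *fakeLayer) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
    names := append([]string(nil), f.buckets...)
    sort.Strings(names)
    out := make([]BucketInfo, 0, len(names))
    for _, n := range names {
        out = append(out, BucketInfo{Name: n})
    }
    return out, nil
}

// HealBucket heals one bucket; after this commit the real implementation also
// heals the bucket metadata, so callers need no separate metadata step.
func (f *fakeLayer) HealBucket(ctx context.Context, bucket string, opts HealOpts) error {
    fmt.Printf("healing bucket %q (deep=%v), metadata included\n", bucket, opts.DeepScan)
    return nil
}

func main() {
    ctx := context.Background()
    layer := &fakeLayer{buckets: []string{"photos", "logs"}}

    // Callers that previously used ListBucketsHeal now just list and heal.
    buckets, err := layer.ListBuckets(ctx)
    if err != nil {
        panic(err)
    }
    for _, b := range buckets {
        _ = layer.HealBucket(ctx, b.Name, HealOpts{DeepScan: true})
    }
}

The hunks below apply that substitution (ListBucketsHeal -> ListBuckets) at each call site and delete the now-unused ListBucketsHeal and MakeMultipleBuckets helpers.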

cmd/admin-heal-ops.go
@@ -868,7 +868,7 @@ func (h *healSequence) healBuckets(objAPI ObjectLayer, bucketsOnly bool) error {
 return h.healBucket(objAPI, h.bucket, bucketsOnly)
 }
-buckets, err := objAPI.ListBucketsHeal(h.ctx)
+buckets, err := objAPI.ListBuckets(h.ctx)
 if err != nil {
 return errFnHealFromAPIErr(h.ctx, err)
 }

cmd/background-newdisks-heal-ops.go
@@ -160,7 +160,7 @@ wait:
 erasureSetInZoneDisksToHeal[zoneIdx][setIndex] = append(erasureSetInZoneDisksToHeal[zoneIdx][setIndex], disk)
 }
-buckets, _ := z.ListBucketsHeal(ctx)
+buckets, _ := z.ListBuckets(ctx)
 for i, setMap := range erasureSetInZoneDisksToHeal {
 for setIndex, disks := range setMap {
 for _, disk := range disks {

cmd/bucket-metadata-sys.go
@@ -191,7 +191,6 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
 // This function should only be used with
 // - GetBucketInfo
 // - ListBuckets
-// - ListBucketsHeal (only in case of erasure coding mode)
 // For all other bucket specific metadata, use the relevant
 // calls implemented specifically for each of those features.
 func (sys *BucketMetadataSys) Get(bucket string) (BucketMetadata, error) {
@@ -440,11 +439,10 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck
 for index := range buckets {
 index := index
 g.Go(func() error {
-if _, err := objAPI.HealBucket(ctx, buckets[index].Name, madmin.HealOpts{
-Remove: true,
-}); err != nil {
-return err
-}
+_, _ = objAPI.HealBucket(ctx, buckets[index].Name, madmin.HealOpts{
+// Ensure heal opts for bucket metadata be deep healed all the time.
+ScanMode: madmin.HealDeepScan,
+})
 meta, err := loadBucketMetadata(ctx, objAPI, buckets[index].Name)
 if err != nil {
 return err
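
For context on the hunk above: bucket metadata is loaded for all buckets in parallel; a best-effort deep heal is fired per bucket and its result deliberately discarded, while a failed metadata load still fails the whole group. A minimal sketch of that pattern, assuming golang.org/x/sync/errgroup in place of MinIO's internal errgroup package and hypothetical loadBucketMetadata/healBucketBestEffort placeholders:

package main

import (
    "context"
    "fmt"

    "golang.org/x/sync/errgroup"
)

// loadBucketMetadata is a placeholder for the real metadata loader.
func loadBucketMetadata(ctx context.Context, bucket string) (string, error) {
    return "metadata-of-" + bucket, nil
}

// healBucketBestEffort is a placeholder for objAPI.HealBucket; its error is ignored.
func healBucketBestEffort(ctx context.Context, bucket string) error {
    return nil
}

func concurrentLoad(ctx context.Context, buckets []string) ([]string, error) {
    g, ctx := errgroup.WithContext(ctx)
    metas := make([]string, len(buckets))
    for index := range buckets {
        index := index // capture the loop variable for the closure (pre-Go 1.22 idiom)
        g.Go(func() error {
            // Heal result is ignored on purpose; only a metadata load error fails the group.
            _ = healBucketBestEffort(ctx, buckets[index])
            meta, err := loadBucketMetadata(ctx, buckets[index])
            if err != nil {
                return err
            }
            metas[index] = meta
            return nil
        })
    }
    if err := g.Wait(); err != nil {
        return nil, err
    }
    return metas, nil
}

func main() {
    metas, err := concurrentLoad(context.Background(), []string{"a", "b", "c"})
    fmt.Println(metas, err)
}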

cmd/erasure-bucket.go
@@ -31,39 +31,6 @@ var bucketOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errUnform
 // list all errors that can be ignored in a bucket metadata operation.
 var bucketMetadataOpIgnoredErrs = append(bucketOpIgnoredErrs, errVolumeNotFound)
-// MakeMultipleBuckets - create a list of buckets
-func (er erasureObjects) MakeMultipleBuckets(ctx context.Context, bucketsInfo ...BucketInfo) error {
-storageDisks := er.getDisks()
-g := errgroup.WithNErrs(len(storageDisks))
-buckets := make([]string, len(bucketsInfo))
-for i := range bucketsInfo {
-buckets[i] = bucketsInfo[i].Name
-}
-// Make a volume entry on all underlying storage disks.
-for index := range storageDisks {
-index := index
-g.Go(func() error {
-if storageDisks[index] != nil {
-if err := storageDisks[index].MakeVolBulk(ctx, buckets...); err != nil {
-if !errors.Is(err, errVolumeExists) {
-logger.LogIf(ctx, err)
-}
-return err
-}
-return nil
-}
-return errDiskNotFound
-}, index)
-}
-writeQuorum := getWriteQuorum(len(storageDisks))
-err := reduceWriteQuorumErrs(ctx, g.Wait(), bucketOpIgnoredErrs, writeQuorum)
-return toObjectErr(err)
-}
 /// Bucket operations
 // MakeBucket - make a bucket.

cmd/erasure-server-sets.go
@@ -430,28 +430,6 @@ func (z *erasureServerPools) CrawlAndGetDataUsage(ctx context.Context, bf *bloom
 return firstErr
 }
-func (z *erasureServerPools) MakeMultipleBuckets(ctx context.Context, buckets ...BucketInfo) error {
-g := errgroup.WithNErrs(len(z.serverPools))
-// Create buckets in parallel across all sets.
-for index := range z.serverPools {
-index := index
-g.Go(func() error {
-return z.serverPools[index].MakeMultipleBuckets(ctx, buckets...)
-}, index)
-}
-errs := g.Wait()
-// Return the first encountered error
-for _, err := range errs {
-if err != nil {
-return err
-}
-}
-return nil
-}
 // MakeBucketWithLocation - creates a new bucket across all serverPools simultaneously
 // even if one of the sets fail to create buckets, we proceed all the successful
 // operations.
@@ -1181,13 +1159,8 @@ func (z *erasureServerPools) HealBucket(ctx context.Context, bucket string, opts
 Bucket: bucket,
 }
-// Ensure heal opts for bucket metadata be deep healed all the time.
-opts.ScanMode = madmin.HealDeepScan
-// Ignore the results on purpose.
-if _, err := z.HealObject(ctx, minioMetaBucket, pathJoin(bucketConfigPrefix, bucket), "", opts); err != nil {
-logger.LogIf(ctx, fmt.Errorf("Healing bucket metadata for %s failed", bucket))
-}
+// Attempt heal on the bucket metadata, ignore any failures
+_, _ = z.HealObject(ctx, minioMetaBucket, pathJoin(bucketConfigPrefix, bucket, bucketMetadataFile), "", opts)
 for _, zone := range z.serverPools {
 result, err := zone.HealBucket(ctx, bucket, opts)
@@ -1203,6 +1176,7 @@ func (z *erasureServerPools) HealBucket(ctx context.Context, bucket string, opts
 r.Before.Drives = append(r.Before.Drives, result.Before.Drives...)
 r.After.Drives = append(r.After.Drives, result.After.Drives...)
 }
+
 return r, nil
 }
@@ -1372,26 +1346,6 @@ func (z *erasureServerPools) HealObject(ctx context.Context, bucket, object, ver
 }
 }
-func (z *erasureServerPools) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) {
-var healBuckets []BucketInfo
-for _, zone := range z.serverPools {
-bucketsInfo, err := zone.ListBucketsHeal(ctx)
-if err != nil {
-continue
-}
-healBuckets = append(healBuckets, bucketsInfo...)
-}
-for i := range healBuckets {
-meta, err := globalBucketMetadataSys.Get(healBuckets[i].Name)
-if err == nil {
-healBuckets[i].Created = meta.Created
-}
-}
-return healBuckets, nil
-}
 // GetMetrics - no op
 func (z *erasureServerPools) GetMetrics(ctx context.Context) (*Metrics, error) {
 logger.LogIf(ctx, NotImplemented{})

cmd/erasure-sets.go
@@ -562,31 +562,6 @@ func (s *erasureSets) Shutdown(ctx context.Context) error {
 return nil
 }
-// MakeMultipleBuckets - make many buckets at once.
-func (s *erasureSets) MakeMultipleBuckets(ctx context.Context, buckets ...BucketInfo) error {
-g := errgroup.WithNErrs(len(s.sets))
-// Create buckets in parallel across all sets.
-for index := range s.sets {
-index := index
-g.Go(func() error {
-return s.sets[index].MakeMultipleBuckets(ctx, buckets...)
-}, index)
-}
-errs := g.Wait()
-// Return the first encountered error
-for _, err := range errs {
-if err != nil {
-return err
-}
-}
-// Success.
-return nil
-}
 // MakeBucketLocation - creates a new bucket across all sets simultaneously,
 // then return the first encountered error
 func (s *erasureSets) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error {
@@ -739,8 +714,20 @@ func undoDeleteBucketSets(ctx context.Context, bucket string, sets []*erasureObj
 // sort here just for simplification. As per design it is assumed
 // that all buckets are present on all sets.
 func (s *erasureSets) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) {
-// Always lists from the same set signified by the empty string.
-return s.ListBucketsHeal(ctx)
+var listBuckets []BucketInfo
+var healBuckets = map[string]VolInfo{}
+for _, set := range s.sets {
+// lists all unique buckets across drives.
+if err := listAllBuckets(ctx, set.getDisks(), healBuckets); err != nil {
+return nil, err
+}
+}
+for _, v := range healBuckets {
+listBuckets = append(listBuckets, BucketInfo(v))
+}
+sort.Sort(byBucketName(listBuckets))
+return listBuckets, nil
 }
 // --- Object Operations ---
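
The replacement ListBuckets body above merges per-drive listings into a map keyed by bucket name, so a bucket present on only some drives is still reported exactly once, and then sorts the result by name. A simplified sketch of that merge, with a plain [][]string standing in for the per-drive results of listAllBuckets and sort.Slice in place of the byBucketName sorter:

package main

import (
    "fmt"
    "sort"
)

// BucketInfo is a trimmed-down stand-in for the real type.
type BucketInfo struct{ Name string }

// listUniqueBuckets collapses duplicate bucket names reported by different drives
// and returns the unique set sorted by name.
func listUniqueBuckets(perDrive [][]string) []BucketInfo {
    seen := map[string]BucketInfo{}
    for _, drive := range perDrive {
        for _, name := range drive {
            if _, ok := seen[name]; !ok {
                seen[name] = BucketInfo{Name: name}
            }
        }
    }
    out := make([]BucketInfo, 0, len(seen))
    for _, b := range seen {
        out = append(out, b)
    }
    sort.Slice(out, func(i, j int) bool { return out[i].Name < out[j].Name })
    return out
}

func main() {
    drives := [][]string{{"logs", "photos"}, {"photos", "backups"}}
    fmt.Println(listUniqueBuckets(drives)) // [{backups} {logs} {photos}]
}

The ListBucketsHeal variant removed in the next hunk performed the same listing but also re-created the buckets via MakeMultipleBuckets; now that bucket healing covers creating any missing copies, plain listing is enough here.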
@@ -1319,28 +1306,6 @@ func (s *erasureSets) HealObject(ctx context.Context, bucket, object, versionID
 return s.getHashedSet(object).HealObject(ctx, bucket, object, versionID, opts)
 }
-// Lists all buckets which need healing.
-func (s *erasureSets) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) {
-var listBuckets []BucketInfo
-var healBuckets = map[string]VolInfo{}
-for _, set := range s.sets {
-// lists all unique buckets across drives.
-if err := listAllBuckets(ctx, set.getDisks(), healBuckets); err != nil {
-return nil, err
-}
-}
-for _, v := range healBuckets {
-listBuckets = append(listBuckets, BucketInfo(v))
-}
-sort.Sort(byBucketName(listBuckets))
-if err := s.MakeMultipleBuckets(ctx, listBuckets...); err != nil {
-return listBuckets, err
-}
-return listBuckets, nil
-}
 // PutObjectTags - replace or add tags to an existing object
 func (s *erasureSets) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) error {
 return s.getHashedSet(object).PutObjectTags(ctx, bucket, object, tags, opts)

cmd/fs-v1.go
@@ -1565,11 +1565,6 @@ func (fs *FSObjects) HealObjects(ctx context.Context, bucket, prefix string, opt
 return NotImplemented{}
 }
-// ListBucketsHeal - list all buckets to be healed. Valid only for Erasure, returns ListBuckets() in single drive mode.
-func (fs *FSObjects) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) {
-return fs.ListBuckets(ctx)
-}
 // GetMetrics - no op
 func (fs *FSObjects) GetMetrics(ctx context.Context) (*Metrics, error) {
 logger.LogIf(ctx, NotImplemented{})

cmd/gateway-unsupported.go
@@ -51,11 +51,6 @@ func (a GatewayUnsupported) SetDriveCount() int {
 return 0
 }
-// MakeMultipleBuckets is dummy stub for gateway.
-func (a GatewayUnsupported) MakeMultipleBuckets(ctx context.Context, buckets ...BucketInfo) error {
-return NotImplemented{}
-}
 // ListMultipartUploads lists all multipart uploads.
 func (a GatewayUnsupported) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi ListMultipartsInfo, err error) {
 return lmi, NotImplemented{}
@@ -175,11 +170,6 @@ func (a GatewayUnsupported) HealBucket(ctx context.Context, bucket string, opts
 return madmin.HealResultItem{}, NotImplemented{}
 }
-// ListBucketsHeal - Not implemented stub
-func (a GatewayUnsupported) ListBucketsHeal(ctx context.Context) (buckets []BucketInfo, err error) {
-return nil, NotImplemented{}
-}
 // HealObject - Not implemented stub
 func (a GatewayUnsupported) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (h madmin.HealResultItem, e error) {
 return h, NotImplemented{}

cmd/metacache-set.go
@@ -277,8 +277,10 @@ func getMetacacheBlockInfo(fi FileInfo, block int) (*metacacheBlock, error) {
 return &tmp, json.Unmarshal([]byte(v), &tmp)
 }
+const metacachePrefix = ".metacache"
+
 func metacachePrefixForID(bucket, id string) string {
-return pathJoin("buckets", bucket, ".metacache", id)
+return pathJoin(bucketMetaPrefix, bucket, metacachePrefix, id)
 }
 // objectPath returns the object path of the cache.
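
The refactor above only lifts the string literals into named constants; the resulting path is unchanged. A small sketch, assuming pathJoin behaves like the standard library's path.Join and using example bucket/id values:

package main

import (
    "fmt"
    "path"
)

const (
    bucketMetaPrefix = "buckets"    // value of the literal used before the refactor
    metacachePrefix  = ".metacache" // constant introduced by this change
)

// metacachePrefixForID mirrors the refactored helper, with path.Join standing in
// for MinIO's pathJoin.
func metacachePrefixForID(bucket, id string) string {
    return path.Join(bucketMetaPrefix, bucket, metacachePrefix, id)
}

func main() {
    fmt.Println(metacachePrefixForID(".minio.sys", "")) // buckets/.minio.sys/.metacache
    fmt.Println(metacachePrefixForID("photos", "1234")) // buckets/photos/.metacache/1234
}

An empty id yields the per-bucket metacache root, which is what the prepare-storage change below passes when it moves the whole folder aside.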

cmd/object-api-interface.go
@@ -82,7 +82,6 @@ type ObjectLayer interface {
 StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) // local queries only local disks
 // Bucket operations.
-MakeMultipleBuckets(ctx context.Context, bucketInfo ...BucketInfo) error
 MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error
 GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error)
 ListBuckets(ctx context.Context) (buckets []BucketInfo, err error)
@@ -125,7 +124,6 @@ type ObjectLayer interface {
 HealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (madmin.HealResultItem, error)
 HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (madmin.HealResultItem, error)
 HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn HealObjectFn) error
-ListBucketsHeal(ctx context.Context) (buckets []BucketInfo, err error)
 // Policy operations
 SetBucketPolicy(context.Context, string, *policy.Policy) error

cmd/prepare-storage.go
@@ -125,6 +125,17 @@ func formatErasureCleanupTmpLocalEndpoints(endpoints Endpoints) error {
 osErrToFileErr(err))
 }
+// Move .minio.sys/buckets/.minio.sys/metacache transient list cache
+// folder to speed up startup routines.
+tmpMetacacheOld := pathJoin(epPath, minioMetaTmpBucket+"-old", mustGetUUID())
+if err := renameAll(pathJoin(epPath, minioMetaBucket, metacachePrefixForID(minioMetaBucket, "")),
+tmpMetacacheOld); err != nil && err != errFileNotFound {
+return fmt.Errorf("unable to rename (%s -> %s) %w",
+pathJoin(epPath, minioMetaBucket+metacachePrefixForID(minioMetaBucket, "")),
+tmpMetacacheOld,
+osErrToFileErr(err))
+}
 // Removal of tmp-old folder is backgrounded completely.
 go removeAll(pathJoin(epPath, minioMetaTmpBucket+"-old"))
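
The block added above parks the transient metacache folder under a tmp-old location with a rename, which is cheap, and leaves the actual deletion to a background goroutine so startup does not block on removing many small cache files. A simplified sketch of that pattern using plain os calls; the directory names are examples, and renameAll/removeAll in MinIO are wrappers with extra error normalization:

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// parkAndRemove renames cacheDir into tmpOldDir, then deletes tmpOldDir in the background.
func parkAndRemove(cacheDir, tmpOldDir string) error {
    parked := filepath.Join(tmpOldDir, "metacache-old") // unique name; MinIO uses a UUID here
    if err := os.MkdirAll(tmpOldDir, 0o755); err != nil {
        return err
    }
    if err := os.Rename(cacheDir, parked); err != nil {
        if os.IsNotExist(err) {
            return nil // nothing to clean up
        }
        return fmt.Errorf("unable to rename (%s -> %s): %w", cacheDir, parked, err)
    }
    // Removal of the parked folder is backgrounded completely.
    go func() {
        _ = os.RemoveAll(tmpOldDir)
    }()
    return nil
}

func main() {
    fmt.Println(parkAndRemove("/tmp/demo-metacache", "/tmp/demo-tmp-old"))
}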

cmd/server-main.go
@@ -310,7 +310,7 @@ func initAllSubsystems(ctx context.Context, newObject ObjectLayer) (err error) {
 rquorum := InsufficientReadQuorum{}
 wquorum := InsufficientWriteQuorum{}
-buckets, err := newObject.ListBucketsHeal(ctx)
+buckets, err := newObject.ListBuckets(ctx)
 if err != nil {
 return fmt.Errorf("Unable to list buckets to heal: %w", err)
 }
