diff --git a/cmd/erasure-common.go b/cmd/erasure-common.go
index 5f193abb3..4fd83dd45 100644
--- a/cmd/erasure-common.go
+++ b/cmd/erasure-common.go
@@ -37,6 +37,37 @@ func (er erasureObjects) getLoadBalancedLocalDisks() (newDisks []StorageAPI) {
 	return newDisks
 }
 
+func (er erasureObjects) getOnlineDisks() (newDisks []StorageAPI) {
+	disks := er.getDisks()
+	var wg sync.WaitGroup
+	var mu sync.Mutex
+	for _, i := range hashOrder(UTCNow().String(), len(disks)) {
+		i := i
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			if disks[i-1] == nil {
+				return
+			}
+			di, err := disks[i-1].DiskInfo(context.Background())
+			if err != nil || di.Healing {
+				// - Do not consume disks which are not reachable
+				//   unformatted or simply not accessible for some reason.
+				//
+				// - Do not consume disks which are being healed
+				//
+				// - Future: skip busy disks
+				return
+			}
+
+			mu.Lock()
+			newDisks = append(newDisks, disks[i-1])
+			mu.Unlock()
+		}()
+	}
+	return newDisks
+}
+
 // getLoadBalancedNDisks - fetches load balanced (sufficiently randomized) disk slice
 // with N disks online. If ndisks is zero or negative, then it will returns all disks,
 // same if ndisks is greater than the number of all disks.
@@ -89,8 +120,8 @@ func (er erasureObjects) getLoadBalancedDisks(optimized bool) []StorageAPI {
 			}
 
 			mu.Lock()
-			// Capture disks usage wise
-			newDisks[di.Used] = append(newDisks[di.Used], disks[i-1])
+			// Capture disks usage wise upto resolution of MiB
+			newDisks[di.Used/1024/1024] = append(newDisks[di.Used/1024/1024], disks[i-1])
 			mu.Unlock()
 		}()
 	}
diff --git a/cmd/erasure.go b/cmd/erasure.go
index b453adad1..45f0c9ed1 100644
--- a/cmd/erasure.go
+++ b/cmd/erasure.go
@@ -252,7 +252,7 @@ func (er erasureObjects) crawlAndGetDataUsage(ctx context.Context, buckets []Buc
 	}
 
 	// Collect disks we can use.
-	disks := er.getLoadBalancedDisks(true)
+	disks := er.getOnlineDisks()
 	if len(disks) == 0 {
 		logger.Info(color.Green("data-crawl:") + " all disks are offline or being healed, skipping crawl")
 		return nil
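
For context, here is a minimal, self-contained sketch of the concurrent filtering pattern the new `getOnlineDisks` relies on: probe every disk's `DiskInfo` in parallel, drop disks that return an error or report `Healing`, and collect the rest under a mutex. The `disk`, `diskInfo`, `fakeDisk`, and `onlineDisks` names below are assumptions made for illustration and are not MinIO's `StorageAPI` or its real helper; the sketch also skips the `hashOrder` randomization and explicitly calls `wg.Wait()` before returning so the collected slice is complete and race-free.

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// diskInfo mirrors only the fields the crawler filter cares about;
// the real DiskInfo struct has many more (assumed type for this sketch).
type diskInfo struct {
	Used    uint64
	Healing bool
}

// disk is a hypothetical stand-in for MinIO's StorageAPI with just the
// one call the snippet needs.
type disk interface {
	DiskInfo() (diskInfo, error)
}

// fakeDisk lets the example run without any real storage backend.
type fakeDisk struct {
	info diskInfo
	err  error
}

func (d fakeDisk) DiskInfo() (diskInfo, error) { return d.info, d.err }

// onlineDisks probes every disk concurrently and keeps only the ones
// that respond and are not healing, the same rule the diff applies.
func onlineDisks(disks []disk) []disk {
	var (
		wg     sync.WaitGroup
		mu     sync.Mutex
		online []disk
	)
	for _, d := range disks {
		if d == nil {
			continue
		}
		d := d
		wg.Add(1)
		go func() {
			defer wg.Done()
			di, err := d.DiskInfo()
			if err != nil || di.Healing {
				// Skip unreachable, unformatted, or healing disks.
				return
			}
			mu.Lock()
			online = append(online, d)
			mu.Unlock()
		}()
	}
	// Wait for every probe to finish before returning the slice.
	wg.Wait()
	return online
}

func main() {
	disks := []disk{
		fakeDisk{info: diskInfo{Used: 1 << 30}},
		fakeDisk{err: errors.New("disk offline")},
		fakeDisk{info: diskInfo{Healing: true}},
		nil,
	}
	fmt.Printf("%d of %d disks usable for the crawl\n", len(onlineDisks(disks)), len(disks))
}
```

On the second hunk: dividing `di.Used` by `1024/1024` coarsens the map key from bytes to MiB, so disks whose usage differs only by a few bytes fall into the same bucket of the load-balanced map instead of each getting their own entry.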