avoid unnecessary logging on fresh/newly replaced drives (#9470)

The data usage tracker and crawler were logging non-actionable information to the
console. This output is not useful and resolves on its own in almost all
deployments, so keep such logging to a minimum.
Branch: master
Author: Harshavardhana (committed by GitHub)
Parent: bc61417284
Commit: 498389123e
Changed files:
  1. cmd/data-update-tracker.go (6 changes)
  2. cmd/data-usage-cache.go (3 changes)
  3. cmd/data-usage.go (24 changes)
  4. cmd/posix.go (4 changes)
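All four files apply the same idea: errors that only mean the metadata volume or
bucket does not exist yet on a fresh or newly replaced drive are skipped before
logging, while unexpected errors are still reported. Below is a minimal standalone
sketch of that pattern; the saveToDrives helper and the trackerFile path are
illustrative assumptions, not MinIO's actual identifiers.

// Sketch only: skip "not exist" errors when persisting per-drive metadata,
// since a fresh/newly replaced drive has no metadata directory yet and the
// condition heals on its own once the drive is formatted.
package main

import (
	"errors"
	"io/fs"
	"log"
	"os"
	"path/filepath"
)

// trackerFile is an illustrative relative path, not MinIO's exact layout.
const trackerFile = ".minio.sys/buckets/.tracker.bin"

func saveToDrives(drives []string, data []byte) {
	for _, drive := range drives {
		p := filepath.Join(drive, trackerFile)
		if err := os.WriteFile(p, data, 0o644); err != nil {
			if errors.Is(err, fs.ErrNotExist) {
				// Fresh drive: metadata directory not created yet,
				// nothing actionable to report.
				continue
			}
			// Unexpected errors are still surfaced.
			log.Printf("saving %s: %v", p, err)
		}
	}
}

func main() {
	saveToDrives([]string{"/tmp/drive1", "/tmp/drive2"}, []byte("example"))
}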

cmd/data-update-tracker.go
@@ -45,8 +45,8 @@ const (
 	dataUpdateTrackerFP           = 0.99
 	dataUpdateTrackerQueueSize    = 10000
-	dataUpdateTrackerFilename     = dataUsageBucket + SlashSeparator + ".tracker.bin"
 	dataUpdateTrackerVersion      = 1
+	dataUpdateTrackerFilename     = minioMetaBucket + SlashSeparator + bucketMetaPrefix + SlashSeparator + ".tracker.bin"
 	dataUpdateTrackerSaveInterval = 5 * time.Minute
 
 	// Reset bloom filters every n cycle
@@ -195,6 +195,7 @@ func (d *dataUpdateTracker) load(ctx context.Context, drives ...string) {
 		return
 	}
 	for _, drive := range drives {
 		cacheFormatPath := pathJoin(drive, dataUpdateTrackerFilename)
 		f, err := os.Open(cacheFormatPath)
 		if err != nil {
@@ -255,6 +256,9 @@ func (d *dataUpdateTracker) startSaver(ctx context.Context, interval time.Durati
 			cacheFormatPath := pathJoin(drive, dataUpdateTrackerFilename)
 			err := ioutil.WriteFile(cacheFormatPath, buf.Bytes(), os.ModePerm)
 			if err != nil {
+				if os.IsNotExist(err) {
+					continue
+				}
 				logger.LogIf(ctx, err)
 				continue
 			}

cmd/data-usage-cache.go
@@ -360,6 +360,9 @@ func (d *dataUsageCache) save(ctx context.Context, store ObjectLayer, name strin
 		name,
 		NewPutObjReader(r, nil, nil),
 		ObjectOptions{})
+	if isErrBucketNotFound(err) {
+		return nil
+	}
 	return err
 }

cmd/data-usage.go
@@ -37,17 +37,19 @@ import (
 )
 
 const (
+	envDataUsageCrawlConf  = "MINIO_DISK_USAGE_CRAWL_ENABLE"
+	envDataUsageCrawlDelay = "MINIO_DISK_USAGE_CRAWL_DELAY"
+	envDataUsageCrawlDebug = "MINIO_DISK_USAGE_CRAWL_DEBUG"
+
+	dataUsageRoot   = SlashSeparator
+	dataUsageBucket = minioMetaBucket + SlashSeparator + bucketMetaPrefix
+
 	dataUsageObjName   = ".usage.json"
 	dataUsageCacheName = ".usage-cache.bin"
-	envDataUsageCrawlConf  = "MINIO_DISK_USAGE_CRAWL_ENABLE"
-	envDataUsageCrawlDelay = "MINIO_DISK_USAGE_CRAWL_DELAY"
-	envDataUsageCrawlDebug = "MINIO_DISK_USAGE_CRAWL_DEBUG"
+	dataUsageBloomName = ".bloomcycle.bin"
 
 	dataUsageSleepPerFolder  = 1 * time.Millisecond
 	dataUsageSleepDefMult    = 10.0
 	dataUsageUpdateDirCycles = 16
-	dataUsageRoot   = SlashSeparator
-	dataUsageBucket = minioMetaBucket + SlashSeparator + bucketMetaPrefix
-	dataUsageBloomName = ".bloomcycle.bin"
 	dataUsageStartDelay = 5 * time.Minute // Time to wait on startup and between cycles.
 )
@@ -104,7 +106,9 @@ func runDataUsageInfo(ctx context.Context, objAPI ObjectLayer) {
 			}
 
 			_, err = objAPI.PutObject(ctx, dataUsageBucket, dataUsageBloomName, NewPutObjReader(r, nil, nil), ObjectOptions{})
-			logger.LogIf(ctx, err)
+			if !isErrBucketNotFound(err) {
+				logger.LogIf(ctx, err)
+			}
 		}
 	}
 }
@@ -126,7 +130,9 @@ func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, gui <-chan
 		}
 
 		_, err = objAPI.PutObject(ctx, dataUsageBucket, dataUsageObjName, NewPutObjReader(r, nil, nil), ObjectOptions{})
-		logger.LogIf(ctx, err)
+		if !isErrBucketNotFound(err) {
+			logger.LogIf(ctx, err)
+		}
 	}
 }
@@ -135,7 +141,7 @@ func loadDataUsageFromBackend(ctx context.Context, objAPI ObjectLayer) (DataUsag
 	err := objAPI.GetObject(ctx, dataUsageBucket, dataUsageObjName, 0, -1, &dataUsageInfoJSON, "", ObjectOptions{})
 	if err != nil {
-		if isErrObjectNotFound(err) {
+		if isErrObjectNotFound(err) || isErrBucketNotFound(err) {
 			return DataUsageInfo{}, nil
 		}
 		return DataUsageInfo{}, toObjectErr(err, dataUsageBucket, dataUsageObjName)

cmd/posix.go
@@ -485,8 +485,8 @@ func (s *posix) SetDiskID(id string) {
 func (s *posix) MakeVolBulk(volumes ...string) (err error) {
 	for _, volume := range volumes {
 		if err = s.MakeVol(volume); err != nil {
-			if err != errVolumeExists {
-				return err
+			if os.IsPermission(err) {
+				return errVolumeAccessDenied
 			}
 		}
 	}
