From 9439dfef64b41563622601afa0e4f37fef7eae66 Mon Sep 17 00:00:00 2001 From: Anis Elleuch Date: Fri, 4 May 2018 10:43:20 -0700 Subject: [PATCH] Use defer style to stop tickers to avoid current/possible misuse (#5883) This commit ensures that all tickers are stopped using defer ticker.Stop() style. This will also fix one bug seen when a client starts to listen to event notifications; that case would result in a leak in tickers. --- cmd/admin-handlers.go | 2 +- cmd/disk-cache-fs.go | 4 ++-- cmd/fs-v1-multipart.go | 4 ++-- cmd/lock-rpc-server.go | 4 ++-- cmd/xl-sets.go | 7 +++---- cmd/xl-v1-multipart.go | 2 +- pkg/event/target/httpclient.go | 4 +++- 7 files changed, 14 insertions(+), 13 deletions(-) diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go index a463ba40a..d67a809e6 100644 --- a/cmd/admin-handlers.go +++ b/cmd/admin-handlers.go @@ -500,6 +500,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) { // after 10s unless a response item comes in keepConnLive := func(w http.ResponseWriter, respCh chan healResp) { ticker := time.NewTicker(time.Second * 10) + defer ticker.Stop() started := false forLoop: for { @@ -528,7 +529,6 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) { break forLoop } } - ticker.Stop() } // find number of disks in the setup diff --git a/cmd/disk-cache-fs.go b/cmd/disk-cache-fs.go index e7702a6ff..4650d9fdd 100644 --- a/cmd/disk-cache-fs.go +++ b/cmd/disk-cache-fs.go @@ -155,11 +155,11 @@ func (cfs *cacheFSObjects) diskAvailable(size int64) bool { // purges all content marked trash from the cache. func (cfs *cacheFSObjects) purgeTrash() { ticker := time.NewTicker(time.Minute * cacheCleanupInterval) + defer ticker.Stop() + for { select { case <-globalServiceDoneCh: - // Stop the timer. 
- ticker.Stop() return case <-ticker.C: trashPath := path.Join(cfs.fsPath, minioMetaBucket, cacheTrashDir) diff --git a/cmd/fs-v1-multipart.go b/cmd/fs-v1-multipart.go index fa468c84c..7cda9f2a9 100644 --- a/cmd/fs-v1-multipart.go +++ b/cmd/fs-v1-multipart.go @@ -714,11 +714,11 @@ func (fs *FSObjects) AbortMultipartUpload(ctx context.Context, bucket, object, u // blocking and should be run in a go-routine. func (fs *FSObjects) cleanupStaleMultipartUploads(ctx context.Context, cleanupInterval, expiry time.Duration, doneCh chan struct{}) { ticker := time.NewTicker(cleanupInterval) + defer ticker.Stop() + for { select { case <-doneCh: - // Stop the timer. - ticker.Stop() return case <-ticker.C: now := time.Now() diff --git a/cmd/lock-rpc-server.go b/cmd/lock-rpc-server.go index 7cd324eeb..3da4afae7 100644 --- a/cmd/lock-rpc-server.go +++ b/cmd/lock-rpc-server.go @@ -69,6 +69,8 @@ func startLockMaintenance(lkSrv *lockServer) { go func(lk *lockServer) { // Initialize a new ticker with a minute between each ticks. ticker := time.NewTicker(lockMaintenanceInterval) + // Stop the timer upon service closure and cleanup the go-routine. + defer ticker.Stop() // Start with random sleep time, so as to avoid "synchronous checks" between servers time.Sleep(time.Duration(rand.Float64() * float64(lockMaintenanceInterval))) @@ -76,8 +78,6 @@ func startLockMaintenance(lkSrv *lockServer) { // Verifies every minute for locks held more than 2minutes. select { case <-globalServiceDoneCh: - // Stop the timer upon service closure and cleanup the go-routine. - ticker.Stop() return case <-ticker.C: lk.lockMaintenance(lockValidityCheckInterval) diff --git a/cmd/xl-sets.go b/cmd/xl-sets.go index 25314f99a..ebc0e4d42 100644 --- a/cmd/xl-sets.go +++ b/cmd/xl-sets.go @@ -198,15 +198,14 @@ func (s *xlSets) connectDisks() { // the set topology, this monitoring happens at a given monitoring interval. 
func (s *xlSets) monitorAndConnectEndpoints(monitorInterval time.Duration) { ticker := time.NewTicker(monitorInterval) + // Stop the timer. + defer ticker.Stop() + for { select { case <-globalServiceDoneCh: - // Stop the timer. - ticker.Stop() return case <-s.disksConnectDoneCh: - // Stop the timer. - ticker.Stop() return case <-ticker.C: s.connectDisks() diff --git a/cmd/xl-v1-multipart.go b/cmd/xl-v1-multipart.go index a02437793..0cff64284 100644 --- a/cmd/xl-v1-multipart.go +++ b/cmd/xl-v1-multipart.go @@ -837,11 +837,11 @@ func (xl xlObjects) AbortMultipartUpload(ctx context.Context, bucket, object, up // Clean-up the old multipart uploads. Should be run in a Go routine. func (xl xlObjects) cleanupStaleMultipartUploads(ctx context.Context, cleanupInterval, expiry time.Duration, doneCh chan struct{}) { ticker := time.NewTicker(cleanupInterval) + defer ticker.Stop() for { select { case <-doneCh: - ticker.Stop() return case <-ticker.C: var disk StorageAPI diff --git a/pkg/event/target/httpclient.go b/pkg/event/target/httpclient.go index 32101776a..90f68a0e9 100644 --- a/pkg/event/target/httpclient.go +++ b/pkg/event/target/httpclient.go @@ -62,8 +62,10 @@ func (target *HTTPClientTarget) start() { return nil } + keepAliveTicker := time.NewTicker(500 * time.Millisecond) + defer keepAliveTicker.Stop() + for { - keepAliveTicker := time.NewTicker(500 * time.Millisecond) select { case <-target.stopCh: // We are asked to stop.