Do not use parallel deletes to avoid random I/O (#6178)

The current code deletes up to 1000 objects simultaneously, which
causes significant random I/O; on slower drives this leads to
servers disconnecting in a distributed setup.

Simplify this by deleting objects serially, which also reduces the
chattiness of this operation.
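
For illustration, here is a minimal, self-contained sketch of the pattern this commit moves between. It is not the handler itself; the deleteObject stub, bucket name, and object list are hypothetical stand-ins:

    package main

    import (
        "context"
        "fmt"
    )

    // deleteObject is a hypothetical stand-in for the object layer's delete call.
    func deleteObject(ctx context.Context, bucket, object string) error {
        fmt.Printf("deleting %s/%s\n", bucket, object)
        return nil
    }

    func main() {
        ctx := context.Background()
        objects := []string{"a.txt", "b.txt", "c.txt"}

        // Before: one goroutine per object, so up to 1000 deletes hit the
        // drives at once and degenerate into random I/O:
        //
        //     var wg sync.WaitGroup
        //     dErrs := make([]error, len(objects))
        //     for i, obj := range objects {
        //         wg.Add(1)
        //         go func(i int, obj string) {
        //             defer wg.Done()
        //             dErrs[i] = deleteObject(ctx, "mybucket", obj)
        //         }(i, obj)
        //     }
        //     wg.Wait()

        // After: delete serially; errors are still recorded per index so
        // the caller can report per-object results.
        dErrs := make([]error, len(objects))
        for i, obj := range objects {
            dErrs[i] = deleteObject(ctx, "mybucket", obj)
        }
        fmt.Println(dErrs)
    }

Serial deletion trades some wall-clock latency for sequential, bounded disk I/O, while the per-object error slice keeps the response format unchanged.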
master
Harshavardhana authored 6 years ago, committed by kannappanr
parent 76ddf4d32f
commit f1be356cc6
1 changed file with 39 changes:
    cmd/bucket-handlers.go

@@ -28,7 +28,6 @@ import (
     "path"
     "path/filepath"
     "strings"
-    "sync"
 
     "github.com/gorilla/mux"
@@ -311,34 +310,24 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
         return
     }
 
-    var wg = &sync.WaitGroup{} // Allocate a new wait group.
-    var dErrs = make([]error, len(deleteObjects.Objects))
+    deleteObject := objectAPI.DeleteObject
+    if api.CacheAPI() != nil {
+        deleteObject = api.CacheAPI().DeleteObject
+    }
 
-    // Delete all requested objects in parallel.
+    var dErrs = make([]error, len(deleteObjects.Objects))
     for index, object := range deleteObjects.Objects {
-        wg.Add(1)
-        go func(i int, obj ObjectIdentifier) {
-            defer wg.Done()
-            // If the request is denied access, each item
-            // should be marked as 'AccessDenied'
-            if s3Error == ErrAccessDenied {
-                dErrs[i] = PrefixAccessDenied{
-                    Bucket: bucket,
-                    Object: obj.ObjectName,
-                }
-                return
-            }
-            deleteObject := objectAPI.DeleteObject
-            if api.CacheAPI() != nil {
-                deleteObject = api.CacheAPI().DeleteObject
-            }
-            dErr := deleteObject(ctx, bucket, obj.ObjectName)
-            if dErr != nil {
-                dErrs[i] = dErr
-            }
-        }(index, object)
+        // If the request is denied access, each item
+        // should be marked as 'AccessDenied'
+        if s3Error == ErrAccessDenied {
+            dErrs[index] = PrefixAccessDenied{
+                Bucket: bucket,
+                Object: object.ObjectName,
+            }
+            continue
+        }
+        dErrs[index] = deleteObject(ctx, bucket, object.ObjectName)
     }
-    wg.Wait()
 
     // Collect deleted objects and errors if any.
     var deletedObjects []ObjectIdentifier
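
The trailing context above leads into the handler's result collection. As a rough sketch of how per-object errors gathered in dErrs can be split into deleted objects and failures, assuming hypothetical ObjectIdentifier and DeleteError shapes (not necessarily the handler's actual types):

    package main

    import "fmt"

    // Hypothetical shapes for illustration; the real handler's types may differ.
    type ObjectIdentifier struct{ ObjectName string }

    type DeleteError struct {
        Object  string
        Message string
    }

    // partition splits per-object results into successes and failures.
    func partition(objects []ObjectIdentifier, dErrs []error) ([]ObjectIdentifier, []DeleteError) {
        var deleted []ObjectIdentifier
        var failed []DeleteError
        for i, obj := range objects {
            if dErrs[i] == nil {
                deleted = append(deleted, obj)
                continue
            }
            failed = append(failed, DeleteError{Object: obj.ObjectName, Message: dErrs[i].Error()})
        }
        return deleted, failed
    }

    func main() {
        objs := []ObjectIdentifier{{ObjectName: "a.txt"}, {ObjectName: "b.txt"}}
        dErrs := []error{nil, fmt.Errorf("access denied")}
        deleted, failed := partition(objs, dErrs)
        fmt.Println(deleted, failed)
    }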
