fix: re-use er.getDisks() properly in certain calls (#11043)

Branch: master
Author: Harshavardhana (4 years ago, committed by GitHub)
Parent: 8d036ed6d8
Commit: ce93b2681b
Changed files:
1. cmd/erasure-multipart.go (6 changes)
2. cmd/erasure-object.go (8 changes)
3. cmd/erasure-sets.go (13 changes)
4. cmd/erasure.go (7 changes)
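The common thread across these files: `er.getDisks()` was being called repeatedly within a single operation, and each call re-reads the live disk set, which can change between calls as disks go offline or reconnect. The fix snapshots the result once and reuses that one slice for every step. A minimal, self-contained Go sketch of the pattern (illustrative names, not MinIO's actual code):

    package main

    import "fmt"

    // erasureObjects here is a stand-in, not MinIO's type: getDisks returns
    // the disk set as it exists right now, so it may differ between calls.
    type erasureObjects struct {
        getDisks func() []string
    }

    func (er erasureObjects) operate() {
        // One snapshot for the whole operation, mirroring the storageDisks /
        // disks locals introduced by this commit.
        storageDisks := er.getDisks()

        fmt.Println("read metadata from:", storageDisks)
        fmt.Println("list online among:", storageDisks)
    }

    func main() {
        er := erasureObjects{getDisks: func() []string { return []string{"d0", "d1"} }}
        er.operate()
    }

In the hunks below this becomes the `storageDisks` local in PutObjectPart and putObject, and the `disks` local in deleteObject.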

cmd/erasure-multipart.go

@@ -382,8 +382,10 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
         return pi, toObjectErr(err, bucket, object, uploadID)
     }

+    storageDisks := er.getDisks()
+
     // Read metadata associated with the object from all disks.
-    partsMetadata, errs = readAllFileInfo(ctx, er.getDisks(), minioMetaMultipartBucket,
+    partsMetadata, errs = readAllFileInfo(ctx, storageDisks, minioMetaMultipartBucket,
         uploadIDPath, "")

     // get Quorum for this object
@@ -398,7 +400,7 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
     }

     // List all online disks.
-    onlineDisks, modTime := listOnlineDisks(er.getDisks(), partsMetadata, errs)
+    onlineDisks, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)

     // Pick one from the first valid metadata.
     fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum)
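Both call sites above now receive the same `storageDisks` slice. That matters because these APIs are positional: the i-th metadata entry and the i-th error describe the i-th disk. A hedged illustration with a hypothetical helper (not a MinIO API):

    package main

    import (
        "errors"
        "fmt"
    )

    // listOnline is a hypothetical helper: results are positional, so errs[i]
    // must describe disks[i], which only holds if both came from one snapshot.
    func listOnline(disks []string, errs []error) []string {
        online := make([]string, 0, len(disks))
        for i := range disks {
            if errs[i] == nil {
                online = append(online, disks[i])
            }
        }
        return online
    }

    func main() {
        disks := []string{"d0", "d1", "d2"}
        errs := []error{nil, errors.New("disk offline"), nil}
        fmt.Println(listOnline(disks, errs)) // [d0 d2]
    }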

cmd/erasure-object.go

@@ -608,7 +608,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
     }

     // Initialize parts metadata
-    partsMetadata := make([]FileInfo, len(er.getDisks()))
+    partsMetadata := make([]FileInfo, len(storageDisks))

     fi := newFileInfo(object, dataDrives, parityDrives)

@@ -767,19 +767,18 @@ func (er erasureObjects) deleteObjectVersion(ctx context.Context, bucket, object
 // all the disks in parallel, including `xl.meta` associated with the
 // object.
 func (er erasureObjects) deleteObject(ctx context.Context, bucket, object string, writeQuorum int) error {
-    var disks []StorageAPI
     var err error
     defer ObjectPathUpdated(pathJoin(bucket, object))

     tmpObj := mustGetUUID()
+    disks := er.getDisks()
     if bucket == minioMetaTmpBucket {
         tmpObj = object
-        disks = er.getDisks()
     } else {
         // Rename the current object while requiring write quorum, but also consider
         // that a non found object in a given disk as a success since it already
         // confirms that the object doesn't have a part in that disk (already removed)
-        disks, err = rename(ctx, er.getDisks(), bucket, object, minioMetaTmpBucket, tmpObj, true, writeQuorum,
+        disks, err = rename(ctx, disks, bucket, object, minioMetaTmpBucket, tmpObj, true, writeQuorum,
             []error{errFileNotFound})
         if err != nil {
             return toObjectErr(err, bucket, object)
@@ -787,7 +786,6 @@ func (er erasureObjects) deleteObject(ctx context.Context, bucket, object string
     }

     g := errgroup.WithNErrs(len(disks))
     for index := range disks {
         index := index
         g.Go(func() error {
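The hunk ends inside deleteObject's per-disk fan-out. MinIO's internal `errgroup.WithNErrs(n)` runs one goroutine per disk and collects one error per slot; a rough standard-library equivalent of the idiom (an assumption, not MinIO's pkg/sync/errgroup), including the `index := index` copy that per-iteration closures needed before Go 1.22:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        disks := []string{"d0", "d1", "d2"}
        errs := make([]error, len(disks)) // one error slot per disk

        var wg sync.WaitGroup
        for index := range disks {
            index := index // per-iteration copy; required before Go 1.22
            wg.Add(1)
            go func() {
                defer wg.Done()
                errs[index] = nil // placeholder for the real per-disk cleanup
            }()
        }
        wg.Wait()
        fmt.Println(errs) // [<nil> <nil> <nil>]
    }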

cmd/erasure-sets.go

@@ -388,12 +388,13 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto

         // Initialize erasure objects for a given set.
         s.sets[i] = &erasureObjects{
-            getDisks:     s.GetDisks(i),
-            getLockers:   s.GetLockers(i),
-            getEndpoints: s.GetEndpoints(i),
-            nsMutex:      mutex,
-            bp:           bp,
-            mrfOpCh:      make(chan partialOperation, 10000),
+            setDriveCount: setDriveCount,
+            getDisks:      s.GetDisks(i),
+            getLockers:    s.GetLockers(i),
+            getEndpoints:  s.GetEndpoints(i),
+            nsMutex:       mutex,
+            bp:            bp,
+            mrfOpCh:       make(chan partialOperation, 10000),
         }

         go s.sets[i].cleanupStaleUploads(ctx,
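In the constructor above, each set's erasureObjects is wired with closures such as `s.GetDisks(i)` that capture the set index. A minimal, self-contained sketch of that wiring (simplified types, not MinIO's code):

    package main

    import "fmt"

    // sets is a stand-in for erasureSets: one slice of disk names per set.
    type sets struct {
        disks [][]string
    }

    // GetDisks returns a closure bound to one set index, so the value stored
    // in a getDisks field always reads that set's current disks.
    func (s *sets) GetDisks(setIndex int) func() []string {
        return func() []string { return s.disks[setIndex] }
    }

    func main() {
        s := &sets{disks: [][]string{{"d0", "d1"}, {"d2", "d3"}}}
        getDisks := s.GetDisks(1) // wired once, like getDisks: s.GetDisks(i)
        fmt.Println(getDisks())   // [d2 d3]
    }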

cmd/erasure.go

@@ -48,6 +48,8 @@ type partialOperation struct {
 type erasureObjects struct {
     GatewayUnsupported

+    setDriveCount int
+
     // getDisks returns list of storageAPIs.
     getDisks func() []StorageAPI

@@ -72,11 +74,6 @@ func (er erasureObjects) NewNSLock(bucket string, objects ...string) RWLocker {
     return er.nsMutex.NewNSLock(er.getLockers, bucket, objects...)
 }

-// SetDriveCount returns the current drives per set.
-func (er erasureObjects) SetDriveCount() int {
-    return len(er.getDisks())
-}
-
 // Shutdown function for object storage interface.
 func (er erasureObjects) Shutdown(ctx context.Context) error {
     // Add any object layer shutdown activities here.
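Together with the new `setDriveCount` field and its initialization in erasure-sets.go, removing the `SetDriveCount()` accessor here means the drives-per-set count is no longer re-derived from `len(er.getDisks())` on every call; it is fixed at construction. A sketch of the distinction, with hypothetical types:

    package main

    import "fmt"

    // Stand-in type: setDriveCount is configuration fixed at construction,
    // while getDisks reflects live disk state each time it is called.
    type erasureObjects struct {
        setDriveCount int
        getDisks      func() []string
    }

    func main() {
        er := erasureObjects{
            setDriveCount: 4,
            getDisks:      func() []string { return []string{"d0", "d1"} },
        }
        fmt.Println("configured drives per set:", er.setDriveCount) // stable: 4
        fmt.Println("disks visible right now:", len(er.getDisks())) // live: 2
    }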
