XL: Operations on `uploads.json` should cater for disk being unavailable. (#2277)

master
Harshavardhana 9 years ago committed by GitHub
parent 7e5a78985d
commit 79bab6b561
  1. 5
      xl-v1-bucket.go
  2. 1
      xl-v1-metadata.go
  3. 56
      xl-v1-multipart-common.go
  4. 32
      xl-v1-multipart.go

@ -216,6 +216,11 @@ func (xl xlObjects) listBuckets() (bucketsInfo []BucketInfo, err error) {
Created: volInfo.Created, Created: volInfo.Created,
}) })
} }
// For buckets info empty, loop once again to check
// if we have, can happen if disks are down.
if len(bucketsInfo) == 0 {
continue
}
return bucketsInfo, nil return bucketsInfo, nil
} }
// Ignore any disks not found. // Ignore any disks not found.

@ -206,6 +206,7 @@ var objMetadataOpIgnoredErrs = []error{
errFaultyDisk, errFaultyDisk,
errVolumeNotFound, errVolumeNotFound,
errFileAccessDenied, errFileAccessDenied,
errFileNotFound,
} }
// readXLMetadata - returns the object metadata `xl.json` content from // readXLMetadata - returns the object metadata `xl.json` content from

@ -87,6 +87,24 @@ func (xl xlObjects) updateUploadsJSON(bucket, object string, uploadsJSON uploads
return nil return nil
} }
// readUploadsJSON - returns the `uploads.json` content for the given
// object, reading from the first responsive load balanced disk. Disks
// that are offline (nil) or fail with an ignorable/transient error are
// skipped; any other error aborts the scan and is returned.
func (xl xlObjects) readUploadsJSON(bucket, object string) (uploadsJSON uploadsV1, err error) {
	for _, storageDisk := range xl.getLoadBalancedDisks() {
		// Offline disk, move on to the next one.
		if storageDisk == nil {
			continue
		}
		uploadsJSON, err = readUploadsJSON(bucket, object, storageDisk)
		if err != nil {
			// Transient/ignorable failure - retry on the next disk.
			if isErrIgnored(err, objMetadataOpIgnoredErrs) {
				continue
			}
			// Unexpected failure - stop scanning and report it.
			break
		}
		// Successful read.
		return uploadsJSON, nil
	}
	// All disks exhausted or a hard error occurred.
	return uploadsV1{}, err
}
// writeUploadJSON - create `uploads.json` or update it with new uploadID. // writeUploadJSON - create `uploads.json` or update it with new uploadID.
func (xl xlObjects) writeUploadJSON(bucket, object, uploadID string, initiated time.Time) (err error) { func (xl xlObjects) writeUploadJSON(bucket, object, uploadID string, initiated time.Time) (err error) {
uploadsPath := path.Join(mpartMetaPrefix, bucket, object, uploadsJSONFile) uploadsPath := path.Join(mpartMetaPrefix, bucket, object, uploadsJSONFile)
@ -96,16 +114,9 @@ func (xl xlObjects) writeUploadJSON(bucket, object, uploadID string, initiated t
var errs = make([]error, len(xl.storageDisks)) var errs = make([]error, len(xl.storageDisks))
var wg = &sync.WaitGroup{} var wg = &sync.WaitGroup{}
var uploadsJSON uploadsV1 // Reads `uploads.json` and returns error.
for _, disk := range xl.storageDisks { uploadsJSON, err := xl.readUploadsJSON(bucket, object)
if disk == nil {
continue
}
uploadsJSON, err = readUploadsJSON(bucket, object, disk)
break
}
if err != nil { if err != nil {
// For any other errors.
if err != errFileNotFound { if err != errFileNotFound {
return err return err
} }
@ -151,12 +162,8 @@ func (xl xlObjects) writeUploadJSON(bucket, object, uploadID string, initiated t
// Wait here for all the writes to finish. // Wait here for all the writes to finish.
wg.Wait() wg.Wait()
// Count all the errors and validate if we have write quorum. // Do we have write quorum?.
if !isDiskQuorum(errs, xl.writeQuorum) { if !isDiskQuorum(errs, xl.writeQuorum) {
// Do we have readQuorum?.
if isDiskQuorum(errs, xl.readQuorum) {
return nil
}
// Rename `uploads.json` left over back to tmp location. // Rename `uploads.json` left over back to tmp location.
for index, disk := range xl.storageDisks { for index, disk := range xl.storageDisks {
if disk == nil { if disk == nil {
@ -175,7 +182,14 @@ func (xl xlObjects) writeUploadJSON(bucket, object, uploadID string, initiated t
wg.Wait() wg.Wait()
return errXLWriteQuorum return errXLWriteQuorum
} }
return nil
// Ignored errors list.
ignoredErrs := []error{
errDiskNotFound,
errFaultyDisk,
errDiskAccessDenied,
}
return reduceErrs(errs, ignoredErrs)
} }
// Returns if the prefix is a multipart upload. // Returns if the prefix is a multipart upload.
@ -234,10 +248,13 @@ func (xl xlObjects) statPart(bucket, object, uploadID, partName string) (fileInf
if err == nil { if err == nil {
return fileInfo, nil return fileInfo, nil
} }
// For any reason disk was deleted or goes offline, continue
// For any reason disk was deleted or goes offline we continue to next disk.
if isErrIgnored(err, objMetadataOpIgnoredErrs) { if isErrIgnored(err, objMetadataOpIgnoredErrs) {
continue continue
} }
// Catastrophic error, we return.
break break
} }
return FileInfo{}, err return FileInfo{}, err
@ -283,10 +300,11 @@ func commitXLMetadata(disks []StorageAPI, srcPrefix, dstPrefix string, quorum in
return errXLWriteQuorum return errXLWriteQuorum
} }
return reduceErrs(mErrs, []error{ // List of ignored errors.
ignoredErrs := []error{
errDiskNotFound, errDiskNotFound,
errDiskAccessDenied, errDiskAccessDenied,
errFaultyDisk, errFaultyDisk,
errVolumeNotFound, }
}) return reduceErrs(mErrs, ignoredErrs)
} }

@ -755,21 +755,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
// Validate if there are other incomplete upload-id's present for // Validate if there are other incomplete upload-id's present for
// the object, if yes do not attempt to delete 'uploads.json'. // the object, if yes do not attempt to delete 'uploads.json'.
var disk StorageAPI uploadsJSON, err := xl.readUploadsJSON(bucket, object)
var uploadsJSON uploadsV1
for _, disk = range xl.getLoadBalancedDisks() {
if disk == nil {
continue
}
uploadsJSON, err = readUploadsJSON(bucket, object, disk)
if err == nil {
break
}
if isErrIgnored(err, objMetadataOpIgnoredErrs) {
continue
}
break
}
if err != nil { if err != nil {
return "", toObjectErr(err, minioMetaBucket, object) return "", toObjectErr(err, minioMetaBucket, object)
} }
@ -809,21 +795,7 @@ func (xl xlObjects) abortMultipartUpload(bucket, object, uploadID string) (err e
defer nsMutex.Unlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object)) defer nsMutex.Unlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object))
// Validate if there are other incomplete upload-id's present for // Validate if there are other incomplete upload-id's present for
// the object, if yes do not attempt to delete 'uploads.json'. // the object, if yes do not attempt to delete 'uploads.json'.
var disk StorageAPI uploadsJSON, err := xl.readUploadsJSON(bucket, object)
var uploadsJSON uploadsV1
for _, disk = range xl.getLoadBalancedDisks() {
if disk == nil {
continue
}
uploadsJSON, err = readUploadsJSON(bucket, object, disk)
if err == nil {
break
}
if isErrIgnored(err, objMetadataOpIgnoredErrs) {
continue
}
break
}
if err != nil { if err != nil {
return toObjectErr(err, bucket, object) return toObjectErr(err, bucket, object)
} }

Loading…
Cancel
Save