@@ -48,7 +48,7 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
 	}
 
 	// Not using path.Join() as it strips off the trailing '/'.
-	multipartPrefixPath := pathJoin(mpartMetaPrefix, bucket, prefix)
+	multipartPrefixPath := pathJoin(bucket, prefix)
 	if prefix == "" {
 		// Should have a trailing "/" if prefix is ""
 		// For ex. multipartPrefixPath should be "multipart/bucket/" if prefix is ""
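
Aside: the comment in the hunk above refers to Go's standard path.Join, which cleans the joined path and therefore drops a trailing '/'. A minimal sketch of the difference, assuming the internal pathJoin helper simply re-appends the trailing slash when the last element carries one (the helper body below is an illustration, not the real implementation):

    package main

    import (
        "fmt"
        "path"
        "strings"
    )

    // pathJoin sketches the assumed behavior of the internal helper:
    // join like path.Join, but keep a trailing '/' from the last element.
    func pathJoin(elem ...string) string {
        trailingSlash := ""
        if len(elem) > 0 && strings.HasSuffix(elem[len(elem)-1], "/") {
            trailingSlash = "/"
        }
        return path.Join(elem...) + trailingSlash
    }

    func main() {
        fmt.Println(path.Join("bucket", "prefix/")) // "bucket/prefix"  (slash stripped)
        fmt.Println(pathJoin("bucket", "prefix/"))  // "bucket/prefix/" (slash kept)
        // With an empty prefix neither form yields a trailing slash, which is
        // why the listing code appends slashSeparator by hand when prefix == "".
        fmt.Println(pathJoin("bucket", "")) // "bucket"
    }
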
@@ -56,7 +56,7 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
 	}
 	multipartMarkerPath := ""
 	if keyMarker != "" {
-		multipartMarkerPath = pathJoin(mpartMetaPrefix, bucket, keyMarker)
+		multipartMarkerPath = pathJoin(bucket, keyMarker)
 	}
 	var uploads []uploadMetadata
 	var err error
@@ -65,8 +65,8 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
 	// uploadIDMarker first.
 	if uploadIDMarker != "" {
 		// hold lock on keyMarker path
-		keyMarkerLock := nsMutex.NewNSLock(minioMetaBucket,
-			pathJoin(mpartMetaPrefix, bucket, keyMarker))
+		keyMarkerLock := nsMutex.NewNSLock(minioMetaMultipartBucket,
+			pathJoin(bucket, keyMarker))
 		keyMarkerLock.RLock()
 		for _, disk := range xl.getLoadBalancedDisks() {
 			if disk == nil {
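
The substitution in this hunk is the pattern the whole diff repeats: every lock, read, and rename that used the shared meta volume plus an mpartMetaPrefix-qualified path now targets a dedicated multipart volume, with the prefix folded into the volume name. A sketch of why both forms name the same entry; the constant values below are assumptions (only the identifiers appear in the diff):

    package main

    import "fmt"

    // Assumed values - the diff only shows the identifiers.
    const (
        minioMetaBucket = ".minio.sys"
        mpartMetaPrefix = "multipart"
    )

    // Presumably the new volume is the old bucket with the old prefix baked in.
    const minioMetaMultipartBucket = minioMetaBucket + "/" + mpartMetaPrefix

    func main() {
        // Old lock name: volume ".minio.sys", path "multipart/<bucket>/<keyMarker>".
        // New lock name: volume ".minio.sys/multipart", path "<bucket>/<keyMarker>".
        // Both resolve to the same location; the call sites just stop
        // repeating the prefix.
        fmt.Println(minioMetaMultipartBucket) // ".minio.sys/multipart"
    }
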
@@ -92,12 +92,12 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
 	heal := false // true only for xl.ListObjectsHeal
 	// Validate if we need to list further depending on maxUploads.
 	if maxUploads > 0 {
-		walkerCh, walkerDoneCh = xl.listPool.Release(listParams{minioMetaBucket, recursive, multipartMarkerPath, multipartPrefixPath, heal})
+		walkerCh, walkerDoneCh = xl.listPool.Release(listParams{minioMetaMultipartBucket, recursive, multipartMarkerPath, multipartPrefixPath, heal})
 		if walkerCh == nil {
 			walkerDoneCh = make(chan struct{})
 			isLeaf := xl.isMultipartUpload
 			listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs, xl.getLoadBalancedDisks()...)
-			walkerCh = startTreeWalk(minioMetaBucket, multipartPrefixPath, multipartMarkerPath, recursive, listDir, isLeaf, walkerDoneCh)
+			walkerCh = startTreeWalk(minioMetaMultipartBucket, multipartPrefixPath, multipartMarkerPath, recursive, listDir, isLeaf, walkerDoneCh)
 		}
 		// Collect uploads until we have reached maxUploads count to 0.
 		for maxUploads > 0 {
@@ -115,7 +115,7 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
 				}
 				return ListMultipartsInfo{}, err
 			}
-			entry := strings.TrimPrefix(walkResult.entry, retainSlash(pathJoin(mpartMetaPrefix, bucket)))
+			entry := strings.TrimPrefix(walkResult.entry, retainSlash(bucket))
 			// For an entry looking like a directory, store and
 			// continue the loop not need to fetch uploads.
 			if strings.HasSuffix(walkResult.entry, slashSeparator) {
@@ -135,8 +135,8 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
 
 			// For the new object entry we get all its
 			// pending uploadIDs.
-			entryLock := nsMutex.NewNSLock(minioMetaBucket,
-				pathJoin(mpartMetaPrefix, bucket, entry))
+			entryLock := nsMutex.NewNSLock(minioMetaMultipartBucket,
+				pathJoin(bucket, entry))
 			entryLock.RLock()
 			var disk StorageAPI
 			for _, disk = range xl.getLoadBalancedDisks() {
@@ -281,13 +281,13 @@ func (xl xlObjects) newMultipartUpload(bucket string, object string, meta map[st
 
 	// This lock needs to be held for any changes to the directory
 	// contents of ".minio.sys/multipart/object/"
-	objectMPartPathLock := nsMutex.NewNSLock(minioMetaBucket,
-		pathJoin(mpartMetaPrefix, bucket, object))
+	objectMPartPathLock := nsMutex.NewNSLock(minioMetaMultipartBucket,
+		pathJoin(bucket, object))
 	objectMPartPathLock.Lock()
 	defer objectMPartPathLock.Unlock()
 
 	uploadID := getUUID()
-	uploadIDPath := path.Join(mpartMetaPrefix, bucket, object, uploadID)
+	uploadIDPath := path.Join(bucket, object, uploadID)
 	tempUploadIDPath := uploadID
 	// Write updated `xl.json` to all disks.
 	if err := writeSameXLMetadata(xl.storageDisks, minioMetaTmpBucket, tempUploadIDPath, xlMeta, xl.writeQuorum, xl.readQuorum); err != nil {
@@ -300,8 +300,8 @@ func (xl xlObjects) newMultipartUpload(bucket string, object string, meta map[st
 
 	// Attempt to rename temp upload object to actual upload path
 	// object
-	if rErr := renameObject(xl.storageDisks, minioMetaTmpBucket, tempUploadIDPath, minioMetaBucket, uploadIDPath, xl.writeQuorum); rErr != nil {
-		return "", toObjectErr(rErr, minioMetaBucket, uploadIDPath)
+	if rErr := renameObject(xl.storageDisks, minioMetaTmpBucket, tempUploadIDPath, minioMetaMultipartBucket, uploadIDPath, xl.writeQuorum); rErr != nil {
+		return "", toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath)
 	}
 
 	initiated := time.Now().UTC()
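
Taken together, the previous two hunks are a two-phase commit: xl.json is first written to every disk under the tmp volume, and only then renamed into the multipart volume, so a crash mid-write leaves tmp garbage rather than a partially visible upload. A condensed sketch of the flow, with the diff's helpers reduced to function parameters and the volume values assumed:

    package main

    // Assumed values - the diff only shows the identifiers.
    const (
        minioMetaTmpBucket       = ".minio.sys/tmp"
        minioMetaMultipartBucket = ".minio.sys/multipart"
    )

    // commitNewUpload mirrors the shape of newMultipartUpload's write path;
    // write and rename stand in for writeSameXLMetadata and renameObject.
    func commitNewUpload(
        write func(vol, path string) error,
        rename func(srcVol, srcPath, dstVol, dstPath string) error,
        uploadID, uploadIDPath string,
    ) error {
        // Phase 1: stage xl.json under tmp, keyed by the fresh uploadID.
        if err := write(minioMetaTmpBucket, uploadID); err != nil {
            return err
        }
        // Phase 2: promote into the multipart volume; the rename is the
        // commit point.
        return rename(minioMetaTmpBucket, uploadID, minioMetaMultipartBucket, uploadIDPath)
    }

    func main() {}
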
@@ -358,10 +358,10 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
 
 	var partsMetadata []xlMetaV1
 	var errs []error
-	uploadIDPath := pathJoin(mpartMetaPrefix, bucket, object, uploadID)
+	uploadIDPath := pathJoin(bucket, object, uploadID)
 
 	// pre-check upload id lock.
-	preUploadIDLock := nsMutex.NewNSLock(minioMetaBucket, uploadIDPath)
+	preUploadIDLock := nsMutex.NewNSLock(minioMetaMultipartBucket, uploadIDPath)
 	preUploadIDLock.RLock()
 	// Validates if upload ID exists.
 	if !xl.isUploadIDExists(bucket, object, uploadID) {
@@ -369,7 +369,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
 		return "", traceError(InvalidUploadID{UploadID: uploadID})
 	}
 	// Read metadata associated with the object from all disks.
-	partsMetadata, errs = readAllXLMetadata(xl.storageDisks, minioMetaBucket,
+	partsMetadata, errs = readAllXLMetadata(xl.storageDisks, minioMetaMultipartBucket,
 		uploadIDPath)
 	if !isDiskQuorum(errs, xl.writeQuorum) {
 		preUploadIDLock.RUnlock()
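
isDiskQuorum gates every metadata write in this file. The sketch below is a hypothetical reimplementation, written only to show the pattern being relied on - one error slot per disk, proceed when enough disks reported no error; the real function may treat certain errors specially:

    package main

    import (
        "errors"
        "fmt"
    )

    // isDiskQuorumSketch: count disks that responded without error and
    // compare against the required quorum.
    func isDiskQuorumSketch(errs []error, quorum int) bool {
        ok := 0
        for _, err := range errs {
            if err == nil {
                ok++
            }
        }
        return ok >= quorum
    }

    func main() {
        errFaulty := errors.New("disk failure")
        errs := []error{nil, nil, nil, errFaulty} // 3 of 4 disks succeeded
        fmt.Println(isDiskQuorumSketch(errs, 3))  // true: write quorum met
        fmt.Println(isDiskQuorumSketch(errs, 4))  // false: quorum lost
    }
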
@@ -471,7 +471,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
 	}
 
 	// post-upload check (write) lock
-	postUploadIDLock := nsMutex.NewNSLock(minioMetaBucket, uploadIDPath)
+	postUploadIDLock := nsMutex.NewNSLock(minioMetaMultipartBucket, uploadIDPath)
 	postUploadIDLock.Lock()
 	defer postUploadIDLock.Unlock()
 
@@ -482,13 +482,13 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
 
 	// Rename temporary part file to its final location.
 	partPath := path.Join(uploadIDPath, partSuffix)
-	err = renamePart(onlineDisks, minioMetaTmpBucket, tmpPartPath, minioMetaBucket, partPath, xl.writeQuorum)
+	err = renamePart(onlineDisks, minioMetaTmpBucket, tmpPartPath, minioMetaMultipartBucket, partPath, xl.writeQuorum)
 	if err != nil {
-		return "", toObjectErr(err, minioMetaBucket, partPath)
+		return "", toObjectErr(err, minioMetaMultipartBucket, partPath)
 	}
 
 	// Read metadata again because it might be updated with parallel upload of another part.
-	partsMetadata, errs = readAllXLMetadata(onlineDisks, minioMetaBucket, uploadIDPath)
+	partsMetadata, errs = readAllXLMetadata(onlineDisks, minioMetaMultipartBucket, uploadIDPath)
 	if !isDiskQuorum(errs, xl.writeQuorum) {
 		return "", toObjectErr(traceError(errXLWriteQuorum), bucket, object)
 	}
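
The re-read in the hunk above is the crux of part-level concurrency: two PutObjectPart calls for different part numbers can run in parallel, so each must merge its own part entry into the metadata as seen after acquiring the post-upload write lock, or a parallel part's entry would be lost. A toy model of the same read-modify-write discipline (names here are illustrative, not from the diff):

    package main

    import "sync"

    // partIndex models one upload's xl.json part list; mu stands in for
    // postUploadIDLock.
    type partIndex struct {
        mu    sync.Mutex
        parts map[int]string // part number -> etag
    }

    func (p *partIndex) addPart(partNumber int, etag string) {
        p.mu.Lock()
        defer p.mu.Unlock()
        // "Read again" under the lock: p.parts already reflects any part
        // committed by a parallel upload before we got here.
        p.parts[partNumber] = etag
    }

    func main() {
        p := &partIndex{parts: make(map[int]string)}
        var wg sync.WaitGroup
        for i := 1; i <= 4; i++ {
            wg.Add(1)
            go func(n int) { defer wg.Done(); p.addPart(n, "etag") }(i)
        }
        wg.Wait() // all four parts recorded, none lost
    }
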
@@ -528,9 +528,9 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
 	if err = writeUniqueXLMetadata(onlineDisks, minioMetaTmpBucket, tempXLMetaPath, partsMetadata, xl.writeQuorum); err != nil {
 		return "", toObjectErr(err, minioMetaTmpBucket, tempXLMetaPath)
 	}
-	rErr := commitXLMetadata(onlineDisks, minioMetaTmpBucket, tempXLMetaPath, minioMetaBucket, uploadIDPath, xl.writeQuorum)
+	rErr := commitXLMetadata(onlineDisks, minioMetaTmpBucket, tempXLMetaPath, minioMetaMultipartBucket, uploadIDPath, xl.writeQuorum)
 	if rErr != nil {
-		return "", toObjectErr(rErr, minioMetaBucket, uploadIDPath)
+		return "", toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath)
 	}
 
 	// Return success.
@@ -542,11 +542,11 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
 func (xl xlObjects) listObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) {
 	result := ListPartsInfo{}
 
-	uploadIDPath := path.Join(mpartMetaPrefix, bucket, object, uploadID)
+	uploadIDPath := path.Join(bucket, object, uploadID)
 
-	xlParts, err := xl.readXLMetaParts(minioMetaBucket, uploadIDPath)
+	xlParts, err := xl.readXLMetaParts(minioMetaMultipartBucket, uploadIDPath)
 	if err != nil {
-		return ListPartsInfo{}, toObjectErr(err, minioMetaBucket, uploadIDPath)
+		return ListPartsInfo{}, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
 	}
 
 	// Populate the result stub.
@@ -622,8 +622,8 @@ func (xl xlObjects) ListObjectParts(bucket, object, uploadID string, partNumberM
 
 	// Hold lock so that there is no competing
 	// abort-multipart-upload or complete-multipart-upload.
-	uploadIDLock := nsMutex.NewNSLock(minioMetaBucket,
-		pathJoin(mpartMetaPrefix, bucket, object, uploadID))
+	uploadIDLock := nsMutex.NewNSLock(minioMetaMultipartBucket,
+		pathJoin(bucket, object, uploadID))
 	uploadIDLock.Lock()
 	defer uploadIDLock.Unlock()
 
@@ -662,8 +662,8 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
 	//
 	// 2) no one does a parallel complete-multipart-upload on this
 	// multipart upload
-	uploadIDLock := nsMutex.NewNSLock(minioMetaBucket,
-		pathJoin(mpartMetaPrefix, bucket, object, uploadID))
+	uploadIDLock := nsMutex.NewNSLock(minioMetaMultipartBucket,
+		pathJoin(bucket, object, uploadID))
 	uploadIDLock.Lock()
 	defer uploadIDLock.Unlock()
 
@@ -676,10 +676,10 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
 		return "", err
 	}
 
-	uploadIDPath := pathJoin(mpartMetaPrefix, bucket, object, uploadID)
+	uploadIDPath := pathJoin(bucket, object, uploadID)
 
 	// Read metadata associated with the object from all disks.
-	partsMetadata, errs := readAllXLMetadata(xl.storageDisks, minioMetaBucket, uploadIDPath)
+	partsMetadata, errs := readAllXLMetadata(xl.storageDisks, minioMetaMultipartBucket, uploadIDPath)
 	// Do we have writeQuorum?.
 	if !isDiskQuorum(errs, xl.writeQuorum) {
 		return "", toObjectErr(traceError(errXLWriteQuorum), bucket, object)
@@ -760,7 +760,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
 
 	// Save successfully calculated md5sum.
 	xlMeta.Meta["md5Sum"] = s3MD5
-	uploadIDPath = path.Join(mpartMetaPrefix, bucket, object, uploadID)
+	uploadIDPath = path.Join(bucket, object, uploadID)
 	tempUploadIDPath := uploadID
 
 	// Update all xl metadata, make sure to not modify fields like
@@ -775,9 +775,9 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
 	if err = writeUniqueXLMetadata(onlineDisks, minioMetaTmpBucket, tempUploadIDPath, partsMetadata, xl.writeQuorum); err != nil {
 		return "", toObjectErr(err, minioMetaTmpBucket, tempUploadIDPath)
 	}
-	rErr := commitXLMetadata(onlineDisks, minioMetaTmpBucket, tempUploadIDPath, minioMetaBucket, uploadIDPath, xl.writeQuorum)
+	rErr := commitXLMetadata(onlineDisks, minioMetaTmpBucket, tempUploadIDPath, minioMetaMultipartBucket, uploadIDPath, xl.writeQuorum)
 	if rErr != nil {
-		return "", toObjectErr(rErr, minioMetaBucket, uploadIDPath)
+		return "", toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath)
 	}
 
 	// Hold write lock on the destination before rename.
@@ -823,7 +823,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
 	}
 
 	// Rename the multipart object to final location.
-	if err = renameObject(onlineDisks, minioMetaBucket, uploadIDPath, bucket, object, xl.writeQuorum); err != nil {
+	if err = renameObject(onlineDisks, minioMetaMultipartBucket, uploadIDPath, bucket, object, xl.writeQuorum); err != nil {
 		return "", toObjectErr(err, bucket, object)
 	}
 
@@ -833,14 +833,14 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
 	// Hold the lock so that two parallel
 	// complete-multipart-uploads do not leave a stale
 	// uploads.json behind.
-	objectMPartPathLock := nsMutex.NewNSLock(minioMetaBucket,
-		pathJoin(mpartMetaPrefix, bucket, object))
+	objectMPartPathLock := nsMutex.NewNSLock(minioMetaMultipartBucket,
+		pathJoin(bucket, object))
 	objectMPartPathLock.Lock()
 	defer objectMPartPathLock.Unlock()
 
 	// remove entry from uploads.json with quorum
 	if err = xl.updateUploadJSON(bucket, object, uploadIDChange{uploadID: uploadID, isRemove: true}); err != nil {
-		return "", toObjectErr(err, minioMetaBucket, path.Join(mpartMetaPrefix, bucket, object))
+		return "", toObjectErr(err, minioMetaMultipartBucket, path.Join(bucket, object))
 	}
 
 	// Return md5sum.
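
For context on the hunk above: uploads.json is the per-object index of in-flight upload IDs, and updateUploadJSON with isRemove: true drops this upload's entry under the object-level lock just taken. A sketch of the index manipulation; the uploadIDChange literal matches the diff, while the slice layout of uploads.json is an assumption:

    package main

    import "fmt"

    // uploadIDChange mirrors the struct literal used in the diff.
    type uploadIDChange struct {
        uploadID string
        isRemove bool
    }

    // uploadsIndex is an assumed in-memory view of uploads.json: the list
    // of in-flight upload IDs for a single object.
    type uploadsIndex struct {
        uploadIDs []string
    }

    func (u *uploadsIndex) apply(c uploadIDChange) {
        if !c.isRemove {
            u.uploadIDs = append(u.uploadIDs, c.uploadID)
            return
        }
        for i, id := range u.uploadIDs {
            if id == c.uploadID {
                u.uploadIDs = append(u.uploadIDs[:i], u.uploadIDs[i+1:]...)
                return
            }
        }
    }

    func main() {
        u := &uploadsIndex{uploadIDs: []string{"id-1", "id-2"}}
        u.apply(uploadIDChange{uploadID: "id-1", isRemove: true})
        fmt.Println(u.uploadIDs) // [id-2]; once empty the file itself can go.
    }
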
@@ -859,8 +859,8 @@ func (xl xlObjects) abortMultipartUpload(bucket, object, uploadID string) (err e
 
 	// hold lock so we don't compete with a complete, or abort
 	// multipart request.
-	objectMPartPathLock := nsMutex.NewNSLock(minioMetaBucket,
-		pathJoin(mpartMetaPrefix, bucket, object))
+	objectMPartPathLock := nsMutex.NewNSLock(minioMetaMultipartBucket,
+		pathJoin(bucket, object))
 	objectMPartPathLock.Lock()
 	defer objectMPartPathLock.Unlock()
 
@@ -898,8 +898,8 @@ func (xl xlObjects) AbortMultipartUpload(bucket, object, uploadID string) error
 
 	// Hold lock so that there is no competing
 	// complete-multipart-upload or put-object-part.
-	uploadIDLock := nsMutex.NewNSLock(minioMetaBucket,
-		pathJoin(mpartMetaPrefix, bucket, object, uploadID))
+	uploadIDLock := nsMutex.NewNSLock(minioMetaMultipartBucket,
+		pathJoin(bucket, object, uploadID))
 	uploadIDLock.Lock()
 	defer uploadIDLock.Unlock()
 