FS/multipart: Bug fix related to part path. Hold lock on part while appending. (#3335)

master
Krishna Srinivas, committed by Harshavardhana
parent 1983925dcf
commit f4f512fedd
Changed files:
  cmd/fs-v1-background-append.go (18 lines changed)
  cmd/fs-v1-multipart.go (17 lines changed)

cmd/fs-v1-background-append.go

@@ -56,7 +56,7 @@ type bgAppendPartsInfo struct {
 }
 
 // Called after a part is uploaded so that it can be appended in the background.
-func (b *backgroundAppend) append(disk StorageAPI, bucket, object, uploadID string, meta fsMetaV1) {
+func (b *backgroundAppend) append(disk StorageAPI, bucket, object, uploadID string, meta fsMetaV1) chan error {
 	b.Lock()
 	info, ok := b.infoMap[uploadID]
 	if !ok {
@@ -73,21 +73,19 @@ func (b *backgroundAppend) append(disk StorageAPI, bucket, object, uploadID string, meta fsMetaV1) {
 		go b.appendParts(disk, bucket, object, uploadID, info)
 	}
 	b.Unlock()
+	errCh := make(chan error)
 	go func() {
-		errCh := make(chan error)
 		// send input in a goroutine as send on the inputCh can block if appendParts go-routine
 		// is busy appending a part.
 		select {
 		case <-info.timeoutCh:
 			// This is to handle a rare race condition where we found info in b.infoMap
 			// but soon after that appendParts go-routine timed out.
+			errCh <- errAppendPartsTimeout
 		case info.inputCh <- bgAppendPartsInput{meta, errCh}:
-			// Receive the error so that the appendParts go-routine does not block on send.
-			// But the error received is ignored as fs.PutObjectPart() would have already
-			// returned success to the client.
-			<-errCh
 		}
 	}()
+	return errCh
 }
 
 // Called on complete-multipart-upload. Returns nil if the required parts have been appended.
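Note: the signature change above inverts ownership of errCh. Instead of creating the channel inside the goroutine and discarding the result, append now returns the unbuffered channel, and the caller decides when to drain it (PutObjectPart does so in the second file below). Because the channel is unbuffered, exactly one receive must happen or the sending goroutine blocks forever. A self-contained sketch of that contract; startAppend and the error text are illustrative, not code from this patch:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// startAppend mimics the new append() shape: it starts background work
// and returns an unbuffered channel that carries exactly one error.
// The caller must receive from it, or the sender goroutine leaks.
func startAppend() chan error {
	errCh := make(chan error)
	go func() {
		time.Sleep(10 * time.Millisecond) // pretend to append a part
		errCh <- errors.New("append failed: disk full")
	}()
	return errCh
}

func main() {
	errCh := startAppend()
	// Drain the channel exactly once, as the new PutObjectPart goroutine
	// does; the value may be ignored, but the receive must happen.
	if err := <-errCh; err != nil {
		fmt.Println("background append reported:", err)
	}
}
```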
@@ -153,6 +151,8 @@ func (b *backgroundAppend) appendParts(disk StorageAPI, bucket, object, uploadID
 				break
 			}
 			if err := appendPart(disk, bucket, object, uploadID, part); err != nil {
+				disk.DeleteFile(minioMetaTmpBucket, uploadID)
+				appendMeta.Parts = nil
 				input.errCh <- err
 				break
 			}
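The two added lines make a failed append reset cleanly: the partial concatenation under tmp/ is deleted and appendMeta.Parts is cleared together, so the in-memory record of appended parts can never disagree with what is on disk, and the next input rebuilds the append file from the first part. A minimal sketch of that keep-state-and-data-in-sync pattern (all names illustrative, not from the repo):

```go
package appendreset

import "os"

// state tracks which parts have been concatenated into scratchPath.
type state struct {
	scratchPath string
	parts       []int // mirrors appendMeta.Parts
}

// appendPart appends one part's bytes; on any failure it resets both
// the scratch file and the bookkeeping, as the patch does.
func (s *state) appendPart(part int, data []byte) error {
	f, err := os.OpenFile(s.scratchPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
	if err != nil {
		s.reset()
		return err
	}
	defer f.Close()
	if _, err := f.Write(data); err != nil {
		s.reset()
		return err
	}
	s.parts = append(s.parts, part)
	return nil
}

// reset discards the partial file and the record of appended parts in
// one step, so a later retry starts from scratch.
func (s *state) reset() {
	os.Remove(s.scratchPath) // analogue of disk.DeleteFile(tmp, uploadID)
	s.parts = nil            // analogue of appendMeta.Parts = nil
}
```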
@@ -175,6 +175,7 @@ func (b *backgroundAppend) appendParts(disk StorageAPI, bucket, object, uploadID
 			disk.DeleteFile(minioMetaTmpBucket, uploadID)
 			close(info.timeoutCh)
+			return
 		}
 	}
 }
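The added return ends the appendParts goroutine once it has timed out and cleaned up. Without it the loop would run another iteration, hit the idle timeout again, and risk calling close(info.timeoutCh) a second time, which panics in Go. A runnable sketch of the loop shape (names and the timeout value are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// worker mirrors the appendParts loop: serve inputs until an idle
// timeout, then clean up, signal racing senders, and exit.
func worker(inputCh <-chan int, timeoutCh chan struct{}) {
	for {
		select {
		case in := <-inputCh:
			fmt.Println("handled input", in)
		case <-time.After(50 * time.Millisecond):
			close(timeoutCh) // tell racing senders the worker is gone
			return           // the added return: loop once more and the
			// next timeout would close(timeoutCh) again and panic
		}
	}
}

func main() {
	inputCh := make(chan int)
	timeoutCh := make(chan struct{})
	go worker(inputCh, timeoutCh)
	inputCh <- 1
	<-timeoutCh // wait until the worker times out and exits
}
```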
@@ -182,7 +183,7 @@ func (b *backgroundAppend) appendParts(disk StorageAPI, bucket, object, uploadID
 // Appends the "part" to the append-file inside "tmp/" that finally gets moved to the actual location
 // upon complete-multipart-upload.
 func appendPart(disk StorageAPI, bucket, object, uploadID string, part objectPartInfo) error {
-	partPath := pathJoin(mpartMetaPrefix, bucket, object, uploadID, part.Name)
+	partPath := pathJoin(bucket, object, uploadID, part.Name)
 
 	offset := int64(0)
 	totalLeft := part.Size
@@ -192,18 +193,15 @@ func appendPart(disk StorageAPI, bucket, object, uploadID string, part objectPartInfo) error {
 		if totalLeft < readSizeV1 {
 			curLeft = totalLeft
 		}
-		var n int64
 		n, err := disk.ReadFile(minioMetaMultipartBucket, partPath, offset, buf[:curLeft])
 		if err != nil {
 			// Check for EOF/ErrUnexpectedEOF not needed as it should never happen as we know
 			// the exact size of the file and hence know the size of buf[]
 			// EOF/ErrUnexpectedEOF indicates that the length of file was shorter than part.Size and
 			// hence considered as an error condition.
-			disk.DeleteFile(minioMetaTmpBucket, uploadID)
 			return err
 		}
 		if err = disk.AppendFile(minioMetaTmpBucket, uploadID, buf[:n]); err != nil {
-			disk.DeleteFile(minioMetaTmpBucket, uploadID)
 			return err
 		}
 		offset += n
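The one-line partPath change is the bug named in the commit title: disk.ReadFile is already addressed at minioMetaMultipartBucket, so prefixing the path with mpartMetaPrefix again pointed reads at a doubled multipart prefix. The two disk.DeleteFile calls move out of appendPart because cleanup is now done once by its caller, appendParts (the hunk at -153 above). The loop itself is a fixed-chunk copy; a compilable sketch of its shape, where readSize stands in for readSizeV1 (the 1 MiB value is an assumption) and all other names are illustrative:

```go
package chunkappend

import (
	"io"
	"os"
)

// readSize plays the role of readSizeV1; the exact value in the repo
// may differ (assumption: 1 MiB).
const readSize = 1 << 20

// appendChunks copies exactly size bytes from src onto the end of dst
// in fixed-size chunks, mirroring the appendPart loop.
func appendChunks(dst *os.File, src *os.File, size int64) error {
	buf := make([]byte, readSize)
	var offset int64
	totalLeft := size
	for totalLeft > 0 {
		curLeft := int64(readSize)
		if totalLeft < readSize {
			curLeft = totalLeft
		}
		n, err := src.ReadAt(buf[:curLeft], offset)
		if int64(n) < curLeft {
			// Source shorter than the recorded part size; like the
			// original comment says, EOF here is a hard error.
			if err == nil || err == io.EOF {
				err = io.ErrUnexpectedEOF
			}
			return err
		}
		if _, err := dst.Write(buf[:n]); err != nil {
			return err
		}
		offset += int64(n)
		totalLeft -= int64(n)
	}
	return nil
}
```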

cmd/fs-v1-multipart.go

@@ -401,17 +401,30 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
 	fsMeta.AddObjectPart(partID, partSuffix, newMD5Hex, size)
 
 	partPath := path.Join(bucket, object, uploadID, partSuffix)
+	// Lock the part so that another part upload with same part-number gets blocked
+	// while the part is getting appended in the background.
+	partLock := nsMutex.NewNSLock(minioMetaMultipartBucket, partPath)
+	partLock.Lock()
 	err = fs.storage.RenameFile(minioMetaTmpBucket, tmpPartPath, minioMetaMultipartBucket, partPath)
 	if err != nil {
+		partLock.Unlock()
 		return "", toObjectErr(traceError(err), minioMetaMultipartBucket, partPath)
 	}
 
 	uploadIDPath = path.Join(bucket, object, uploadID)
 	if err = writeFSMetadata(fs.storage, minioMetaMultipartBucket, path.Join(uploadIDPath, fsMetaJSONFile), fsMeta); err != nil {
+		partLock.Unlock()
 		return "", toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
 	}
 
-	// Append the part in background.
-	fs.bgAppend.append(fs.storage, bucket, object, uploadID, fsMeta)
+	go func() {
+		// Append the part in background.
+		errCh := fs.bgAppend.append(fs.storage, bucket, object, uploadID, fsMeta)
+		// Also receive the error so that the appendParts go-routine does not block on send.
+		// But the error received is ignored as fs.PutObjectPart() would have already
+		// returned success to the client.
+		<-errCh
+		partLock.Unlock()
+	}()
 	return newMD5Hex, nil
 }
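This hunk ties both changes together: PutObjectPart takes a per-part namespace lock before renaming the part into place, and releases it from a new goroutine only after the background append has been drained via errCh. A concurrent upload of the same part number therefore blocks on partLock.Lock() until the previous copy has been fully appended, so the appender can never read a part file that is being replaced underneath it. Go's sync.Mutex may be unlocked from a different goroutine than the one that locked it, which is what makes this hand-off legal; a toy, self-contained sketch (the lock table stands in for nsMutex and is not MinIO code):

```go
package main

import (
	"fmt"
	"sync"
)

// keyLocks is a toy per-resource lock table standing in for nsMutex.
type keyLocks struct {
	mu    sync.Mutex
	locks map[string]*sync.Mutex
}

func (k *keyLocks) get(key string) *sync.Mutex {
	k.mu.Lock()
	defer k.mu.Unlock()
	if k.locks[key] == nil {
		k.locks[key] = &sync.Mutex{}
	}
	return k.locks[key]
}

func main() {
	kl := &keyLocks{locks: map[string]*sync.Mutex{}}
	partLock := kl.get("bucket/object/uploadID/part.1")

	partLock.Lock() // taken synchronously, as in PutObjectPart
	done := make(chan struct{})
	go func() {
		// ... background append would run here ...
		partLock.Unlock() // released only once the append has finished
		close(done)
	}()

	// A second upload of part.1 blocks here until the append completes.
	partLock.Lock()
	fmt.Println("previous append finished; safe to overwrite part")
	partLock.Unlock()
	<-done
}
```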
