fix: improve performance of ListObjectParts in FS mode (#10510)

Reduces the time from 20s for 10000 parts to less than 1 sec.

Without the patch
```
~ time aws --endpoint-url=http://localhost:9000 --profile minio s3api \
       list-parts --bucket testbucket --key test \
       --upload-id c1cd1f50-ea9a-4824-881c-63b5de95315a

real    0m20.394s
user    0m0.589s
sys     0m0.174s
```

With the patch
```
~ time aws --endpoint-url=http://localhost:9000 --profile minio s3api \
       list-parts --bucket testbucket --key test \
       --upload-id c1cd1f50-ea9a-4824-881c-63b5de95315a

real    0m0.891s
user    0m0.624s
sys     0m0.182s
```

fixes #10503
master
Harshavardhana 4 years ago committed by GitHub
parent 00555c747e
commit 4a36cd7035
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 71
      cmd/fs-v1-multipart.go

@@ -446,52 +446,51 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
 		return result, toObjectErr(err, bucket)
 	}
-	partsMap := make(map[int]string)
+	partsMap := make(map[int]PartInfo)
 	for _, entry := range entries {
 		if entry == fs.metaJSONFile {
 			continue
 		}
-		partNumber, etag1, _, derr := fs.decodePartFile(entry)
+
+		partNumber, currentEtag, actualSize, derr := fs.decodePartFile(entry)
 		if derr != nil {
 			// Skip part files whose name don't match expected format. These could be backend filesystem specific files.
 			continue
 		}
-		etag2, ok := partsMap[partNumber]
-		if !ok {
-			partsMap[partNumber] = etag1
-			continue
-		}
-		stat1, serr := fsStatFile(ctx, pathJoin(uploadIDDir, getPartFile(entries, partNumber, etag1)))
-		if serr != nil {
-			return result, toObjectErr(serr)
+
+		entryStat, err := fsStatFile(ctx, pathJoin(uploadIDDir, entry))
+		if err != nil {
+			continue
 		}
-		stat2, serr := fsStatFile(ctx, pathJoin(uploadIDDir, getPartFile(entries, partNumber, etag2)))
-		if serr != nil {
-			return result, toObjectErr(serr)
+
+		currentMeta := PartInfo{
+			PartNumber:   partNumber,
+			ETag:         currentEtag,
+			ActualSize:   actualSize,
+			Size:         entryStat.Size(),
+			LastModified: entryStat.ModTime(),
 		}
-		if stat1.ModTime().After(stat2.ModTime()) {
-			partsMap[partNumber] = etag1
+
+		cachedMeta, ok := partsMap[partNumber]
+		if !ok {
+			partsMap[partNumber] = currentMeta
+			continue
+		}
+
+		if currentMeta.LastModified.After(cachedMeta.LastModified) {
+			partsMap[partNumber] = currentMeta
 		}
 	}
+
 	var parts []PartInfo
-	var actualSize int64
-	for partNumber, etag := range partsMap {
-		partFile := getPartFile(entries, partNumber, etag)
-		if partFile == "" {
-			return result, InvalidPart{}
-		}
-		// Read the actualSize from the pathFileName.
-		subParts := strings.Split(partFile, ".")
-		actualSize, err = strconv.ParseInt(subParts[len(subParts)-1], 10, 64)
-		if err != nil {
-			return result, InvalidPart{}
-		}
-		parts = append(parts, PartInfo{PartNumber: partNumber, ETag: etag, ActualSize: actualSize})
+	for _, partInfo := range partsMap {
+		parts = append(parts, partInfo)
 	}
+
 	sort.Slice(parts, func(i int, j int) bool {
 		return parts[i].PartNumber < parts[j].PartNumber
 	})
 	i := 0
 	if partNumberMarker != 0 {
 		// If the marker was set, skip the entries till the marker.
@@ -515,21 +514,19 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
 			result.NextPartNumberMarker = result.Parts[partsCount-1].PartNumber
 		}
 	}
-	for i, part := range result.Parts {
-		var stat os.FileInfo
-		stat, err = fsStatFile(ctx, pathJoin(uploadIDDir,
-			fs.encodePartFile(part.PartNumber, part.ETag, part.ActualSize)))
-		if err != nil {
-			return result, toObjectErr(err)
-		}
-		result.Parts[i].LastModified = stat.ModTime()
-		result.Parts[i].Size = part.ActualSize
+
+	rc, _, err := fsOpenFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile), 0)
+	if err != nil {
+		if err == errFileNotFound || err == errFileAccessDenied {
+			return result, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID}
+		}
+		return result, toObjectErr(err, bucket, object)
 	}
-
-	fsMetaBytes, err := ioutil.ReadFile(pathJoin(uploadIDDir, fs.metaJSONFile))
+	defer rc.Close()
+	fsMetaBytes, err := ioutil.ReadAll(rc)
 	if err != nil {
-		logger.LogIf(ctx, err)
-		return result, err
+		return result, toObjectErr(err, bucket, object)
 	}
+
 	var fsMeta fsMetaV1

Loading…
Cancel
Save