fix: replication metadata comparison and other fixes (#11410)

- using miniogo.ObjectInfo.UserMetadata is not correct
- using UserTags from Map->String() can change order
- ContentType comparison needs to be removed.
- Compare both lowercase and uppercase key names.
- do not silently error out constructing PutObjectOptions
  if tag parsing fails
- avoid notification for empty object info, failed operations
  should rely on valid objInfo for notification in all
  situations
- optimize copyObject implementation, also introduce a new 
  replication event
- clone ObjectInfo() before scheduling for replication
- add additional headers for comparison
- remove strings.EqualFold comparison to avoid unexpected bugs
- fix pool based proxying with multiple pools
- compare only specific metadata

Co-authored-by: Poorna Krishnamoorthy <poornas@users.noreply.github.com>
master
Harshavardhana 4 years ago committed by GitHub
parent 871b450dbd
commit f108873c48
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 6
      cmd/api-headers.go
  2. 14
      cmd/api-response.go
  3. 3
      cmd/bucket-handlers.go
  4. 297
      cmd/bucket-replication.go
  5. 15
      cmd/erasure-object.go
  6. 18
      cmd/erasure-server-pool.go
  7. 2
      cmd/gateway/s3/gateway-s3-sse.go
  8. 29
      cmd/handler-utils.go
  9. 3
      cmd/handler-utils_test.go
  10. 45
      cmd/object-api-datatypes.go
  11. 22
      cmd/object-handlers.go
  12. 2
      cmd/web-handlers.go
  13. 17
      cmd/xl-storage-format-v2.go
  14. 2
      cmd/xl-storage.go

@ -133,18 +133,20 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
} }
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w // https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentLength) || strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentMD5) { if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
continue continue
} }
var isSet bool var isSet bool
for _, userMetadataPrefix := range userMetadataKeyPrefixes { for _, userMetadataPrefix := range userMetadataKeyPrefixes {
if !strings.HasPrefix(k, userMetadataPrefix) { if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(userMetadataPrefix)) {
continue continue
} }
w.Header()[strings.ToLower(k)] = []string{v} w.Header()[strings.ToLower(k)] = []string{v}
isSet = true isSet = true
break break
} }
if !isSet { if !isSet {
w.Header().Set(k, v) w.Header().Set(k, v)
} }

@ -565,7 +565,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
continue continue
} }
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w // https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentLength) || strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentMD5) { if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
continue continue
} }
content.UserMetadata[k] = v content.UserMetadata[k] = v
@ -639,8 +639,16 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
listPartsResponse.Key = s3EncodeName(partsInfo.Object, encodingType) listPartsResponse.Key = s3EncodeName(partsInfo.Object, encodingType)
listPartsResponse.UploadID = partsInfo.UploadID listPartsResponse.UploadID = partsInfo.UploadID
listPartsResponse.StorageClass = globalMinioDefaultStorageClass listPartsResponse.StorageClass = globalMinioDefaultStorageClass
listPartsResponse.Initiator.ID = globalMinioDefaultOwnerID
listPartsResponse.Owner.ID = globalMinioDefaultOwnerID // Dumb values not meaningful
listPartsResponse.Initiator = Initiator{
ID: globalMinioDefaultOwnerID,
DisplayName: globalMinioDefaultOwnerID,
}
listPartsResponse.Owner = Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: globalMinioDefaultOwnerID,
}
listPartsResponse.MaxParts = partsInfo.MaxParts listPartsResponse.MaxParts = partsInfo.MaxParts
listPartsResponse.PartNumberMarker = partsInfo.PartNumberMarker listPartsResponse.PartNumberMarker = partsInfo.PartNumberMarker

@ -22,6 +22,7 @@ import (
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
"net/textproto"
"net/url" "net/url"
"path" "path"
"path/filepath" "path/filepath"
@ -903,7 +904,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
// Extract metadata to be saved from received Form. // Extract metadata to be saved from received Form.
metadata := make(map[string]string) metadata := make(map[string]string)
err = extractMetadataFromMap(ctx, formValues, metadata) err = extractMetadataFromMime(ctx, textproto.MIMEHeader(formValues), metadata)
if err != nil { if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return return

@ -20,6 +20,7 @@ import (
"context" "context"
"fmt" "fmt"
"net/http" "net/http"
"reflect"
"strings" "strings"
"time" "time"
@ -168,14 +169,8 @@ func hasReplicationRules(ctx context.Context, bucket string, objects []ObjectToD
} }
// isStandardHeader returns true if header is a supported header and not a custom header // isStandardHeader returns true if header is a supported header and not a custom header
func isStandardHeader(headerKey string) bool { func isStandardHeader(matchHeaderKey string) bool {
key := strings.ToLower(headerKey) return equals(matchHeaderKey, standardHeaders...)
for _, header := range standardHeaders {
if strings.ToLower(header) == key {
return true
}
}
return false
} }
// returns whether object version is a deletemarker and if object qualifies for replication // returns whether object version is a deletemarker and if object qualifies for replication
@ -225,20 +220,44 @@ func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelet
// on target. // on target.
func replicateDelete(ctx context.Context, dobj DeletedObjectVersionInfo, objectAPI ObjectLayer) { func replicateDelete(ctx context.Context, dobj DeletedObjectVersionInfo, objectAPI ObjectLayer) {
bucket := dobj.Bucket bucket := dobj.Bucket
versionID := dobj.DeleteMarkerVersionID
if versionID == "" {
versionID = dobj.VersionID
}
rcfg, err := getReplicationConfig(ctx, bucket) rcfg, err := getReplicationConfig(ctx, bucket)
if err != nil || rcfg == nil { if err != nil || rcfg == nil {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
sendEvent(eventArgs{
BucketName: bucket,
Object: ObjectInfo{
Bucket: bucket,
Name: dobj.ObjectName,
VersionID: versionID,
DeleteMarker: dobj.DeleteMarker,
},
Host: "Internal: [Replication]",
EventName: event.ObjectReplicationNotTracked,
})
return return
} }
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, rcfg.RoleArn) tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, rcfg.RoleArn)
if tgt == nil { if tgt == nil {
logger.LogIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, rcfg.RoleArn)) logger.LogIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, rcfg.RoleArn))
sendEvent(eventArgs{
BucketName: bucket,
Object: ObjectInfo{
Bucket: bucket,
Name: dobj.ObjectName,
VersionID: versionID,
DeleteMarker: dobj.DeleteMarker,
},
Host: "Internal: [Replication]",
EventName: event.ObjectReplicationNotTracked,
})
return return
} }
versionID := dobj.DeleteMarkerVersionID
if versionID == "" {
versionID = dobj.VersionID
}
rmErr := tgt.RemoveObject(ctx, rcfg.GetDestination().Bucket, dobj.ObjectName, miniogo.RemoveObjectOptions{ rmErr := tgt.RemoveObject(ctx, rcfg.GetDestination().Bucket, dobj.ObjectName, miniogo.RemoveObjectOptions{
VersionID: versionID, VersionID: versionID,
Internal: miniogo.AdvancedRemoveOptions{ Internal: miniogo.AdvancedRemoveOptions{
@ -257,6 +276,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectVersionInfo, objectA
} else { } else {
versionPurgeStatus = Failed versionPurgeStatus = Failed
} }
logger.LogIf(ctx, fmt.Errorf("Unable to replicate delete marker to %s/%s(%s): %w", rcfg.GetDestination().Bucket, dobj.ObjectName, versionID, err))
} else { } else {
if dobj.VersionID == "" { if dobj.VersionID == "" {
replicationStatus = string(replication.Completed) replicationStatus = string(replication.Completed)
@ -271,61 +291,78 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectVersionInfo, objectA
} }
// Update metadata on the delete marker or purge permanent delete if replication success. // Update metadata on the delete marker or purge permanent delete if replication success.
objInfo, err := objectAPI.DeleteObject(ctx, bucket, dobj.ObjectName, ObjectOptions{ dobjInfo, err := objectAPI.DeleteObject(ctx, bucket, dobj.ObjectName, ObjectOptions{
VersionID: versionID, VersionID: versionID,
DeleteMarker: dobj.DeleteMarker, DeleteMarker: dobj.DeleteMarker,
DeleteMarkerReplicationStatus: replicationStatus, DeleteMarkerReplicationStatus: replicationStatus,
Versioned: globalBucketVersioningSys.Enabled(bucket),
VersionPurgeStatus: versionPurgeStatus, VersionPurgeStatus: versionPurgeStatus,
Versioned: globalBucketVersioningSys.Enabled(bucket),
VersionSuspended: globalBucketVersioningSys.Suspended(bucket), VersionSuspended: globalBucketVersioningSys.Suspended(bucket),
}) })
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s %s: %w", bucket, dobj.ObjectName, dobj.VersionID, err)) logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %w", bucket, dobj.ObjectName, versionID, err))
sendEvent(eventArgs{
BucketName: bucket,
Object: ObjectInfo{
Bucket: bucket,
Name: dobj.ObjectName,
VersionID: versionID,
DeleteMarker: dobj.DeleteMarker,
},
Host: "Internal: [Replication]",
EventName: eventName,
})
} else {
sendEvent(eventArgs{
BucketName: bucket,
Object: dobjInfo,
Host: "Internal: [Replication]",
EventName: eventName,
})
} }
sendEvent(eventArgs{
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
EventName: eventName,
})
} }
func getCopyObjMetadata(oi ObjectInfo, dest replication.Destination) map[string]string { func getCopyObjMetadata(oi ObjectInfo, dest replication.Destination) map[string]string {
meta := make(map[string]string, len(oi.UserDefined)) meta := make(map[string]string, len(oi.UserDefined))
for k, v := range oi.UserDefined { for k, v := range oi.UserDefined {
if k == xhttp.AmzBucketReplicationStatus { if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
continue continue
} }
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
if equals(k, xhttp.AmzBucketReplicationStatus) {
continue
}
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
continue continue
} }
meta[k] = v meta[k] = v
} }
if oi.ContentEncoding != "" { if oi.ContentEncoding != "" {
meta[xhttp.ContentEncoding] = oi.ContentEncoding meta[xhttp.ContentEncoding] = oi.ContentEncoding
} }
if oi.ContentType != "" { if oi.ContentType != "" {
meta[xhttp.ContentType] = oi.ContentType meta[xhttp.ContentType] = oi.ContentType
} }
tag, err := tags.ParseObjectTags(oi.UserTags)
if err != nil { if oi.UserTags != "" {
return nil meta[xhttp.AmzObjectTagging] = oi.UserTags
}
if tag != nil {
meta[xhttp.AmzObjectTagging] = tag.String()
meta[xhttp.AmzTagDirective] = "REPLACE" meta[xhttp.AmzTagDirective] = "REPLACE"
} }
sc := dest.StorageClass sc := dest.StorageClass
if sc == "" { if sc == "" {
sc = oi.StorageClass sc = oi.StorageClass
} }
meta[xhttp.AmzStorageClass] = sc if sc != "" {
if oi.UserTags != "" { meta[xhttp.AmzStorageClass] = sc
meta[xhttp.AmzObjectTagging] = oi.UserTags
} }
meta[xhttp.MinIOSourceMTime] = oi.ModTime.Format(time.RFC3339Nano)
meta[xhttp.MinIOSourceETag] = oi.ETag meta[xhttp.MinIOSourceETag] = oi.ETag
meta[xhttp.MinIOSourceMTime] = oi.ModTime.Format(time.RFC3339Nano)
meta[xhttp.AmzBucketReplicationStatus] = replication.Replica.String() meta[xhttp.AmzBucketReplicationStatus] = replication.Replica.String()
return meta return meta
} }
@ -341,17 +378,13 @@ func putReplicationOpts(ctx context.Context, dest replication.Destination, objIn
} }
meta[k] = v meta[k] = v
} }
tag, err := tags.ParseObjectTags(objInfo.UserTags)
if err != nil {
return
}
sc := dest.StorageClass sc := dest.StorageClass
if sc == "" { if sc == "" {
sc = objInfo.StorageClass sc = objInfo.StorageClass
} }
putOpts = miniogo.PutObjectOptions{ putOpts = miniogo.PutObjectOptions{
UserMetadata: meta, UserMetadata: meta,
UserTags: tag.ToMap(),
ContentType: objInfo.ContentType, ContentType: objInfo.ContentType,
ContentEncoding: objInfo.ContentEncoding, ContentEncoding: objInfo.ContentEncoding,
StorageClass: sc, StorageClass: sc,
@ -362,6 +395,12 @@ func putReplicationOpts(ctx context.Context, dest replication.Destination, objIn
SourceETag: objInfo.ETag, SourceETag: objInfo.ETag,
}, },
} }
if objInfo.UserTags != "" {
tag, _ := tags.ParseObjectTags(objInfo.UserTags)
if tag != nil {
putOpts.UserTags = tag.ToMap()
}
}
if lang, ok := objInfo.UserDefined[xhttp.ContentLanguage]; ok { if lang, ok := objInfo.UserDefined[xhttp.ContentLanguage]; ok {
putOpts.ContentLanguage = lang putOpts.ContentLanguage = lang
} }
@ -400,6 +439,16 @@ const (
replicateAll replicationAction = "all" replicateAll replicationAction = "all"
) )
// equals reports whether k1 matches any of the given keys, comparing
// case-insensitively. The comparison intentionally uses strings.ToLower
// on both sides (rather than strings.EqualFold) to match how metadata
// keys are normalized elsewhere in this package.
func equals(k1 string, keys ...string) bool {
	// Hoist the loop-invariant lowering of k1 out of the loop so it is
	// computed once instead of once per candidate key.
	k1Lower := strings.ToLower(k1)
	for _, k2 := range keys {
		if k1Lower == strings.ToLower(k2) {
			return true
		}
	}
	return false
}
// returns replicationAction by comparing metadata between source and target // returns replicationAction by comparing metadata between source and target
func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo) replicationAction { func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo) replicationAction {
// needs full replication // needs full replication
@ -407,83 +456,153 @@ func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo) replicationActio
oi1.VersionID != oi2.VersionID || oi1.VersionID != oi2.VersionID ||
oi1.Size != oi2.Size || oi1.Size != oi2.Size ||
oi1.DeleteMarker != oi2.IsDeleteMarker || oi1.DeleteMarker != oi2.IsDeleteMarker ||
!oi1.ModTime.Equal(oi2.LastModified) { oi1.ModTime.Unix() != oi2.LastModified.Unix() {
return replicateAll return replicateAll
} }
if oi1.ContentType != oi2.ContentType { if oi1.ContentType != oi2.ContentType {
return replicateMetadata return replicateMetadata
} }
if oi1.ContentEncoding != "" { if oi1.ContentEncoding != "" {
enc, ok := oi2.Metadata[xhttp.ContentEncoding] enc, ok := oi2.Metadata[xhttp.ContentEncoding]
if !ok || strings.Join(enc, "") != oi1.ContentEncoding { if !ok {
enc, ok = oi2.Metadata[strings.ToLower(xhttp.ContentEncoding)]
if !ok {
return replicateMetadata
}
}
if strings.Join(enc, ",") != oi1.ContentEncoding {
return replicateMetadata return replicateMetadata
} }
} }
// compare metadata on both maps to see if meta is identical
for k1, v1 := range oi1.UserDefined { t, _ := tags.ParseObjectTags(oi1.UserTags)
if v2, ok := oi2.UserMetadata[k1]; ok && v1 == v2 { if !reflect.DeepEqual(oi2.UserTags, t.ToMap()) {
continue
}
if v2, ok := oi2.Metadata[k1]; ok && v1 == strings.Join(v2, "") {
continue
}
return replicateMetadata return replicateMetadata
} }
for k1, v1 := range oi2.UserMetadata {
if v2, ok := oi1.UserDefined[k1]; !ok || v1 != v2 { // Compare only necessary headers
return replicateMetadata compareKeys := []string{
"Expires",
"Cache-Control",
"Content-Language",
"Content-Disposition",
"X-Amz-Object-Lock-Mode",
"X-Amz-Object-Lock-Retain-Until-Date",
"X-Amz-Object-Lock-Legal-Hold",
"X-Amz-Website-Redirect-Location",
"X-Amz-Meta-",
}
// compare metadata on both maps to see if meta is identical
compareMeta1 := make(map[string]string)
for k, v := range oi1.UserDefined {
var found bool
for _, prefix := range compareKeys {
if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
continue
}
found = true
break
}
if found {
compareMeta1[strings.ToLower(k)] = v
} }
} }
for k1, v1slc := range oi2.Metadata {
v1 := strings.Join(v1slc, "") compareMeta2 := make(map[string]string)
if k1 == xhttp.ContentEncoding { //already compared for k, v := range oi2.Metadata {
continue var found bool
for _, prefix := range compareKeys {
if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
continue
}
found = true
break
} }
if v2, ok := oi1.UserDefined[k1]; !ok || v1 != v2 { if found {
return replicateMetadata compareMeta2[strings.ToLower(k)] = strings.Join(v, ",")
} }
} }
t, _ := tags.MapToObjectTags(oi2.UserTags)
if t.String() != oi1.UserTags { if !reflect.DeepEqual(compareMeta1, compareMeta2) {
return replicateMetadata return replicateMetadata
} }
return replicateNone return replicateNone
} }
// replicateObject replicates the specified version of the object to destination bucket // replicateObject replicates the specified version of the object to destination bucket
// The source object is then updated to reflect the replication status. // The source object is then updated to reflect the replication status.
func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLayer) { func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLayer) {
z, ok := objectAPI.(*erasureServerPools)
if !ok {
return
}
bucket := objInfo.Bucket bucket := objInfo.Bucket
object := objInfo.Name object := objInfo.Name
cfg, err := getReplicationConfig(ctx, bucket) cfg, err := getReplicationConfig(ctx, bucket)
if err != nil { if err != nil {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
return return
} }
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, cfg.RoleArn) tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, cfg.RoleArn)
if tgt == nil { if tgt == nil {
logger.LogIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, cfg.RoleArn)) logger.LogIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, cfg.RoleArn))
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
return return
} }
gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{ gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{
VersionID: objInfo.VersionID, VersionID: objInfo.VersionID,
}) })
if err != nil { if err != nil {
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
return return
} }
defer gr.Close() // hold read lock for entire transaction
objInfo = gr.ObjInfo objInfo = gr.ObjInfo
size, err := objInfo.GetActualSize() size, err := objInfo.GetActualSize()
if err != nil { if err != nil {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
gr.Close() sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
return return
} }
dest := cfg.GetDestination() dest := cfg.GetDestination()
if dest.Bucket == "" { if dest.Bucket == "" {
gr.Close() logger.LogIf(ctx, fmt.Errorf("Unable to replicate object %s(%s), bucket is empty", objInfo.Name, objInfo.VersionID))
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
return return
} }
@ -496,7 +615,6 @@ func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLa
if err == nil { if err == nil {
rtype = getReplicationAction(objInfo, oi) rtype = getReplicationAction(objInfo, oi)
if rtype == replicateNone { if rtype == replicateNone {
gr.Close()
// object with same VersionID already exists, replication kicked off by // object with same VersionID already exists, replication kicked off by
// PutObject might have completed. // PutObject might have completed.
return return
@ -504,19 +622,23 @@ func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLa
} }
replicationStatus := replication.Completed replicationStatus := replication.Completed
if rtype != replicateAll { if rtype != replicateAll {
gr.Close()
// replicate metadata for object tagging/copy with metadata replacement // replicate metadata for object tagging/copy with metadata replacement
dstOpts := miniogo.PutObjectOptions{Internal: miniogo.AdvancedPutOptions{SourceVersionID: objInfo.VersionID}} dstOpts := miniogo.PutObjectOptions{Internal: miniogo.AdvancedPutOptions{SourceVersionID: objInfo.VersionID}}
c := &miniogo.Core{Client: tgt.Client} c := &miniogo.Core{Client: tgt.Client}
_, err = c.CopyObject(ctx, dest.Bucket, object, dest.Bucket, object, getCopyObjMetadata(objInfo, dest), dstOpts) if _, err = c.CopyObject(ctx, dest.Bucket, object, dest.Bucket, object, getCopyObjMetadata(objInfo, dest), dstOpts); err != nil {
if err != nil {
replicationStatus = replication.Failed replicationStatus = replication.Failed
logger.LogIf(ctx, fmt.Errorf("Unable to replicate metadata for object %s/%s(%s): %s", bucket, objInfo.Name, objInfo.VersionID, err))
} }
} else { } else {
target, err := globalBucketMetadataSys.GetBucketTarget(bucket, cfg.RoleArn) target, err := globalBucketMetadataSys.GetBucketTarget(bucket, cfg.RoleArn)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("failed to get target for replication bucket:%s cfg:%s err:%s", bucket, cfg.RoleArn, err)) logger.LogIf(ctx, fmt.Errorf("failed to get target for replication bucket:%s cfg:%s err:%s", bucket, cfg.RoleArn, err))
gr.Close() sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
return return
} }
@ -535,11 +657,11 @@ func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLa
// r takes over closing gr. // r takes over closing gr.
r := bandwidth.NewMonitoredReader(ctx, globalBucketMonitor, objInfo.Bucket, objInfo.Name, gr, headerSize, b, target.BandwidthLimit) r := bandwidth.NewMonitoredReader(ctx, globalBucketMonitor, objInfo.Bucket, objInfo.Name, gr, headerSize, b, target.BandwidthLimit)
_, err = tgt.PutObject(ctx, dest.Bucket, object, r, size, putOpts) if _, err = tgt.PutObject(ctx, dest.Bucket, object, r, size, putOpts); err != nil {
if err != nil {
replicationStatus = replication.Failed replicationStatus = replication.Failed
logger.LogIf(ctx, fmt.Errorf("Unable to replicate for object %s/%s(%s): %s", bucket, objInfo.Name, objInfo.VersionID, err))
} }
r.Close() defer r.Close()
} }
objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String() objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
@ -548,7 +670,6 @@ func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLa
} }
// FIXME: add support for missing replication events // FIXME: add support for missing replication events
// - event.ObjectReplicationNotTracked
// - event.ObjectReplicationMissedThreshold // - event.ObjectReplicationMissedThreshold
// - event.ObjectReplicationReplicatedAfterThreshold // - event.ObjectReplicationReplicatedAfterThreshold
var eventName = event.ObjectReplicationComplete var eventName = event.ObjectReplicationComplete
@ -556,16 +677,19 @@ func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLa
eventName = event.ObjectReplicationFailed eventName = event.ObjectReplicationFailed
} }
objInfo.metadataOnly = true // Perform only metadata updates. // This lower level implementation is necessary to avoid write locks from CopyObject.
objInfo, err = objectAPI.CopyObject(ctx, bucket, object, bucket, object, objInfo, ObjectOptions{ poolIdx, err := z.getPoolIdx(ctx, bucket, object, ObjectOptions{
VersionID: objInfo.VersionID, VersionID: objInfo.VersionID,
}, ObjectOptions{ }, objInfo.Size)
VersionID: objInfo.VersionID,
})
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s: %s", objInfo.VersionID, err)) logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
} else {
if err = z.serverPools[poolIdx].getHashedSet(object).updateObjectMeta(ctx, bucket, object, objInfo.UserDefined, ObjectOptions{
VersionID: objInfo.VersionID,
}); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
}
} }
sendEvent(eventArgs{ sendEvent(eventArgs{
EventName: eventName, EventName: eventName,
BucketName: bucket, BucketName: bucket,
@ -703,8 +827,11 @@ func proxyGetToReplicationTarget(ctx context.Context, bucket, object string, rs
return nil, false return nil, false
} }
} }
// Make sure to match ETag when proxying.
if err = gopts.SetMatchETag(oi.ETag); err != nil {
return nil, false
}
c := miniogo.Core{Client: tgt.Client} c := miniogo.Core{Client: tgt.Client}
obj, _, _, err := c.GetObject(ctx, bucket, object, gopts) obj, _, _, err := c.GetObject(ctx, bucket, object, gopts)
if err != nil { if err != nil {
return nil, false return nil, false
@ -770,6 +897,7 @@ func proxyHeadToRepTarget(ctx context.Context, bucket, object string, opts Objec
if err != nil { if err != nil {
return nil, oi, false, err return nil, oi, false, err
} }
tags, _ := tags.MapToObjectTags(objInfo.UserTags) tags, _ := tags.MapToObjectTags(objInfo.UserTags)
oi = ObjectInfo{ oi = ObjectInfo{
Bucket: bucket, Bucket: bucket,
@ -784,12 +912,17 @@ func proxyHeadToRepTarget(ctx context.Context, bucket, object string, opts Objec
Expires: objInfo.Expires, Expires: objInfo.Expires,
StorageClass: objInfo.StorageClass, StorageClass: objInfo.StorageClass,
ReplicationStatus: replication.StatusType(objInfo.ReplicationStatus), ReplicationStatus: replication.StatusType(objInfo.ReplicationStatus),
UserDefined: cloneMSS(objInfo.UserMetadata),
UserTags: tags.String(), UserTags: tags.String(),
} }
if ce, ok := oi.UserDefined[xhttp.ContentEncoding]; ok { for k, v := range objInfo.Metadata {
oi.UserDefined[k] = v[0]
}
ce, ok := oi.UserDefined[xhttp.ContentEncoding]
if !ok {
ce, ok = oi.UserDefined[strings.ToLower(xhttp.ContentEncoding)]
}
if ok {
oi.ContentEncoding = ce oi.ContentEncoding = ce
delete(oi.UserDefined, xhttp.ContentEncoding)
} }
return tgt, oi, true, nil return tgt, oi, true, nil
} }

@ -162,14 +162,6 @@ func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object stri
fi, metaArr, onlineDisks, err := er.getObjectFileInfo(ctx, bucket, object, opts, true) fi, metaArr, onlineDisks, err := er.getObjectFileInfo(ctx, bucket, object, opts, true)
if err != nil { if err != nil {
if isProxyable(ctx, bucket) && (errors.Is(err, errFileNotFound) || errors.Is(err, errFileVersionNotFound)) {
// proxy to replication target if active-active replication is in place.
reader, proxy := proxyGetToReplicationTarget(ctx, bucket, object, rs, h, opts)
if reader == nil || !proxy {
return nil, toObjectErr(err, bucket, object)
}
return reader, nil
}
return nil, toObjectErr(err, bucket, object) return nil, toObjectErr(err, bucket, object)
} }
@ -454,13 +446,6 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
func (er erasureObjects) getObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { func (er erasureObjects) getObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
fi, _, _, err := er.getObjectFileInfo(ctx, bucket, object, opts, false) fi, _, _, err := er.getObjectFileInfo(ctx, bucket, object, opts, false)
if err != nil { if err != nil {
// proxy HEAD to replication target if active-active replication configured on bucket
if isProxyable(ctx, bucket) && (errors.Is(err, errFileNotFound) || errors.Is(err, errFileVersionNotFound)) {
oi, proxy, err := proxyHeadToReplicationTarget(ctx, bucket, object, opts)
if proxy {
return oi, err
}
}
return objInfo, toObjectErr(err, bucket, object) return objInfo, toObjectErr(err, bucket, object)
} }

@ -259,13 +259,15 @@ func (z *erasureServerPools) getPoolIdx(ctx context.Context, bucket, object stri
for i, pool := range z.serverPools { for i, pool := range z.serverPools {
objInfo, err := pool.GetObjectInfo(ctx, bucket, object, opts) objInfo, err := pool.GetObjectInfo(ctx, bucket, object, opts)
switch err.(type) { switch err.(type) {
case VersionNotFound:
// VersionId not found, versionId was specified
case ObjectNotFound: case ObjectNotFound:
// VersionId was not specified but found delete marker or no versions exist. // VersionId was not specified but found delete marker or no versions exist.
case MethodNotAllowed: case MethodNotAllowed:
// VersionId was specified but found delete marker // VersionId was specified but found delete marker
default: default:
// All other unhandled errors return right here.
if err != nil { if err != nil {
// any other un-handled errors return right here.
return -1, err return -1, err
} }
} }
@ -531,6 +533,13 @@ func (z *erasureServerPools) GetObjectNInfo(ctx context.Context, bucket, object
} }
return gr, nil return gr, nil
} }
if isProxyable(ctx, bucket) {
// proxy to replication target if active-active replication is in place.
reader, proxy := proxyGetToReplicationTarget(ctx, bucket, object, rs, h, opts)
if reader != nil && proxy {
return reader, nil
}
}
if opts.VersionID != "" { if opts.VersionID != "" {
return gr, VersionNotFound{Bucket: bucket, Object: object, VersionID: opts.VersionID} return gr, VersionNotFound{Bucket: bucket, Object: object, VersionID: opts.VersionID}
} }
@ -576,6 +585,13 @@ func (z *erasureServerPools) GetObjectInfo(ctx context.Context, bucket, object s
return objInfo, nil return objInfo, nil
} }
object = decodeDirObject(object) object = decodeDirObject(object)
// proxy HEAD to replication target if active-active replication configured on bucket
if isProxyable(ctx, bucket) {
oi, proxy, err := proxyHeadToReplicationTarget(ctx, bucket, object, opts)
if proxy {
return oi, err
}
}
if opts.VersionID != "" { if opts.VersionID != "" {
return objInfo, VersionNotFound{Bucket: bucket, Object: object, VersionID: opts.VersionID} return objInfo, VersionNotFound{Bucket: bucket, Object: object, VersionID: opts.VersionID}
} }

@ -365,7 +365,7 @@ func (l *s3EncObjects) GetObjectInfo(ctx context.Context, bucket string, object
// CopyObject copies an object from source bucket to a destination bucket. // CopyObject copies an object from source bucket to a destination bucket.
func (l *s3EncObjects) CopyObject(ctx context.Context, srcBucket string, srcObject string, dstBucket string, dstObject string, srcInfo minio.ObjectInfo, s, d minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { func (l *s3EncObjects) CopyObject(ctx context.Context, srcBucket string, srcObject string, dstBucket string, dstObject string, srcInfo minio.ObjectInfo, s, d minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
cpSrcDstSame := strings.EqualFold(path.Join(srcBucket, srcObject), path.Join(dstBucket, dstObject)) cpSrcDstSame := path.Join(srcBucket, srcObject) == path.Join(dstBucket, dstObject)
if cpSrcDstSame { if cpSrcDstSame {
var gwMeta gwMetaV1 var gwMeta gwMetaV1
if s.ServerSideEncryption != nil && d.ServerSideEncryption != nil && if s.ServerSideEncryption != nil && d.ServerSideEncryption != nil &&

@ -26,6 +26,7 @@ import (
"mime/multipart" "mime/multipart"
"net" "net"
"net/http" "net/http"
"net/textproto"
"net/url" "net/url"
"regexp" "regexp"
"strings" "strings"
@ -74,6 +75,7 @@ var supportedHeaders = []string{
"content-language", "content-language",
"content-encoding", "content-encoding",
"content-disposition", "content-disposition",
"x-amz-storage-class",
xhttp.AmzStorageClass, xhttp.AmzStorageClass,
xhttp.AmzObjectTagging, xhttp.AmzObjectTagging,
"expires", "expires",
@ -103,8 +105,6 @@ func isDirectiveReplace(value string) bool {
// All values stored with a key starting with one of the following prefixes // All values stored with a key starting with one of the following prefixes
// must be extracted from the header. // must be extracted from the header.
var userMetadataKeyPrefixes = []string{ var userMetadataKeyPrefixes = []string{
"X-Amz-Meta-",
"X-Minio-Meta-",
"x-amz-meta-", "x-amz-meta-",
"x-minio-meta-", "x-minio-meta-",
} }
@ -115,13 +115,13 @@ func extractMetadata(ctx context.Context, r *http.Request) (metadata map[string]
header := r.Header header := r.Header
metadata = make(map[string]string) metadata = make(map[string]string)
// Extract all query values. // Extract all query values.
err = extractMetadataFromMap(ctx, query, metadata) err = extractMetadataFromMime(ctx, textproto.MIMEHeader(query), metadata)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Extract all header values. // Extract all header values.
err = extractMetadataFromMap(ctx, header, metadata) err = extractMetadataFromMime(ctx, textproto.MIMEHeader(header), metadata)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -133,7 +133,7 @@ func extractMetadata(ctx context.Context, r *http.Request) (metadata map[string]
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w // https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
for k := range metadata { for k := range metadata {
if strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentLength) || strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentMD5) { if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
delete(metadata, k) delete(metadata, k)
} }
} }
@ -161,25 +161,32 @@ func extractMetadata(ctx context.Context, r *http.Request) (metadata map[string]
} }
// extractMetadata extracts metadata from map values. // extractMetadata extracts metadata from map values.
func extractMetadataFromMap(ctx context.Context, v map[string][]string, m map[string]string) error { func extractMetadataFromMime(ctx context.Context, v textproto.MIMEHeader, m map[string]string) error {
if v == nil { if v == nil {
logger.LogIf(ctx, errInvalidArgument) logger.LogIf(ctx, errInvalidArgument)
return errInvalidArgument return errInvalidArgument
} }
nv := make(textproto.MIMEHeader, len(v))
for k, kv := range v {
// Canonicalize all headers, to remove any duplicates.
nv[http.CanonicalHeaderKey(k)] = kv
}
// Save all supported headers. // Save all supported headers.
for _, supportedHeader := range supportedHeaders { for _, supportedHeader := range supportedHeaders {
if value, ok := v[http.CanonicalHeaderKey(supportedHeader)]; ok { value, ok := nv[http.CanonicalHeaderKey(supportedHeader)]
m[supportedHeader] = value[0] if ok {
} else if value, ok := v[supportedHeader]; ok { m[supportedHeader] = strings.Join(value, ",")
m[supportedHeader] = value[0]
} }
} }
for key := range v { for key := range v {
for _, prefix := range userMetadataKeyPrefixes { for _, prefix := range userMetadataKeyPrefixes {
if !strings.HasPrefix(strings.ToLower(key), strings.ToLower(prefix)) { if !strings.HasPrefix(strings.ToLower(key), strings.ToLower(prefix)) {
continue continue
} }
value, ok := v[key] value, ok := nv[http.CanonicalHeaderKey(key)]
if ok { if ok {
m[key] = strings.Join(value, ",") m[key] = strings.Join(value, ",")
break break

@ -22,6 +22,7 @@ import (
"encoding/xml" "encoding/xml"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/textproto"
"os" "os"
"reflect" "reflect"
"strings" "strings"
@ -197,7 +198,7 @@ func TestExtractMetadataHeaders(t *testing.T) {
// Validate if the extracting headers. // Validate if the extracting headers.
for i, testCase := range testCases { for i, testCase := range testCases {
metadata := make(map[string]string) metadata := make(map[string]string)
err := extractMetadataFromMap(context.Background(), testCase.header, metadata) err := extractMetadataFromMime(context.Background(), textproto.MIMEHeader(testCase.header), metadata)
if err != nil && !testCase.shouldFail { if err != nil && !testCase.shouldFail {
t.Fatalf("Test %d failed to extract metadata: %v", i+1, err) t.Fatalf("Test %d failed to extract metadata: %v", i+1, err)
} }

@ -244,6 +244,51 @@ type ObjectInfo struct {
SuccessorModTime time.Time SuccessorModTime time.Time
} }
// Clone - Returns a cloned copy of current objectInfo
func (o ObjectInfo) Clone() (cinfo ObjectInfo) {
	// Struct assignment copies every field of ObjectInfo — including any
	// fields added to the struct in the future — so the clone cannot drift
	// out of sync with the type the way a hand-written field list can.
	cinfo = o

	// UserDefined is a map and therefore a reference type: the shallow copy
	// above shares its storage with the original. Deep-copy it so mutations
	// of the clone's metadata never leak back into the source object (the
	// whole point of cloning before scheduling replication).
	cinfo.UserDefined = make(map[string]string, len(o.UserDefined))
	for k, v := range o.UserDefined {
		cinfo.UserDefined[k] = v
	}
	return cinfo
}
// MultipartInfo captures metadata information about the uploadId // MultipartInfo captures metadata information about the uploadId
// this data structure is used primarily for some internal purposes // this data structure is used primarily for some internal purposes
// for verifying upload type such as was the upload // for verifying upload type such as was the upload

@ -1284,7 +1284,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
response := generateCopyObjectResponse(objInfo.ETag, objInfo.ModTime) response := generateCopyObjectResponse(objInfo.ETag, objInfo.ModTime)
encodedSuccessResponse := encodeResponse(response) encodedSuccessResponse := encodeResponse(response)
if replicate, sync := mustReplicate(ctx, r, dstBucket, dstObject, objInfo.UserDefined, objInfo.ReplicationStatus.String()); replicate { if replicate, sync := mustReplicate(ctx, r, dstBucket, dstObject, objInfo.UserDefined, objInfo.ReplicationStatus.String()); replicate {
scheduleReplication(ctx, objInfo, objectAPI, sync) scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync)
} }
setPutObjHeaders(w, objInfo, false) setPutObjHeaders(w, objInfo, false)
@ -1598,7 +1598,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
} }
} }
if replicate, sync := mustReplicate(ctx, r, bucket, object, metadata, ""); replicate { if replicate, sync := mustReplicate(ctx, r, bucket, object, metadata, ""); replicate {
scheduleReplication(ctx, objInfo, objectAPI, sync) scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync)
} }
setPutObjHeaders(w, objInfo, false) setPutObjHeaders(w, objInfo, false)
@ -2677,7 +2677,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
setPutObjHeaders(w, objInfo, false) setPutObjHeaders(w, objInfo, false)
if replicate, sync := mustReplicate(ctx, r, bucket, object, objInfo.UserDefined, objInfo.ReplicationStatus.String()); replicate { if replicate, sync := mustReplicate(ctx, r, bucket, object, objInfo.UserDefined, objInfo.ReplicationStatus.String()); replicate {
scheduleReplication(ctx, objInfo, objectAPI, sync) scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync)
} }
// Write success response. // Write success response.
@ -2930,10 +2930,11 @@ func (api objectAPIHandlers) PutObjectLegalHoldHandler(w http.ResponseWriter, r
return return
} }
if replicate { if replicate {
scheduleReplication(ctx, objInfo, objectAPI, sync) scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync)
} }
writeSuccessResponseHeadersOnly(w) writeSuccessResponseHeadersOnly(w)
// Notify object event.
// Notify object event.
sendEvent(eventArgs{ sendEvent(eventArgs{
EventName: event.ObjectCreatedPutLegalHold, EventName: event.ObjectCreatedPutLegalHold,
BucketName: bucket, BucketName: bucket,
@ -3102,7 +3103,7 @@ func (api objectAPIHandlers) PutObjectRetentionHandler(w http.ResponseWriter, r
return return
} }
if replicate { if replicate {
scheduleReplication(ctx, objInfo, objectAPI, sync) scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync)
} }
writeSuccessNoContent(w) writeSuccessNoContent(w)
@ -3285,7 +3286,7 @@ func (api objectAPIHandlers) PutObjectTaggingHandler(w http.ResponseWriter, r *h
} }
if replicate { if replicate {
scheduleReplication(ctx, objInfo, objAPI, sync) scheduleReplication(ctx, objInfo.Clone(), objAPI, sync)
} }
if objInfo.VersionID != "" { if objInfo.VersionID != "" {
@ -3340,6 +3341,7 @@ func (api objectAPIHandlers) DeleteObjectTaggingHandler(w http.ResponseWriter, r
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return return
} }
oi, err := objAPI.GetObjectInfo(ctx, bucket, object, opts) oi, err := objAPI.GetObjectInfo(ctx, bucket, object, opts)
if err != nil { if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
@ -3351,14 +3353,14 @@ func (api objectAPIHandlers) DeleteObjectTaggingHandler(w http.ResponseWriter, r
opts.UserDefined[xhttp.AmzBucketReplicationStatus] = replication.Pending.String() opts.UserDefined[xhttp.AmzBucketReplicationStatus] = replication.Pending.String()
} }
if _, err = objAPI.DeleteObjectTags(ctx, bucket, object, opts); err != nil { oi, err = objAPI.DeleteObjectTags(ctx, bucket, object, opts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return return
} }
oi.UserTags = ""
if replicate { if replicate {
scheduleReplication(ctx, oi, objAPI, sync) scheduleReplication(ctx, oi.Clone(), objAPI, sync)
} }
if oi.VersionID != "" { if oi.VersionID != "" {

@ -1298,7 +1298,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
} }
} }
if mustReplicate { if mustReplicate {
scheduleReplication(ctx, objInfo, objectAPI, sync) scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync)
} }
// Notify object created event. // Notify object created event.

@ -363,10 +363,10 @@ func (j xlMetaV2DeleteMarker) ToFileInfo(volume, path string) (FileInfo, error)
Deleted: true, Deleted: true,
} }
for k, v := range j.MetaSys { for k, v := range j.MetaSys {
if strings.EqualFold(k, xhttp.AmzBucketReplicationStatus) { switch {
case equals(k, xhttp.AmzBucketReplicationStatus):
fi.DeleteMarkerReplicationStatus = string(v) fi.DeleteMarkerReplicationStatus = string(v)
} case equals(k, VersionPurgeStatusKey):
if strings.EqualFold(k, VersionPurgeStatusKey) {
fi.VersionPurgeStatus = VersionPurgeStatusType(string(v)) fi.VersionPurgeStatus = VersionPurgeStatusType(string(v))
} }
} }
@ -408,20 +408,19 @@ func (j xlMetaV2Object) ToFileInfo(volume, path string) (FileInfo, error) {
fi.Metadata = make(map[string]string, len(j.MetaUser)+len(j.MetaSys)) fi.Metadata = make(map[string]string, len(j.MetaUser)+len(j.MetaSys))
for k, v := range j.MetaUser { for k, v := range j.MetaUser {
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w // https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentLength) || strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentMD5) { if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
continue continue
} }
fi.Metadata[k] = v fi.Metadata[k] = v
} }
for k, v := range j.MetaSys { for k, v := range j.MetaSys {
if strings.EqualFold(strings.ToLower(k), ReservedMetadataPrefixLower+"transition-status") { switch {
case equals(k, ReservedMetadataPrefixLower+"transition-status"):
fi.TransitionStatus = string(v) fi.TransitionStatus = string(v)
} case equals(k, VersionPurgeStatusKey):
if strings.EqualFold(k, VersionPurgeStatusKey) {
fi.VersionPurgeStatus = VersionPurgeStatusType(string(v)) fi.VersionPurgeStatus = VersionPurgeStatusType(string(v))
} case strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower):
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
fi.Metadata[k] = string(v) fi.Metadata[k] = string(v)
} }
} }

@ -386,7 +386,7 @@ func (s *xlStorage) CrawlAndGetDataUsage(ctx context.Context, cache dataUsageCac
oi: oi, oi: oi,
bitRotScan: healOpts.Bitrot, bitRotScan: healOpts.Bitrot,
}) })
item.healReplication(ctx, objAPI, oi, &sizeS) item.healReplication(ctx, objAPI, oi.Clone(), &sizeS)
} }
} }
sizeS.totalSize = totalSize sizeS.totalSize = totalSize

Loading…
Cancel
Save