Remove unnecessary error log messages (#6186)

Branch: master
Author: kannappanr (committed by GitHub)
Parent: f5df3b4795
Commit: c7946ab9ab
Files changed:

  1. cmd/fs-v1-helpers.go (2 changed lines)
  2. cmd/gateway/azure/gateway-azure.go (45 changed lines)
  3. cmd/gateway/s3/gateway-s3.go (20 changed lines)
  4. cmd/object-api-input-checks.go (5 changed lines)
  5. cmd/xl-v1-common.go (5 changed lines)
  6. cmd/xl-v1-multipart.go (1 changed line)
  7. cmd/xl-v1-object.go (2 changed lines)
  8. cmd/xl-v1-utils.go (4 changed lines)
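
The change is the same in every file listed above: an error value that is already translated and returned to the caller was also being passed to logger.LogIf at the return site, and it is only that LogIf call which goes away; the returned errors are untouched. A toy before/after sketch of the pattern (not MinIO code; wrap stands in for the azureToObjectError / minio.ErrorRespToObjectError helpers seen in the hunks below):

package main

import (
	"errors"
	"fmt"
	"log"
)

var errBackend = errors.New("container not found")

// wrap plays the role of azureToObjectError / minio.ErrorRespToObjectError:
// decorate a backend error with bucket context before returning it.
func wrap(err error, bucket string) error {
	if err == nil {
		return nil
	}
	return fmt.Errorf("bucket %q: %w", bucket, err)
}

// deleteBucketBefore mirrors the old shape: log at the failure site and
// also return the wrapped error.
func deleteBucketBefore(bucket string) error {
	err := errBackend // stand-in for a failed SDK call
	log.Printf("LogIf: %v", err)
	return wrap(err, bucket)
}

// deleteBucketAfter mirrors the new shape: only return; the caller still
// receives the identical error and decides whether and where to log it.
func deleteBucketAfter(bucket string) error {
	err := errBackend
	return wrap(err, bucket)
}

func main() {
	fmt.Println(deleteBucketBefore("photos"))
	fmt.Println(deleteBucketAfter("photos"))
}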

cmd/fs-v1-helpers.go

@@ -345,7 +345,9 @@ func fsCreateFile(ctx context.Context, filePath string, reader io.Reader, buf []
     if buf != nil {
         bytesWritten, err = io.CopyBuffer(writer, reader, buf)
         if err != nil {
+            if err != io.ErrUnexpectedEOF {
                 logger.LogIf(ctx, err)
+            }
             return 0, err
         }
     } else {
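
This is the only hunk that adds a condition instead of just dropping a log call: the copy error is still returned, but io.ErrUnexpectedEOF is no longer passed to logger.LogIf. That sentinel is what the io helpers report when a reader ends before the expected length, for example a client that stops sending mid-upload, so skipping it keeps the log free of routine disconnects; that reading of the intent is an inference from the code, not stated in the commit. A small stdlib-only demo of where the sentinel comes from and how the guard behaves:

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
)

// logIf stands in for logger.LogIf: record errors that are considered
// server-side faults.
func logIf(err error) {
	log.Printf("unexpected error: %v", err)
}

func main() {
	// Ask for more bytes than the reader holds; io.ReadFull reports this
	// as io.ErrUnexpectedEOF, the same sentinel guarded against above.
	src := bytes.NewReader([]byte("short body"))
	buf := make([]byte, 64)
	_, err := io.ReadFull(src, buf)
	if err != nil {
		if err != io.ErrUnexpectedEOF {
			logIf(err) // genuine failures would still be logged
		}
		fmt.Println("returned to caller:", err) // the error still propagates
	}
}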

cmd/gateway/azure/gateway-azure.go

@@ -193,7 +193,6 @@ func s3MetaToAzureProperties(ctx context.Context, s3Metadata map[string]string)
     storage.BlobProperties, error) {
     for k := range s3Metadata {
         if strings.Contains(k, "--") {
-            logger.LogIf(ctx, minio.UnsupportedMetadata{})
             return storage.BlobMetadata{}, storage.BlobProperties{}, minio.UnsupportedMetadata{}
         }
     }
@@ -377,18 +376,12 @@ func getAzureUploadID() (string, error) {
 // checkAzureUploadID - returns error in case of given string is upload ID.
 func checkAzureUploadID(ctx context.Context, uploadID string) (err error) {
     if len(uploadID) != 16 {
-        logger.LogIf(ctx, minio.MalformedUploadID{
-            UploadID: uploadID,
-        })
         return minio.MalformedUploadID{
             UploadID: uploadID,
         }
     }
     if _, err = hex.DecodeString(uploadID); err != nil {
-        logger.LogIf(ctx, minio.MalformedUploadID{
-            UploadID: uploadID,
-        })
         return minio.MalformedUploadID{
             UploadID: uploadID,
         }
@@ -449,7 +442,6 @@ func (a *azureObjects) MakeBucketWithLocation(ctx context.Context, bucket, locat
     // in azure documentation, so we will simply use the same function here.
     // Ref - https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata
     if !minio.IsValidBucketName(bucket) {
-        logger.LogIf(ctx, minio.BucketNameInvalid{Bucket: bucket})
         return minio.BucketNameInvalid{Bucket: bucket}
     }
@@ -457,7 +449,6 @@ func (a *azureObjects) MakeBucketWithLocation(ctx context.Context, bucket, locat
     err := container.Create(&storage.CreateContainerOptions{
         Access: storage.ContainerAccessTypePrivate,
     })
-    logger.LogIf(ctx, err)
     return azureToObjectError(err, bucket)
 }
@@ -469,7 +460,6 @@ func (a *azureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi min
         Prefix: bucket,
     })
     if err != nil {
-        logger.LogIf(ctx, err)
         return bi, azureToObjectError(err, bucket)
     }
     for _, container := range resp.Containers {
@@ -490,7 +480,6 @@ func (a *azureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi min
 func (a *azureObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketInfo, err error) {
     resp, err := a.client.ListContainers(storage.ListContainersParameters{})
     if err != nil {
-        logger.LogIf(ctx, err)
         return nil, azureToObjectError(err)
     }
     for _, container := range resp.Containers {
@@ -511,7 +500,6 @@ func (a *azureObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketI
 func (a *azureObjects) DeleteBucket(ctx context.Context, bucket string) error {
     container := a.client.GetContainerReference(bucket)
     err := container.Delete(nil)
-    logger.LogIf(ctx, err)
     return azureToObjectError(err, bucket)
 }
@@ -549,7 +537,6 @@ func (a *azureObjects) ListObjects(ctx context.Context, bucket, prefix, marker,
         MaxResults: uint(maxKeys),
     })
     if err != nil {
-        logger.LogIf(ctx, err)
         return result, azureToObjectError(err, bucket, prefix)
     }
@@ -637,7 +624,6 @@ func (a *azureObjects) ListObjectsV2(ctx context.Context, bucket, prefix, contin
 func (a *azureObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
     // startOffset cannot be negative.
     if startOffset < 0 {
-        logger.LogIf(ctx, minio.InvalidRange{})
         return azureToObjectError(minio.InvalidRange{}, bucket, object)
     }
@@ -657,7 +643,6 @@ func (a *azureObjects) GetObject(ctx context.Context, bucket, object string, sta
         })
     }
     if err != nil {
-        logger.LogIf(ctx, err)
         return azureToObjectError(err, bucket, object)
     }
     _, err = io.Copy(writer, rc)
@@ -672,7 +657,6 @@ func (a *azureObjects) GetObjectInfo(ctx context.Context, bucket, object string)
     blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
     err = blob.GetProperties(nil)
     if err != nil {
-        logger.LogIf(ctx, err)
         return objInfo, azureToObjectError(err, bucket, object)
     }
@@ -698,7 +682,6 @@ func (a *azureObjects) PutObject(ctx context.Context, bucket, object string, dat
     }
     err = blob.CreateBlockBlobFromReader(data, nil)
     if err != nil {
-        logger.LogIf(ctx, err)
         return objInfo, azureToObjectError(err, bucket, object)
     }
     return a.GetObjectInfo(ctx, bucket, object)
@@ -716,7 +699,6 @@ func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, des
     destBlob.Metadata = azureMeta
     err = destBlob.Copy(srcBlobURL, nil)
     if err != nil {
-        logger.LogIf(ctx, err)
         return objInfo, azureToObjectError(err, srcBucket, srcObject)
     }
     // Azure will copy metadata from the source object when an empty metadata map is provided.
@@ -726,14 +708,12 @@ func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, des
         destBlob.Metadata = azureMeta
         err = destBlob.SetMetadata(nil)
         if err != nil {
-            logger.LogIf(ctx, err)
             return objInfo, azureToObjectError(err, srcBucket, srcObject)
         }
     }
     destBlob.Properties = props
     err = destBlob.SetProperties(nil)
     if err != nil {
-        logger.LogIf(ctx, err)
         return objInfo, azureToObjectError(err, srcBucket, srcObject)
     }
     return a.GetObjectInfo(ctx, destBucket, destObject)
@@ -769,14 +749,12 @@ func (a *azureObjects) checkUploadIDExists(ctx context.Context, bucketName, obje
     blob := a.client.GetContainerReference(bucketName).GetBlobReference(
         getAzureMetadataObjectName(objectName, uploadID))
     err = blob.GetMetadata(nil)
-    logger.LogIf(ctx, err)
     err = azureToObjectError(err, bucketName, objectName)
     oerr := minio.ObjectNotFound{
         Bucket: bucketName,
         Object: objectName,
     }
     if err == oerr {
-        logger.LogIf(ctx, minio.InvalidUploadID{UploadID: uploadID})
         err = minio.InvalidUploadID{
             UploadID: uploadID,
         }
@@ -802,7 +780,6 @@ func (a *azureObjects) NewMultipartUpload(ctx context.Context, bucket, object st
     blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject)
     err = blob.CreateBlockBlobFromReader(bytes.NewBuffer(jsonData), nil)
     if err != nil {
-        logger.LogIf(ctx, err)
         return "", azureToObjectError(err, bucket, metadataObject)
     }
@@ -839,7 +816,6 @@ func (a *azureObjects) PutObjectPart(ctx context.Context, bucket, object, upload
         blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
         err = blob.PutBlockWithLength(id, uint64(subPartSize), io.LimitReader(data, subPartSize), nil)
         if err != nil {
-            logger.LogIf(ctx, err)
             return info, azureToObjectError(err, bucket, object)
         }
         subPartNumber++
@@ -871,7 +847,6 @@ func (a *azureObjects) ListObjectParts(ctx context.Context, bucket, object, uplo
         return result, nil
     }
     if err != nil {
-        logger.LogIf(ctx, err)
         return result, azureToObjectError(err, bucket, object)
     }
     // Build a sorted list of parts and return the requested entries.
@@ -881,7 +856,6 @@ func (a *azureObjects) ListObjectParts(ctx context.Context, bucket, object, uplo
         var parsedUploadID string
         var md5Hex string
         if partNumber, _, parsedUploadID, md5Hex, err = azureParseBlockID(block.Name); err != nil {
-            logger.LogIf(ctx, fmt.Errorf("Unexpected error"))
             return result, azureToObjectError(fmt.Errorf("Unexpected error"), bucket, object)
         }
         if parsedUploadID != uploadID {
@@ -899,7 +873,6 @@ func (a *azureObjects) ListObjectParts(ctx context.Context, bucket, object, uplo
             if part.ETag != md5Hex {
                 // If two parts of same partNumber were uploaded with different contents
                 // return error as we won't be able to decide which the latest part is.
-                logger.LogIf(ctx, fmt.Errorf("Unexpected error"))
                 return result, azureToObjectError(fmt.Errorf("Unexpected error"), bucket, object)
             }
             part.Size += block.Size
@@ -966,7 +939,6 @@ func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje
     var metadataReader io.Reader
     blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject)
     if metadataReader, err = blob.Get(nil); err != nil {
-        logger.LogIf(ctx, err)
         return objInfo, azureToObjectError(err, bucket, metadataObject)
     }
@@ -990,7 +962,6 @@ func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje
     objBlob := a.client.GetContainerReference(bucket).GetBlobReference(object)
     resp, err := objBlob.GetBlockList(storage.BlockListTypeUncommitted, nil)
     if err != nil {
-        logger.LogIf(ctx, err)
         return objInfo, azureToObjectError(err, bucket, object)
     }
@@ -1038,11 +1009,6 @@ func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje
     // Error out if parts except last part sizing < 5MiB.
     for i, size := range partSizes[:len(partSizes)-1] {
         if size < azureS3MinPartSize {
-            logger.LogIf(ctx, minio.PartTooSmall{
-                PartNumber: uploadedParts[i].PartNumber,
-                PartSize: size,
-                PartETag: uploadedParts[i].ETag,
-            })
             return objInfo, minio.PartTooSmall{
                 PartNumber: uploadedParts[i].PartNumber,
                 PartSize: size,
@@ -1053,23 +1019,19 @@ func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje
     err = objBlob.PutBlockList(allBlocks, nil)
     if err != nil {
-        logger.LogIf(ctx, err)
         return objInfo, azureToObjectError(err, bucket, object)
     }
     if len(metadata.Metadata) > 0 {
         objBlob.Metadata, objBlob.Properties, err = s3MetaToAzureProperties(ctx, metadata.Metadata)
         if err != nil {
-            logger.LogIf(ctx, err)
             return objInfo, azureToObjectError(err, bucket, object)
         }
         err = objBlob.SetProperties(nil)
         if err != nil {
-            logger.LogIf(ctx, err)
             return objInfo, azureToObjectError(err, bucket, object)
         }
         err = objBlob.SetMetadata(nil)
         if err != nil {
-            logger.LogIf(ctx, err)
             return objInfo, azureToObjectError(err, bucket, object)
         }
     }
@@ -1099,15 +1061,12 @@ func (a *azureObjects) SetBucketPolicy(ctx context.Context, bucket string, bucke
     }
     prefix := bucket + "/*" // For all objects inside the bucket.
     if len(policies) != 1 {
-        logger.LogIf(ctx, minio.NotImplemented{})
         return minio.NotImplemented{}
     }
     if policies[0].Prefix != prefix {
-        logger.LogIf(ctx, minio.NotImplemented{})
         return minio.NotImplemented{}
     }
     if policies[0].Policy != miniogopolicy.BucketPolicyReadOnly {
-        logger.LogIf(ctx, minio.NotImplemented{})
         return minio.NotImplemented{}
     }
     perm := storage.ContainerPermissions{
@@ -1116,7 +1075,6 @@ func (a *azureObjects) SetBucketPolicy(ctx context.Context, bucket string, bucke
     }
     container := a.client.GetContainerReference(bucket)
     err = container.SetPermissions(perm, nil)
-    logger.LogIf(ctx, err)
     return azureToObjectError(err, bucket)
 }
@@ -1125,14 +1083,12 @@ func (a *azureObjects) GetBucketPolicy(ctx context.Context, bucket string) (*pol
     container := a.client.GetContainerReference(bucket)
     perm, err := container.GetPermissions(nil)
     if err != nil {
-        logger.LogIf(ctx, err)
         return nil, azureToObjectError(err, bucket)
     }
     if perm.AccessType == storage.ContainerAccessTypePrivate {
         return nil, minio.BucketPolicyNotFound{Bucket: bucket}
     } else if perm.AccessType != storage.ContainerAccessTypeContainer {
-        logger.LogIf(ctx, minio.NotImplemented{})
         return nil, azureToObjectError(minio.NotImplemented{})
     }
@@ -1165,6 +1121,5 @@ func (a *azureObjects) DeleteBucketPolicy(ctx context.Context, bucket string) er
     }
     container := a.client.GetContainerReference(bucket)
     err := container.SetPermissions(perm, nil)
-    logger.LogIf(ctx, err)
     return azureToObjectError(err)
 }
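
One hunk in this file worth a second look is checkAzureUploadID: only the two LogIf calls are removed, while the validation itself (exactly 16 characters of hex) and the MalformedUploadID errors it returns stay as they were. A standalone equivalent of the check that remains:

package main

import (
	"encoding/hex"
	"fmt"
)

// validUploadID mirrors the checks kept by checkAzureUploadID above:
// the ID must be exactly 16 characters of valid hex.
func validUploadID(id string) bool {
	if len(id) != 16 {
		return false
	}
	_, err := hex.DecodeString(id)
	return err == nil
}

func main() {
	fmt.Println(validUploadID("0123456789abcdef")) // true
	fmt.Println(validUploadID("0123456789abcdeg")) // false: 'g' is not hex
	fmt.Println(validUploadID("abcdef"))           // false: wrong length
}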

cmd/gateway/s3/gateway-s3.go

@@ -223,13 +223,11 @@ func (l *s3Objects) MakeBucketWithLocation(ctx context.Context, bucket, location
     // access to these buckets.
     // Ref - http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
     if s3utils.CheckValidBucketName(bucket) != nil {
-        logger.LogIf(ctx, minio.BucketNameInvalid{Bucket: bucket})
         return minio.BucketNameInvalid{Bucket: bucket}
     }
     err := l.Client.MakeBucket(bucket, location)
     if err != nil {
-        logger.LogIf(ctx, err)
         return minio.ErrorRespToObjectError(err, bucket)
     }
     return err
@@ -239,7 +237,6 @@ func (l *s3Objects) MakeBucketWithLocation(ctx context.Context, bucket, location
 func (l *s3Objects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.BucketInfo, e error) {
     buckets, err := l.Client.ListBuckets()
     if err != nil {
-        logger.LogIf(ctx, err)
         return bi, minio.ErrorRespToObjectError(err, bucket)
     }
@@ -261,7 +258,6 @@ func (l *s3Objects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.
 func (l *s3Objects) ListBuckets(ctx context.Context) ([]minio.BucketInfo, error) {
     buckets, err := l.Client.ListBuckets()
     if err != nil {
-        logger.LogIf(ctx, err)
         return nil, minio.ErrorRespToObjectError(err)
     }
@@ -280,7 +276,6 @@ func (l *s3Objects) ListBuckets(ctx context.Context) ([]minio.BucketInfo, error)
 func (l *s3Objects) DeleteBucket(ctx context.Context, bucket string) error {
     err := l.Client.RemoveBucket(bucket)
     if err != nil {
-        logger.LogIf(ctx, err)
         return minio.ErrorRespToObjectError(err, bucket)
     }
     return nil
@@ -290,7 +285,6 @@ func (l *s3Objects) DeleteBucket(ctx context.Context, bucket string) error {
 func (l *s3Objects) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, e error) {
     result, err := l.Client.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
     if err != nil {
-        logger.LogIf(ctx, err)
         return loi, minio.ErrorRespToObjectError(err, bucket)
     }
@@ -301,7 +295,6 @@ func (l *s3Objects) ListObjects(ctx context.Context, bucket string, prefix strin
 func (l *s3Objects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, e error) {
     result, err := l.Client.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys, startAfter)
     if err != nil {
-        logger.LogIf(ctx, err)
         return loi, minio.ErrorRespToObjectError(err, bucket)
     }
@@ -316,7 +309,6 @@ func (l *s3Objects) ListObjectsV2(ctx context.Context, bucket, prefix, continuat
 // length indicates the total length of the object.
 func (l *s3Objects) GetObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string) error {
     if length < 0 && length != -1 {
-        logger.LogIf(ctx, minio.InvalidRange{})
         return minio.ErrorRespToObjectError(minio.InvalidRange{}, bucket, key)
     }
@@ -329,7 +321,6 @@ func (l *s3Objects) GetObject(ctx context.Context, bucket string, key string, st
     }
     object, _, err := l.Client.GetObject(bucket, key, opts)
     if err != nil {
-        logger.LogIf(ctx, err)
         return minio.ErrorRespToObjectError(err, bucket, key)
     }
     defer object.Close()
@@ -345,7 +336,6 @@ func (l *s3Objects) GetObject(ctx context.Context, bucket string, key string, st
 func (l *s3Objects) GetObjectInfo(ctx context.Context, bucket string, object string) (objInfo minio.ObjectInfo, err error) {
     oi, err := l.Client.StatObject(bucket, object, miniogo.StatObjectOptions{})
     if err != nil {
-        logger.LogIf(ctx, err)
         return minio.ObjectInfo{}, minio.ErrorRespToObjectError(err, bucket, object)
     }
@@ -356,7 +346,6 @@ func (l *s3Objects) GetObjectInfo(ctx context.Context, bucket string, object str
 func (l *s3Objects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
     oi, err := l.Client.PutObject(bucket, object, data, data.Size(), data.MD5Base64String(), data.SHA256HexString(), minio.ToMinioClientMetadata(metadata))
     if err != nil {
-        logger.LogIf(ctx, err)
         return objInfo, minio.ErrorRespToObjectError(err, bucket, object)
     }
@@ -372,7 +361,6 @@ func (l *s3Objects) CopyObject(ctx context.Context, srcBucket string, srcObject
     srcInfo.UserDefined["x-amz-metadata-directive"] = "REPLACE"
     srcInfo.UserDefined["x-amz-copy-source-if-match"] = srcInfo.ETag
     if _, err = l.Client.CopyObject(srcBucket, srcObject, dstBucket, dstObject, srcInfo.UserDefined); err != nil {
-        logger.LogIf(ctx, err)
         return objInfo, minio.ErrorRespToObjectError(err, srcBucket, srcObject)
     }
     return l.GetObjectInfo(ctx, dstBucket, dstObject)
@@ -382,7 +370,6 @@ func (l *s3Objects) CopyObject(ctx context.Context, srcBucket string, srcObject
 func (l *s3Objects) DeleteObject(ctx context.Context, bucket string, object string) error {
     err := l.Client.RemoveObject(bucket, object)
     if err != nil {
-        logger.LogIf(ctx, err)
         return minio.ErrorRespToObjectError(err, bucket, object)
     }
@@ -405,7 +392,6 @@ func (l *s3Objects) NewMultipartUpload(ctx context.Context, bucket string, objec
     opts := miniogo.PutObjectOptions{UserMetadata: metadata}
     uploadID, err = l.Client.NewMultipartUpload(bucket, object, opts)
     if err != nil {
-        logger.LogIf(ctx, err)
         return uploadID, minio.ErrorRespToObjectError(err, bucket, object)
     }
     return uploadID, nil
@@ -415,7 +401,6 @@ func (l *s3Objects) NewMultipartUpload(ctx context.Context, bucket string, objec
 func (l *s3Objects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi minio.PartInfo, e error) {
     info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data, data.Size(), data.MD5Base64String(), data.SHA256HexString())
     if err != nil {
-        logger.LogIf(ctx, err)
         return pi, minio.ErrorRespToObjectError(err, bucket, object)
     }
@@ -433,7 +418,6 @@ func (l *s3Objects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, de
     completePart, err := l.Client.CopyObjectPart(srcBucket, srcObject, destBucket, destObject,
         uploadID, partID, startOffset, length, srcInfo.UserDefined)
     if err != nil {
-        logger.LogIf(ctx, err)
         return p, minio.ErrorRespToObjectError(err, srcBucket, srcObject)
     }
     p.PartNumber = completePart.PartNumber
@@ -454,7 +438,6 @@ func (l *s3Objects) ListObjectParts(ctx context.Context, bucket string, object s
 // AbortMultipartUpload aborts a ongoing multipart upload
 func (l *s3Objects) AbortMultipartUpload(ctx context.Context, bucket string, object string, uploadID string) error {
     err := l.Client.AbortMultipartUpload(bucket, object, uploadID)
-    logger.LogIf(ctx, err)
     return minio.ErrorRespToObjectError(err, bucket, object)
 }
@@ -462,7 +445,6 @@ func (l *s3Objects) AbortMultipartUpload(ctx context.Context, bucket string, obj
 func (l *s3Objects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, uploadedParts []minio.CompletePart) (oi minio.ObjectInfo, e error) {
     err := l.Client.CompleteMultipartUpload(bucket, object, uploadID, minio.ToMinioClientCompleteParts(uploadedParts))
     if err != nil {
-        logger.LogIf(ctx, err)
         return oi, minio.ErrorRespToObjectError(err, bucket, object)
     }
@@ -479,7 +461,6 @@ func (l *s3Objects) SetBucketPolicy(ctx context.Context, bucket string, bucketPo
     }
     if err := l.Client.SetBucketPolicy(bucket, string(data)); err != nil {
-        logger.LogIf(ctx, err)
         return minio.ErrorRespToObjectError(err, bucket)
     }
@@ -500,7 +481,6 @@ func (l *s3Objects) GetBucketPolicy(ctx context.Context, bucket string) (*policy
 // DeleteBucketPolicy deletes all policies on bucket
 func (l *s3Objects) DeleteBucketPolicy(ctx context.Context, bucket string) error {
     if err := l.Client.SetBucketPolicy(bucket, ""); err != nil {
-        logger.LogIf(ctx, err)
         return minio.ErrorRespToObjectError(err, bucket, "")
     }
     return nil

cmd/object-api-input-checks.go

@@ -196,11 +196,6 @@ func checkPutObjectArgs(ctx context.Context, bucket, object string, obj ObjectLa
         hasPrefix(object, slashSeparator) ||
         (hasSuffix(object, slashSeparator) && size != 0) ||
         !IsValidObjectPrefix(object) {
-        logger.LogIf(ctx, ObjectNameInvalid{
-            Bucket: bucket,
-            Object: object,
-        })
         return ObjectNameInvalid{
             Bucket: bucket,
             Object: object,
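
The condition kept by this hunk encodes the object-name rules for PUTs: a name may not start with a slash, a name ending in a slash is only acceptable for zero-size objects, and the prefix must pass IsValidObjectPrefix. A stdlib-only sketch of the first two checks (IsValidObjectPrefix is a MinIO helper and is left out here):

package main

import (
	"fmt"
	"strings"
)

const slashSeparator = "/"

// invalidPutObjectName keeps the two stdlib checks from the condition in
// the hunk above: object names may not start with "/", and a name ending
// in "/" is only allowed for zero-size objects.
func invalidPutObjectName(object string, size int64) bool {
	return strings.HasPrefix(object, slashSeparator) ||
		(strings.HasSuffix(object, slashSeparator) && size != 0)
}

func main() {
	fmt.Println(invalidPutObjectName("/abs/path.txt", 10)) // true: leading slash
	fmt.Println(invalidPutObjectName("dir/", 10))          // true: directory-style name with data
	fmt.Println(invalidPutObjectName("dir/", 0))           // false: empty directory marker is allowed
	fmt.Println(invalidPutObjectName("dir/file.txt", 10))  // false
}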

cmd/xl-v1-common.go

@@ -69,11 +69,6 @@ func (xl xlObjects) isObject(bucket, prefix string) (ok bool) {
         if IsErrIgnored(err, xlTreeWalkIgnoredErrs...) {
             continue
         }
-        reqInfo := &logger.ReqInfo{BucketName: bucket}
-        reqInfo.AppendTags("prefix", prefix)
-        reqInfo.AppendTags("xlMetaJSONFile", xlMetaJSONFile)
-        ctx := logger.SetReqInfo(context.Background(), reqInfo)
-        logger.LogIf(ctx, err)
     } // Exhausted all disks - return false.
     return false
 }

cmd/xl-v1-multipart.go

@@ -778,7 +778,6 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
     // Deny if WORM is enabled
     if globalWORMEnabled {
         if xl.isObject(bucket, object) {
-            logger.LogIf(ctx, ObjectAlreadyExists{Bucket: bucket, Object: object})
             return ObjectInfo{}, ObjectAlreadyExists{Bucket: bucket, Object: object}
         }
     }

cmd/xl-v1-object.go

@@ -462,7 +462,6 @@ func rename(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBuc
             defer wg.Done()
             if err := disk.RenameFile(srcBucket, srcEntry, dstBucket, dstEntry); err != nil {
                 if !IsErrIgnored(err, ignoredErr...) {
-                    logger.LogIf(ctx, err)
                     errs[index] = err
                 }
             }
@@ -743,7 +742,6 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string,
     // Deny if WORM is enabled
     if globalWORMEnabled {
         if xl.isObject(bucket, object) {
-            logger.LogIf(ctx, ObjectAlreadyExists{Bucket: bucket, Object: object})
             return ObjectInfo{}, ObjectAlreadyExists{Bucket: bucket, Object: object}
         }
     }
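
The first hunk in this file sits inside XL's parallel rename: every disk is renamed concurrently under a WaitGroup, and failures that are not on the ignored list are recorded per disk index; after this change they are only recorded, no longer logged at the call site. A self-contained sketch of that collect-errors-per-disk pattern (the renamer interface and fake disks below are stand-ins for illustration, not MinIO's StorageAPI):

package main

import (
	"errors"
	"fmt"
	"sync"
)

// renamer is a stand-in for the disk interface used by the rename hunk
// above; the real StorageAPI interface is much larger.
type renamer interface {
	RenameFile(srcBucket, srcEntry, dstBucket, dstEntry string) error
}

type fakeDisk struct{ fail bool }

func (d fakeDisk) RenameFile(_, _, _, _ string) error {
	if d.fail {
		return errors.New("disk offline")
	}
	return nil
}

func main() {
	disks := []renamer{fakeDisk{}, fakeDisk{fail: true}, fakeDisk{}}
	errs := make([]error, len(disks))
	var wg sync.WaitGroup
	// Fan the rename out to every disk and collect failures per index;
	// after this commit the errors are only recorded here, not logged.
	for i, d := range disks {
		wg.Add(1)
		go func(i int, d renamer) {
			defer wg.Done()
			if err := d.RenameFile("srcBkt", "srcObj", "dstBkt", "dstObj"); err != nil {
				errs[i] = err
			}
		}(i, d)
	}
	wg.Wait()
	fmt.Println(errs) // [<nil> disk offline <nil>]
}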

cmd/xl-v1-utils.go

@@ -66,12 +66,8 @@ func reduceErrs(errs []error, ignoredErrs []error) (maxCount int, maxErr error)
 func reduceQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, quorum int, quorumErr error) error {
     maxCount, maxErr := reduceErrs(errs, ignoredErrs)
     if maxCount >= quorum {
-        if maxErr != errFileNotFound && maxErr != errVolumeNotFound {
-            logger.LogIf(ctx, maxErr)
-        }
         return maxErr
     }
-    logger.LogIf(ctx, quorumErr)
     return quorumErr
 }
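
reduceQuorumErrs keeps the same behaviour minus the logging: the most frequent error across disks is returned when its count reaches quorum, otherwise the supplied quorum error is returned. A simplified, self-contained sketch of that reduction (not MinIO's exact implementation; the real reduceErrs also filters a list of ignorable errors):

package main

import (
	"errors"
	"fmt"
)

var errDiskNotFound = errors.New("disk not found")

// reduceQuorum is a simplified version of the reduceErrs/reduceQuorumErrs
// pair: pick the most frequent error across disks and return it only if
// it reaches quorum, otherwise return the supplied quorum error. After
// this commit neither outcome is logged inside the helper.
func reduceQuorum(errs []error, quorum int, quorumErr error) error {
	counts := make(map[error]int)
	for _, e := range errs {
		counts[e]++
	}
	var maxErr error
	maxCount := 0
	for e, c := range counts {
		if c > maxCount {
			maxCount, maxErr = c, e
		}
	}
	if maxCount >= quorum {
		return maxErr
	}
	return quorumErr
}

func main() {
	errs := []error{nil, nil, errDiskNotFound, nil}
	fmt.Println(reduceQuorum(errs, 3, errors.New("write quorum not met"))) // <nil>: success on 3 of 4 disks
	fmt.Println(reduceQuorum(errs, 4, errors.New("write quorum not met"))) // write quorum not met
}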
