From db5af1b1261ecf4b41f81ed15e8ede1055bdfe42 Mon Sep 17 00:00:00 2001
From: Harshavardhana
Date: Wed, 23 Aug 2017 17:58:52 -0700
Subject: [PATCH] fix: tests error conditions should be used properly. (#4833)

---
 cmd/erasure-createfile.go        |  3 ++-
 cmd/erasure-createfile_test.go   |  4 ----
 cmd/object-api-putobject_test.go |  2 +-
 cmd/xl-v1-healing-common_test.go |  7 ++++---
 cmd/xl-v1-multipart.go           | 11 +++++------
 cmd/xl-v1-object.go              |  5 ++---
 6 files changed, 14 insertions(+), 18 deletions(-)

diff --git a/cmd/erasure-createfile.go b/cmd/erasure-createfile.go
index ba1ef248f..479670a6f 100644
--- a/cmd/erasure-createfile.go
+++ b/cmd/erasure-createfile.go
@@ -38,7 +38,8 @@ func (s *ErasureStorage) CreateFile(src io.Reader, volume, path string, buffer [
 		errChans[i] = make(chan error, 1) // create buffered channel to let finished go-routines die early
 	}
 
-	blocks, n := [][]byte{}, len(buffer)
+	var blocks [][]byte
+	var n = len(buffer)
 	for n == len(buffer) {
 		n, err = io.ReadFull(src, buffer)
 		if n == 0 && err == io.EOF {
diff --git a/cmd/erasure-createfile_test.go b/cmd/erasure-createfile_test.go
index a54c5eb06..e86ed9197 100644
--- a/cmd/erasure-createfile_test.go
+++ b/cmd/erasure-createfile_test.go
@@ -82,10 +82,6 @@ func TestErasureCreateFile(t *testing.T) {
 			setup.Remove()
 			t.Fatalf("Test %d: failed to generate random test data: %v", i, err)
 		}
-		algorithm := test.algorithm
-		if !algorithm.Available() {
-			algorithm = DefaultBitrotAlgorithm
-		}
 		file, err := storage.CreateFile(bytes.NewReader(data[test.offset:]), "testbucket", "object", buffer, test.algorithm, test.dataBlocks+1)
 		if err != nil && !test.shouldFail {
 			t.Errorf("Test %d: should pass but failed with: %v", i, err)
diff --git a/cmd/object-api-putobject_test.go b/cmd/object-api-putobject_test.go
index c49e4463f..cdaeb1a16 100644
--- a/cmd/object-api-putobject_test.go
+++ b/cmd/object-api-putobject_test.go
@@ -229,7 +229,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
 	sha256sum := ""
 	for i, testCase := range testCases {
 		objInfo, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, testCase.intputDataSize, bytes.NewReader(testCase.inputData), testCase.inputMeta, sha256sum)
-		actualErr = errorCause(err)
+		actualErr = errorCause(actualErr)
 		if actualErr != nil && testCase.shouldPass {
 			t.Errorf("Test %d: %s: Expected to pass, but failed with: %s.", i+1, instanceType, actualErr.Error())
 		}
diff --git a/cmd/xl-v1-healing-common_test.go b/cmd/xl-v1-healing-common_test.go
index 7b154d757..88477c95b 100644
--- a/cmd/xl-v1-healing-common_test.go
+++ b/cmd/xl-v1-healing-common_test.go
@@ -350,7 +350,8 @@ func TestDisksWithAllParts(t *testing.T) {
 	// make data with more than one part
 	partCount := 3
 	data := bytes.Repeat([]byte("a"), int(globalPutPartSize)*partCount)
-	xlDisks := obj.(*xlObjects).storageDisks
+	xl := obj.(*xlObjects)
+	xlDisks := xl.storageDisks
 
 	err = obj.MakeBucketWithLocation("bucket", "")
 	if err != nil {
@@ -363,8 +364,8 @@
 	}
 
 	partsMetadata, errs := readAllXLMetadata(xlDisks, bucket, object)
-	if err != nil {
-		t.Fatalf("Failed to read xl meta data %v", err)
+	if reducedErr := reduceReadQuorumErrs(errs, objectOpIgnoredErrs, xl.readQuorum); reducedErr != nil {
+		t.Fatalf("Failed to read xl meta data %v", reducedErr)
 	}
 
 	diskFailures := make(map[int]string)
diff --git a/cmd/xl-v1-multipart.go b/cmd/xl-v1-multipart.go
index 5075eb108..a22f53fba 100644
--- a/cmd/xl-v1-multipart.go
+++ b/cmd/xl-v1-multipart.go
@@ -738,15 +738,14 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
 	if onlineDisks, err = writeUniqueXLMetadata(onlineDisks, minioMetaTmpBucket, tempXLMetaPath, partsMetadata, xl.writeQuorum); err != nil {
 		return pi, toObjectErr(err, minioMetaTmpBucket, tempXLMetaPath)
 	}
-	var rErr error
-	onlineDisks, rErr = commitXLMetadata(onlineDisks, minioMetaTmpBucket, tempXLMetaPath, minioMetaMultipartBucket, uploadIDPath, xl.writeQuorum)
-	if rErr != nil {
-		return pi, toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath)
+
+	if _, err = commitXLMetadata(onlineDisks, minioMetaTmpBucket, tempXLMetaPath, minioMetaMultipartBucket, uploadIDPath, xl.writeQuorum); err != nil {
+		return pi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
 	}
 
 	fi, err := xl.statPart(bucket, object, uploadID, partSuffix)
 	if err != nil {
-		return pi, toObjectErr(rErr, minioMetaMultipartBucket, partSuffix)
+		return pi, toObjectErr(err, minioMetaMultipartBucket, partSuffix)
 	}
 
 	// Return success.
@@ -1029,7 +1028,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
 	}
 
 	// Rename the multipart object to final location.
-	if onlineDisks, err = renameObject(onlineDisks, minioMetaMultipartBucket, uploadIDPath, bucket, object, xl.writeQuorum); err != nil {
+	if _, err = renameObject(onlineDisks, minioMetaMultipartBucket, uploadIDPath, bucket, object, xl.writeQuorum); err != nil {
 		return oi, toObjectErr(err, bucket, object)
 	}
 
diff --git a/cmd/xl-v1-object.go b/cmd/xl-v1-object.go
index 82a416926..6d0035fe0 100644
--- a/cmd/xl-v1-object.go
+++ b/cmd/xl-v1-object.go
@@ -98,7 +98,7 @@ func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
 		return oi, toObjectErr(err, srcBucket, srcObject)
 	}
 	// Rename atomically `xl.json` from tmp location to destination for each disk.
-	if onlineDisks, err = renameXLMetadata(onlineDisks, minioMetaTmpBucket, tempObj, srcBucket, srcObject, xl.writeQuorum); err != nil {
+	if _, err = renameXLMetadata(onlineDisks, minioMetaTmpBucket, tempObj, srcBucket, srcObject, xl.writeQuorum); err != nil {
 		return oi, toObjectErr(err, srcBucket, srcObject)
 	}
 	return xlMeta.ToObjectInfo(srcBucket, srcObject), nil
@@ -680,8 +680,7 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
 	}
 
 	// Rename the successfully written temporary object to final location.
-	onlineDisks, err = renameObject(onlineDisks, minioMetaTmpBucket, tempObj, bucket, object, xl.writeQuorum)
-	if err != nil {
+	if _, err = renameObject(onlineDisks, minioMetaTmpBucket, tempObj, bucket, object, xl.writeQuorum); err != nil {
 		return ObjectInfo{}, toObjectErr(err, bucket, object)
 	}
 
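
Illustrative sketch (not part of the patch above): the common thread in these changes is that the error value just returned is the one that must be inspected, rather than a stale or unrelated variable such as the shadowed err or the reused rErr. Below is a minimal standalone Go example of that pitfall and the corrected pattern; the doWork helper and its message are hypothetical and exist only to produce an error.

// Hypothetical, self-contained example of the error-handling pitfall fixed
// in this commit: assigning a fresh error to one variable while checking a
// different (nil) variable, so the failure is silently dropped.
package main

import (
	"errors"
	"fmt"
)

// doWork is a hypothetical helper that always fails.
func doWork() (string, error) {
	return "", errors.New("disk not found")
}

func main() {
	var err error // never assigned below; stays nil

	// Buggy pattern: doWork's error lands in actualErr, but the check
	// reads err, so the failure goes unnoticed.
	result, actualErr := doWork()
	if err != nil { // should be: if actualErr != nil
		fmt.Println("caught (buggy check):", err)
	}
	_ = result

	// Corrected pattern, as applied throughout the patch: test the error
	// returned by the call you just made.
	if result, actualErr = doWork(); actualErr != nil {
		fmt.Println("caught (fixed check):", actualErr)
	}
	_ = result
}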