diff --git a/cmd/bitrot.go b/cmd/bitrot.go
index b9fcd8f2c..a0b2f4144 100644
--- a/cmd/bitrot.go
+++ b/cmd/bitrot.go
@@ -158,12 +158,13 @@ func bitrotWriterSum(w io.Writer) []byte {
 
 // Verify if a file has bitrot error.
 func bitrotCheckFile(disk StorageAPI, volume string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) (err error) {
-	buf := make([]byte, shardSize)
 	if algo != HighwayHash256S {
-		// For whole-file bitrot we don't need to read the entire file as the bitrot verify happens on the server side even if we read small buffer
+		buf := []byte{}
+		// For whole-file bitrot we don't need to read the entire file, as the bitrot verification happens on the server side even if we read 0 bytes.
 		_, err = disk.ReadFile(volume, filePath, 0, buf, NewBitrotVerifier(algo, sum))
 		return err
 	}
+	buf := make([]byte, shardSize)
 	r := newStreamingBitrotReader(disk, volume, filePath, tillOffset, algo, shardSize)
 	defer closeBitrotReaders([]io.ReaderAt{r})
 	var offset int64
diff --git a/cmd/bitrot_test.go b/cmd/bitrot_test.go
index 94b32594c..03ccfcd5b 100644
--- a/cmd/bitrot_test.go
+++ b/cmd/bitrot_test.go
@@ -24,7 +24,7 @@ import (
 	"testing"
 )
 
-func TestBitrotReaderWriter(t *testing.T) {
+func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
 	tmpDir, err := ioutil.TempDir("", "")
 	if err != nil {
 		log.Fatal(err)
@@ -41,7 +41,7 @@ func TestBitrotReaderWriter(t *testing.T) {
 
 	disk.MakeVol(volume)
 
-	writer := newBitrotWriter(disk, volume, filePath, 35, HighwayHash256S, 10)
+	writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10)
 
 	_, err = writer.Write([]byte("aaaaaaaaaa"))
 	if err != nil {
@@ -61,7 +61,7 @@ func TestBitrotReaderWriter(t *testing.T) {
 	}
 	writer.(io.Closer).Close()
 
-	reader := newStreamingBitrotReader(disk, volume, filePath, 35, HighwayHash256S, 10)
+	reader := newBitrotReader(disk, volume, filePath, 35, bitrotAlgo, bitrotWriterSum(writer), 10)
 	b := make([]byte, 10)
 	if _, err = reader.ReadAt(b, 0); err != nil {
 		log.Fatal(err)
@@ -76,3 +76,9 @@ func TestBitrotReaderWriter(t *testing.T) {
 		log.Fatal(err)
 	}
 }
+
+func TestAllBitrotAlgorithms(t *testing.T) {
+	for bitrotAlgo := range bitrotAlgorithms {
+		testBitrotReaderWriterAlgo(t, bitrotAlgo)
+	}
+}
diff --git a/cmd/xl-v1-healing.go b/cmd/xl-v1-healing.go
index 464c91f4b..f51a419c0 100644
--- a/cmd/xl-v1-healing.go
+++ b/cmd/xl-v1-healing.go
@@ -251,7 +251,6 @@ func listAllBuckets(storageDisks []StorageAPI) (buckets map[string]VolInfo,
 
 // Heals an object by re-writing corrupt/missing erasure blocks.
 func healObject(ctx context.Context, storageDisks []StorageAPI, bucket string, object string, quorum int, dryRun bool) (result madmin.HealResultItem, err error) {
-
 	partsMetadata, errs := readAllXLMetadata(ctx, storageDisks, bucket, object)
 
 	errCount := 0
@@ -432,20 +431,21 @@ func healObject(ctx context.Context, storageDisks []StorageAPI, bucket string, o
 		partActualSize := latestMeta.Parts[partIndex].ActualSize
 		partNumber := latestMeta.Parts[partIndex].Number
 		tillOffset := erasure.ShardFileTillOffset(0, partSize, partSize)
-		checksumInfo := erasureInfo.GetChecksumInfo(partName)
 		readers := make([]io.ReaderAt, len(latestDisks))
+		checksumAlgo := erasureInfo.GetChecksumInfo(partName).Algorithm
 		for i, disk := range latestDisks {
 			if disk == OfflineDisk {
 				continue
 			}
-			readers[i] = newBitrotReader(disk, bucket, pathJoin(object, partName), tillOffset, checksumInfo.Algorithm, checksumInfo.Hash, erasure.ShardSize())
+			checksumInfo := partsMetadata[i].Erasure.GetChecksumInfo(partName)
+			readers[i] = newBitrotReader(disk, bucket, pathJoin(object, partName), tillOffset, checksumAlgo, checksumInfo.Hash, erasure.ShardSize())
 		}
 		writers := make([]io.Writer, len(outDatedDisks))
 		for i, disk := range outDatedDisks {
 			if disk == OfflineDisk {
 				continue
 			}
-			writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, pathJoin(tmpID, partName), tillOffset, checksumInfo.Algorithm, erasure.ShardSize())
+			writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, pathJoin(tmpID, partName), tillOffset, checksumAlgo, erasure.ShardSize())
 		}
 		hErr := erasure.Heal(ctx, readers, writers, partSize)
 		closeBitrotReaders(readers)
@@ -467,7 +467,7 @@ func healObject(ctx context.Context, storageDisks []StorageAPI, bucket string, o
 				continue
 			}
 			partsMetadata[i].AddObjectPart(partNumber, partName, "", partSize, partActualSize)
-			partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{partName, checksumInfo.Algorithm, bitrotWriterSum(writers[i])})
+			partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{partName, checksumAlgo, bitrotWriterSum(writers[i])})
 		}
 
 		// If all disks are having errors, we give up.
@@ -476,14 +476,6 @@
 		}
 	}
 
-	// xl.json should be written to all the healed disks.
-	for index, disk := range outDatedDisks {
-		if disk == nil {
-			continue
-		}
-		partsMetadata[index] = latestMeta
-	}
-
 	// Generate and write `xl.json` generated from other disks.
 	outDatedDisks, aErr := writeUniqueXLMetadata(ctx, outDatedDisks, minioMetaTmpBucket, tmpID,
 		partsMetadata, diskCount(outDatedDisks))
diff --git a/cmd/xl-v1-metadata.go b/cmd/xl-v1-metadata.go
index 6d3a347fd..a42968586 100644
--- a/cmd/xl-v1-metadata.go
+++ b/cmd/xl-v1-metadata.go
@@ -189,17 +189,9 @@ func newXLMetaV1(object string, dataBlocks, parityBlocks int) (xlMeta xlMetaV1)
 
 // Return a new xlMetaV1 initialized using the given xlMetaV1. Used in healing to make sure that we do not copy
 // over any part's checksum info which will differ for different disks.
 func newXLMetaFromXLMeta(meta xlMetaV1) xlMetaV1 {
-	xlMeta := xlMetaV1{}
-	xlMeta.Version = xlMetaVersion
-	xlMeta.Format = xlMetaFormat
-	xlMeta.Minio.Release = ReleaseTag
-	xlMeta.Erasure = ErasureInfo{
-		Algorithm:    meta.Erasure.Algorithm,
-		DataBlocks:   meta.Erasure.DataBlocks,
-		ParityBlocks: meta.Erasure.DataBlocks,
-		BlockSize:    meta.Erasure.BlockSize,
-		Distribution: meta.Erasure.Distribution,
-	}
+	xlMeta := meta
+	xlMeta.Erasure.Checksums = nil
+	xlMeta.Parts = nil
 	return xlMeta
 }
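
A note on the cmd/bitrot.go change: for the whole-file bitrot algorithms, disk.ReadFile verifies the checksum of the entire file on the server side whenever a verifier is attached, regardless of how many bytes the caller requests (this is what the patch's own comment states), so passing an empty buffer avoids allocating and transferring shardSize bytes per checked file. The sketch below is a minimal stand-alone model of that behavior, not MinIO's actual StorageAPI; readFileVerified and its signature are hypothetical.

package main

import (
	"bytes"
	"crypto/sha256"
	"errors"
	"fmt"
	"hash"
	"io"
	"os"
)

// readFileVerified models a ReadFile-with-verifier call: it hashes the whole
// file before serving the requested range, so verification does not depend on
// how large a buffer the caller passes.
func readFileVerified(path string, offset int64, buf []byte, h hash.Hash, want []byte) (int64, error) {
	f, err := os.Open(path)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	// The whole-file hash happens regardless of len(buf); this is what lets
	// the caller pass an empty buffer and still detect bitrot.
	if _, err := io.Copy(h, f); err != nil {
		return 0, err
	}
	if !bytes.Equal(h.Sum(nil), want) {
		return 0, errors.New("bitrot: checksum mismatch")
	}
	n, err := f.ReadAt(buf, offset)
	if err == io.EOF {
		err = nil
	}
	return int64(n), err
}

func main() {
	tmp, _ := os.CreateTemp("", "bitrot")
	defer os.Remove(tmp.Name())
	tmp.WriteString("hello, world")
	tmp.Close()

	sum := sha256.Sum256([]byte("hello, world"))

	// A zero-byte read transfers no data but still verifies the whole file.
	_, err := readFileVerified(tmp.Name(), 0, []byte{}, sha256.New(), sum[:])
	fmt.Println("verify with empty buffer:", err) // <nil>
}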
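On the cmd/xl-v1-healing.go change: each disk stores a checksum of its own erasure shard, so the hashes legitimately differ across disks while the algorithm is common to all of them. The old code fetched one ChecksumInfo and applied both its algorithm and its hash to every reader; the fix reads the algorithm once and looks up each reader's hash from that disk's own metadata. Here is a minimal sketch of the corrected wiring; diskMeta, checksumInfo, and wireReaders are illustrative stand-ins, not MinIO's API.

package main

import "fmt"

// Hypothetical stand-ins for the types involved; not MinIO's real types.
type checksumInfo struct {
	Algorithm string
	Hash      string
}

type diskMeta struct {
	name      string
	checksums map[string]checksumInfo // part name -> checksum of THIS disk's shard
}

// wireReaders mirrors the fixed loop in healObject: the algorithm is read
// once (it is the same on every disk), but each verifying reader gets the
// hash recorded by its own disk, because every disk holds a different shard.
func wireReaders(disks []*diskMeta, part string) []string {
	var algo string
	for _, d := range disks {
		if d != nil {
			algo = d.checksums[part].Algorithm
			break
		}
	}
	readers := make([]string, 0, len(disks))
	for _, d := range disks {
		if d == nil { // offline disk: skip, as healObject does
			continue
		}
		cs := d.checksums[part] // per-disk lookup: the essence of the fix
		readers = append(readers, fmt.Sprintf("%s: verify hash %s with %s", d.name, cs.Hash, algo))
	}
	return readers
}

func main() {
	disks := []*diskMeta{
		{"disk1", map[string]checksumInfo{"part.1": {"highwayhash256S", "1f0a"}}},
		nil, // an offline disk
		{"disk3", map[string]checksumInfo{"part.1": {"highwayhash256S", "9c4e"}}},
	}
	for _, r := range wireReaders(disks, "part.1") {
		fmt.Println(r)
	}
}

The deleted block that copied latestMeta wholesale onto the healed disks was dropped for the same reason: it would have overwritten each healed disk's freshly computed checksums with those of the source disk.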
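Finally, the newXLMetaFromXLMeta rewrite leans on Go's value semantics: assigning a struct copies every field, including nested structs, so a single assignment replaces the field-by-field copy (and removes the bug in the old code, which filled ParityBlocks from meta.Erasure.DataBlocks). The slice fields are then set to nil because slices are references into shared backing arrays, and, as the function's comment says, checksums and parts must not be carried over from one disk's metadata to another's. A small self-contained demonstration with illustrative types (fileMeta and erasureMeta are stand-ins, not xlMetaV1):

package main

import "fmt"

// Illustrative stand-ins for xlMetaV1 and friends; not MinIO's real types.
type checksum struct {
	Part string
	Hash string
}

type erasureMeta struct {
	DataBlocks   int
	ParityBlocks int
	Checksums    []checksum
}

type fileMeta struct {
	Version string
	Erasure erasureMeta
	Parts   []string
}

// newMetaFromMeta mirrors the patched newXLMetaFromXLMeta: copy the whole
// struct by value so every scalar field (version, erasure layout, ...)
// carries over, then nil out the per-disk slice fields. Without the nil-ing,
// the copy's slices would alias the original's backing arrays.
func newMetaFromMeta(meta fileMeta) fileMeta {
	m := meta
	m.Erasure.Checksums = nil
	m.Parts = nil
	return m
}

func main() {
	src := fileMeta{
		Version: "1.0.1",
		Erasure: erasureMeta{
			DataBlocks:   4,
			ParityBlocks: 2,
			Checksums:    []checksum{{"part.1", "abc123"}},
		},
		Parts: []string{"part.1"},
	}
	dst := newMetaFromMeta(src)
	fmt.Println(dst.Version, dst.Erasure.DataBlocks, dst.Erasure.ParityBlocks) // 1.0.1 4 2
	fmt.Println(dst.Erasure.Checksums, dst.Parts)                              // [] []
}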