Fix healing whole file bitrot (#7123)

* Use 0-byte file for bitrot verification of whole-file-bitrot files

Also pass the right checksum information for bitrot verification

* Copy xlMeta info from latest meta except []checksums and []Parts while healing
Branch: master
Author: Krishna Srinivas (committed by Harshavardhana, 6 years ago)
Commit: 51ec61ee94, parent 74c2048ea9

Changed files:
    cmd/bitrot.go          (5 lines changed)
    cmd/bitrot_test.go     (12 lines changed)
    cmd/xl-v1-healing.go   (18 lines changed)
    cmd/xl-v1-metadata.go  (14 lines changed)

cmd/bitrot.go:

@@ -158,12 +158,13 @@ func bitrotWriterSum(w io.Writer) []byte {
 // Verify if a file has bitrot error.
 func bitrotCheckFile(disk StorageAPI, volume string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) (err error) {
-	buf := make([]byte, shardSize)
 	if algo != HighwayHash256S {
-		// For whole-file bitrot we don't need to read the entire file as the bitrot verify happens on the server side even if we read small buffer
+		buf := []byte{}
+		// For whole-file bitrot we don't need to read the entire file as the bitrot verify happens on the server side even if we read 0-bytes.
 		_, err = disk.ReadFile(volume, filePath, 0, buf, NewBitrotVerifier(algo, sum))
 		return err
 	}
+	buf := make([]byte, shardSize)
 	r := newStreamingBitrotReader(disk, volume, filePath, tillOffset, algo, shardSize)
 	defer closeBitrotReaders([]io.ReaderAt{r})
 	var offset int64
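Why a 0-byte buffer is enough: for the whole-file (non-streaming) algorithms, the bitrot verifier hashes the complete file on the storage side before any requested bytes are returned, so the read length does not affect what gets verified. A self-contained sketch of that idea, with plain SHA-256 standing in for MinIO's bitrot algorithms and verifyWholeFile as a hypothetical helper rather than MinIO's API:

package main

import (
	"bytes"
	"crypto/sha256"
	"errors"
	"fmt"
	"os"
)

// verifyWholeFile re-hashes the entire file and compares against the
// expected sum. No caller-visible payload has to be read: verification
// cost depends on the file size, not on the requested read length.
func verifyWholeFile(path string, expectedSum []byte) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	sum := sha256.Sum256(data)
	if !bytes.Equal(sum[:], expectedSum) {
		return errors.New("bitrot detected: whole-file checksum mismatch")
	}
	return nil
}

func main() {
	path := "/tmp/shard"
	if err := os.WriteFile(path, []byte("aaaaaaaaaa"), 0o644); err != nil {
		panic(err)
	}
	want := sha256.Sum256([]byte("aaaaaaaaaa"))
	// Equivalent of ReadFile(..., 0, []byte{}, verifier): zero bytes
	// requested, whole file verified.
	fmt.Println(verifyWholeFile(path, want[:])) // <nil>
}

Reading shardSize bytes, as the old code did, shipped a buffer of payload for no extra safety, since the server hashes the whole file either way.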

cmd/bitrot_test.go:

@@ -24,7 +24,7 @@ import (
 	"testing"
 )
 
-func TestBitrotReaderWriter(t *testing.T) {
+func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
 	tmpDir, err := ioutil.TempDir("", "")
 	if err != nil {
 		log.Fatal(err)
@@ -41,7 +41,7 @@ func TestBitrotReaderWriter(t *testing.T) {
 	disk.MakeVol(volume)
 
-	writer := newBitrotWriter(disk, volume, filePath, 35, HighwayHash256S, 10)
+	writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10)
 
 	_, err = writer.Write([]byte("aaaaaaaaaa"))
 	if err != nil {
@@ -61,7 +61,7 @@ func TestBitrotReaderWriter(t *testing.T) {
 	}
 	writer.(io.Closer).Close()
 
-	reader := newStreamingBitrotReader(disk, volume, filePath, 35, HighwayHash256S, 10)
+	reader := newBitrotReader(disk, volume, filePath, 35, bitrotAlgo, bitrotWriterSum(writer), 10)
 	b := make([]byte, 10)
 	if _, err = reader.ReadAt(b, 0); err != nil {
 		log.Fatal(err)
@@ -76,3 +76,9 @@ func TestBitrotReaderWriter(t *testing.T) {
 		log.Fatal(err)
 	}
 }
+
+func TestAllBitrotAlgorithms(t *testing.T) {
+	for bitrotAlgo := range bitrotAlgorithms {
+		testBitrotReaderWriterAlgo(t, bitrotAlgo)
+	}
+}
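The renamed helper plus the new TestAllBitrotAlgorithms turn a single-algorithm test into a table-driven one: every algorithm registered in the bitrotAlgorithms map now gets the same write-read-verify coverage. A self-contained sketch of that pattern, with an illustrative algorithm table (the names and constructors below are stand-ins, not MinIO's map):

package example

import (
	"bytes"
	"crypto/sha256"
	"crypto/sha512"
	"hash"
	"testing"
)

// algorithms stands in for MinIO's bitrotAlgorithms map (hypothetical set).
var algorithms = map[string]func() hash.Hash{
	"sha256": sha256.New,
	"sha512": sha512.New,
}

// testRoundTripAlgo mirrors the role of testBitrotReaderWriterAlgo:
// exercise one hash-then-verify cycle for a single algorithm.
func testRoundTripAlgo(t *testing.T, newHash func() hash.Hash) {
	h := newHash()
	h.Write([]byte("aaaaaaaaaa"))
	want := h.Sum(nil)

	// Re-hash the same data and check the sums agree, i.e. no bitrot.
	h2 := newHash()
	h2.Write([]byte("aaaaaaaaaa"))
	if !bytes.Equal(h2.Sum(nil), want) {
		t.Fatal("checksum mismatch")
	}
}

// Loop over the table so every algorithm gets identical coverage, which is
// exactly what the new TestAllBitrotAlgorithms does.
func TestAllAlgorithms(t *testing.T) {
	for name, newHash := range algorithms {
		newHash := newHash // pin the loop variable for the closure (pre-Go 1.22)
		t.Run(name, func(t *testing.T) { testRoundTripAlgo(t, newHash) })
	}
}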

cmd/xl-v1-healing.go:

@@ -251,7 +251,6 @@ func listAllBuckets(storageDisks []StorageAPI) (buckets map[string]VolInfo,
 // Heals an object by re-writing corrupt/missing erasure blocks.
 func healObject(ctx context.Context, storageDisks []StorageAPI, bucket string, object string,
 	quorum int, dryRun bool) (result madmin.HealResultItem, err error) {
-
 	partsMetadata, errs := readAllXLMetadata(ctx, storageDisks, bucket, object)
 
 	errCount := 0
@@ -432,20 +431,21 @@ func healObject(ctx context.Context, storageDisks []StorageAPI, bucket string, o
 		partActualSize := latestMeta.Parts[partIndex].ActualSize
 		partNumber := latestMeta.Parts[partIndex].Number
 		tillOffset := erasure.ShardFileTillOffset(0, partSize, partSize)
-		checksumInfo := erasureInfo.GetChecksumInfo(partName)
 		readers := make([]io.ReaderAt, len(latestDisks))
+		checksumAlgo := erasureInfo.GetChecksumInfo(partName).Algorithm
 		for i, disk := range latestDisks {
 			if disk == OfflineDisk {
 				continue
 			}
-			readers[i] = newBitrotReader(disk, bucket, pathJoin(object, partName), tillOffset, checksumInfo.Algorithm, checksumInfo.Hash, erasure.ShardSize())
+			checksumInfo := partsMetadata[i].Erasure.GetChecksumInfo(partName)
+			readers[i] = newBitrotReader(disk, bucket, pathJoin(object, partName), tillOffset, checksumAlgo, checksumInfo.Hash, erasure.ShardSize())
 		}
 		writers := make([]io.Writer, len(outDatedDisks))
 		for i, disk := range outDatedDisks {
 			if disk == OfflineDisk {
 				continue
 			}
-			writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, pathJoin(tmpID, partName), tillOffset, checksumInfo.Algorithm, erasure.ShardSize())
+			writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, pathJoin(tmpID, partName), tillOffset, checksumAlgo, erasure.ShardSize())
 		}
 		hErr := erasure.Heal(ctx, readers, writers, partSize)
 		closeBitrotReaders(readers)
@@ -467,7 +467,7 @@ func healObject(ctx context.Context, storageDisks []StorageAPI, bucket string, o
 				continue
 			}
 			partsMetadata[i].AddObjectPart(partNumber, partName, "", partSize, partActualSize)
-			partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{partName, checksumInfo.Algorithm, bitrotWriterSum(writers[i])})
+			partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{partName, checksumAlgo, bitrotWriterSum(writers[i])})
 		}
 
 	// If all disks are having errors, we give up.
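The core of the healing fix sits in the two hunks above: under erasure coding every disk stores a different shard of a part, so each disk records a different bitrot hash. The old code verified every disk against one checksum taken from the latest metadata; the fix looks up each disk's own checksum from partsMetadata[i], while the algorithm (identical across disks) can still come from the latest metadata. A simplified sketch of the per-disk lookup, with stand-ins for MinIO's ErasureInfo and ChecksumInfo types:

package example

// ChecksumInfo and ErasureInfo are simplified stand-ins for the MinIO
// types of the same names.
type ChecksumInfo struct {
	Name string // part name, e.g. "part.1"
	Hash []byte // bitrot hash of this disk's shard of the part
}

type ErasureInfo struct {
	Checksums []ChecksumInfo
}

// GetChecksumInfo returns the checksum this disk recorded for a part.
func (e ErasureInfo) GetChecksumInfo(part string) ChecksumInfo {
	for _, c := range e.Checksums {
		if c.Name == part {
			return c
		}
	}
	return ChecksumInfo{}
}

// expectedHashes collects, per disk, the hash that disk's shard must
// match. Indexing by i is the whole point: shards differ across disks,
// so a single shared hash would flag healthy shards as corrupt.
func expectedHashes(perDiskErasure []ErasureInfo, part string) [][]byte {
	hashes := make([][]byte, len(perDiskErasure))
	for i, e := range perDiskErasure {
		hashes[i] = e.GetChecksumInfo(part).Hash
	}
	return hashes
}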
@@ -476,14 +476,6 @@ func healObject(ctx context.Context, storageDisks []StorageAPI, bucket string, o
 		}
 	}
 
-	// xl.json should be written to all the healed disks.
-	for index, disk := range outDatedDisks {
-		if disk == nil {
-			continue
-		}
-		partsMetadata[index] = latestMeta
-	}
-
 	// Generate and write `xl.json` generated from other disks.
 	outDatedDisks, aErr := writeUniqueXLMetadata(ctx, outDatedDisks, minioMetaTmpBucket, tmpID,
 		partsMetadata, diskCount(outDatedDisks))

cmd/xl-v1-metadata.go:

@@ -189,17 +189,9 @@ func newXLMetaV1(object string, dataBlocks, parityBlocks int) (xlMeta xlMetaV1)
 // Return a new xlMetaV1 initialized using the given xlMetaV1. Used in healing to make sure that we do not copy
 // over any part's checksum info which will differ for different disks.
 func newXLMetaFromXLMeta(meta xlMetaV1) xlMetaV1 {
-	xlMeta := xlMetaV1{}
-	xlMeta.Version = xlMetaVersion
-	xlMeta.Format = xlMetaFormat
-	xlMeta.Minio.Release = ReleaseTag
-	xlMeta.Erasure = ErasureInfo{
-		Algorithm:    meta.Erasure.Algorithm,
-		DataBlocks:   meta.Erasure.DataBlocks,
-		ParityBlocks: meta.Erasure.DataBlocks,
-		BlockSize:    meta.Erasure.BlockSize,
-		Distribution: meta.Erasure.Distribution,
-	}
+	xlMeta := meta
+	xlMeta.Erasure.Checksums = nil
+	xlMeta.Parts = nil
 	return xlMeta
 }
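The rewritten newXLMetaFromXLMeta leans on Go's value semantics: assigning a struct copies every field, and the two per-disk slices are then reset so healing regenerates them. This is also why the blanket partsMetadata[index] = latestMeta loop in xl-v1-healing.go could be deleted; it would have clobbered the per-disk checksums written during healing. As a bonus, the wholesale copy removes the typo in the deleted code that set ParityBlocks from meta.Erasure.DataBlocks. A minimal sketch of the copy-then-reset pattern with stand-in types:

package example

// xlMetaLike is a simplified stand-in for xlMetaV1.
type xlMetaLike struct {
	Version   string
	Format    string
	Parts     []string // per-disk: regenerated while healing
	Checksums []string // per-disk: regenerated while healing
}

// fromMeta copies everything, then nils out the per-disk slices. Nil-ing
// matters: plain assignment copies only the slice headers, so without it
// the copy would alias (and later trust) another disk's parts and
// checksums instead of its own.
func fromMeta(m xlMetaLike) xlMetaLike {
	out := m // field-by-field copy: Version, Format, etc. carry over
	out.Parts = nil
	out.Checksums = nil
	return out
}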
