// Streaming bitrot verification support (#7004) — commit 98c950aacd (parent 94c52e3816).
/*
 * Minio Cloud Storage, (C) 2019 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bytes"
	"context"
	"encoding/hex"
	"hash"
	"io"

	"github.com/minio/minio/cmd/logger"
)

// streamingBitrotWriter calculates the bitrot hash per chunk and interleaves
// each chunk's hash into the stream: every block written is preceded by its
// checksum on the underlying pipe.
type streamingBitrotWriter struct {
	iow       *io.PipeWriter // Data (with interleaved checksums) is piped to the disk-writing goroutine.
	h         hash.Hash      // Bitrot hash, reset and reused per block.
	shardSize int64          // Expected size of every block except the last.
	canClose  chan struct{}  // Needed to avoid race explained in Close() call.

	// Following two fields are used only to make sure that Write(p) is called such that
	// len(p) is always the block size except the last block, i.e prevent programmer errors.
	currentBlockIdx int
	verifyTillIdx   int
}

func (b *streamingBitrotWriter) Write(p []byte) (int, error) { |
||||
if b.currentBlockIdx < b.verifyTillIdx && int64(len(p)) != b.shardSize { |
||||
// All blocks except last should be of the length b.shardSize
|
||||
logger.LogIf(context.Background(), errUnexpected) |
||||
return 0, errUnexpected |
||||
} |
||||
if len(p) == 0 { |
||||
return 0, nil |
||||
} |
||||
b.h.Reset() |
||||
b.h.Write(p) |
||||
hashBytes := b.h.Sum(nil) |
||||
n, err := b.iow.Write(hashBytes) |
||||
if n != len(hashBytes) { |
||||
logger.LogIf(context.Background(), err) |
||||
return 0, err |
||||
} |
||||
n, err = b.iow.Write(p) |
||||
b.currentBlockIdx++ |
||||
return n, err |
||||
} |
||||
|
||||
func (b *streamingBitrotWriter) Close() error { |
||||
err := b.iow.Close() |
||||
// Wait for all data to be written before returning else it causes race conditions.
|
||||
// Race condition is because of io.PipeWriter implementation. i.e consider the following
|
||||
// sequent of operations:
|
||||
// 1) pipe.Write()
|
||||
// 2) pipe.Close()
|
||||
// Now pipe.Close() can return before the data is read on the other end of the pipe and written to the disk
|
||||
// Hence an immediate Read() on the file can return incorrect data.
|
||||
<-b.canClose |
||||
return err |
||||
} |
||||
|
||||
// Returns streaming bitrot writer implementation.
|
||||
func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.WriteCloser { |
||||
r, w := io.Pipe() |
||||
h := algo.New() |
||||
bw := &streamingBitrotWriter{w, h, shardSize, make(chan struct{}), 0, int(length / shardSize)} |
||||
go func() { |
||||
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
|
||||
totalFileSize := bitrotSumsTotalSize + length |
||||
err := disk.CreateFile(volume, filePath, totalFileSize, r) |
||||
if err != nil { |
||||
logger.LogIf(context.Background(), err) |
||||
r.CloseWithError(err) |
||||
} |
||||
close(bw.canClose) |
||||
}() |
||||
return bw |
||||
} |
||||
|
||||
// streamingBitrotReader is a ReadAt() implementation which verifies the
// bitrot hash available as part of the stream (one checksum precedes each
// shard of data).
type streamingBitrotReader struct {
	disk       StorageAPI
	rc         io.ReadCloser // Opened lazily on the first ReadAt() call.
	volume     string
	filePath   string
	tillOffset int64 // End offset in the stream, checksums included.
	currOffset int64 // Expected offset of the next ReadAt(); reads must be sequential.
	h          hash.Hash
	shardSize  int64
	hashBytes  []byte // Scratch buffer, h.Size() bytes, holds the stored checksum.
}

func (b *streamingBitrotReader) Close() error { |
||||
if b.rc == nil { |
||||
return nil |
||||
} |
||||
return b.rc.Close() |
||||
} |
||||
|
||||
func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) { |
||||
var err error |
||||
if offset%b.shardSize != 0 { |
||||
// Offset should always be aligned to b.shardSize
|
||||
logger.LogIf(context.Background(), errUnexpected) |
||||
return 0, errUnexpected |
||||
} |
||||
if b.rc == nil { |
||||
// For the first ReadAt() call we need to open the stream for reading.
|
||||
b.currOffset = offset |
||||
streamOffset := (offset/b.shardSize)*int64(b.h.Size()) + offset |
||||
b.rc, err = b.disk.ReadFileStream(b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset) |
||||
if err != nil { |
||||
logger.LogIf(context.Background(), err) |
||||
return 0, err |
||||
} |
||||
} |
||||
if offset != b.currOffset { |
||||
logger.LogIf(context.Background(), errUnexpected) |
||||
return 0, errUnexpected |
||||
} |
||||
b.h.Reset() |
||||
_, err = io.ReadFull(b.rc, b.hashBytes) |
||||
if err != nil { |
||||
logger.LogIf(context.Background(), err) |
||||
return 0, err |
||||
} |
||||
_, err = io.ReadFull(b.rc, buf) |
||||
if err != nil { |
||||
logger.LogIf(context.Background(), err) |
||||
return 0, err |
||||
} |
||||
b.h.Write(buf) |
||||
|
||||
if bytes.Compare(b.h.Sum(nil), b.hashBytes) != 0 { |
||||
err = hashMismatchError{hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil))} |
||||
logger.LogIf(context.Background(), err) |
||||
return 0, err |
||||
} |
||||
b.currOffset += int64(len(buf)) |
||||
return len(buf), nil |
||||
} |
||||
|
||||
// Returns streaming bitrot reader implementation.
|
||||
func newStreamingBitrotReader(disk StorageAPI, volume, filePath string, tillOffset int64, algo BitrotAlgorithm, shardSize int64) *streamingBitrotReader { |
||||
h := algo.New() |
||||
return &streamingBitrotReader{ |
||||
disk, |
||||
nil, |
||||
volume, |
||||
filePath, |
||||
ceilFrac(tillOffset, shardSize)*int64(h.Size()) + tillOffset, |
||||
0, |
||||
h, |
||||
shardSize, |
||||
make([]byte, h.Size()), |
||||
} |
||||
} |
// ---- second file: whole-file bitrot implementation ----
/*
 * Minio Cloud Storage, (C) 2019 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"hash"
	"io"

	"github.com/minio/minio/cmd/logger"
)

// wholeBitrotWriter is an implementation that calculates one bitrot hash for
// the whole file while appending the data to disk via AppendFile().
type wholeBitrotWriter struct {
	disk      StorageAPI
	volume    string
	filePath  string
	shardSize int64 // This is the shard size of the erasure logic
	hash.Hash       // For bitrot hash

	// Following two fields are used only to make sure that Write(p) is called such that
	// len(p) is always the block size except the last block and prevent programmer errors.
	currentBlockIdx int
	lastBlockIdx    int
}

func (b *wholeBitrotWriter) Write(p []byte) (int, error) { |
||||
if b.currentBlockIdx < b.lastBlockIdx && int64(len(p)) != b.shardSize { |
||||
// All blocks except last should be of the length b.shardSize
|
||||
logger.LogIf(context.Background(), errUnexpected) |
||||
return 0, errUnexpected |
||||
} |
||||
err := b.disk.AppendFile(b.volume, b.filePath, p) |
||||
if err != nil { |
||||
logger.LogIf(context.Background(), err) |
||||
return 0, err |
||||
} |
||||
_, err = b.Hash.Write(p) |
||||
if err != nil { |
||||
logger.LogIf(context.Background(), err) |
||||
return 0, err |
||||
} |
||||
b.currentBlockIdx++ |
||||
return len(p), nil |
||||
} |
||||
|
||||
// Close is a no-op: Write() sends each block straight to disk.AppendFile and
// nothing is buffered locally, so there is nothing to flush here.
func (b *wholeBitrotWriter) Close() error {
	return nil
}

// Returns whole-file bitrot writer.
|
||||
func newWholeBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.WriteCloser { |
||||
return &wholeBitrotWriter{disk, volume, filePath, shardSize, algo.New(), 0, int(length / shardSize)} |
||||
} |
||||
|
||||
// wholeBitrotReader is an implementation that verifies bitrot for the whole
// file: the first ReadAt() reads (and verifies) everything at once, later
// calls are served from the buffered data.
type wholeBitrotReader struct {
	disk     StorageAPI
	volume   string
	filePath string
	verifier *BitrotVerifier // Holds the bit-rot info
	tillOffset int64         // Affects the length of data requested in disk.ReadFile depending on Read()'s offset
	buf        []byte        // Holds bit-rot verified data
}

func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error) { |
||||
if b.buf == nil { |
||||
b.buf = make([]byte, b.tillOffset-offset) |
||||
if _, err := b.disk.ReadFile(b.volume, b.filePath, offset, b.buf, b.verifier); err != nil { |
||||
ctx := context.Background() |
||||
logger.GetReqInfo(ctx).AppendTags("disk", b.disk.String()) |
||||
logger.LogIf(ctx, err) |
||||
return 0, err |
||||
} |
||||
} |
||||
if len(b.buf) < len(buf) { |
||||
logger.LogIf(context.Background(), errLessData) |
||||
return 0, errLessData |
||||
} |
||||
n = copy(buf, b.buf) |
||||
b.buf = b.buf[n:] |
||||
return n, nil |
||||
} |
||||
|
||||
// Returns whole-file bitrot reader.
|
||||
func newWholeBitrotReader(disk StorageAPI, volume, filePath string, algo BitrotAlgorithm, tillOffset int64, sum []byte) *wholeBitrotReader { |
||||
return &wholeBitrotReader{ |
||||
disk: disk, |
||||
volume: volume, |
||||
filePath: filePath, |
||||
verifier: &BitrotVerifier{algo, sum}, |
||||
tillOffset: tillOffset, |
||||
buf: nil, |
||||
} |
||||
} |