From 52f6d5aafc7327b4c2793cf96d0022f55055a6b7 Mon Sep 17 00:00:00 2001
From: Krishna Srinivas <634494+krishnasrinivas@users.noreply.github.com>
Date: Thu, 23 Aug 2018 23:35:37 -0700
Subject: [PATCH] Rename structs and methods (#6230)

Rename ErasureStorage to Erasure, and rename the related variables and
methods to match.
---
 cmd/bitrot.go                                 |   4 +-
 ...{erasure-readfile.go => erasure-decode.go} |  24 ++--
 ...eadfile_test.go => erasure-decode_test.go} | 114 +++++++++---------
 ...rasure-createfile.go => erasure-encode.go} |   6 +-
 ...atefile_test.go => erasure-encode_test.go} |  70 +++++------
 cmd/{erasure-healfile.go => erasure-heal.go}  |  10 +-
 ...-healfile_test.go => erasure-heal_test.go} |  14 +--
 cmd/erasure.go                                |  43 +++----
 cmd/erasure_test.go                           |  16 +--
 cmd/fs-v1.go                                  |   2 +-
 cmd/metrics.go                                |   2 +-
 cmd/object-api-datatypes.go                   |   6 +-
 cmd/server-startup-msg.go                     |   2 +-
 cmd/server-startup-msg_test.go                |   2 +-
 cmd/storage-rpc_test.go                       |   2 -
 cmd/xl-sets.go                                |   2 +-
 cmd/xl-v1-healing.go                          |   8 +-
 cmd/xl-v1-multipart.go                        |   4 +-
 cmd/xl-v1-object.go                           |   8 +-
 cmd/xl-v1.go                                  |   5 +-
 20 files changed, 171 insertions(+), 173 deletions(-)
 rename cmd/{erasure-readfile.go => erasure-decode.go} (83%)
 rename cmd/{erasure-readfile_test.go => erasure-decode_test.go} (80%)
 rename cmd/{erasure-createfile.go => erasure-encode.go} (89%)
 rename cmd/{erasure-createfile_test.go => erasure-encode_test.go} (76%)
 rename cmd/{erasure-healfile.go => erasure-heal.go} (75%)
 rename cmd/{erasure-healfile_test.go => erasure-heal_test.go} (93%)

diff --git a/cmd/bitrot.go b/cmd/bitrot.go
index e90e80b3c..d93441bb0 100644
--- a/cmd/bitrot.go
+++ b/cmd/bitrot.go
@@ -23,7 +23,7 @@ import (

     "github.com/minio/highwayhash"
     "github.com/minio/minio/cmd/logger"
-    "github.com/minio/sha256-simd"
+    sha256 "github.com/minio/sha256-simd"
     "golang.org/x/crypto/blake2b"
 )

@@ -70,7 +70,7 @@ func (a BitrotAlgorithm) New() hash.Hash {
     }
 }

-// Available reports whether the given algorihm is available.
+// Available reports whether the given algorithm is available.
 func (a BitrotAlgorithm) Available() bool {
     _, ok := bitrotAlgorithms[a]
     return ok
diff --git a/cmd/erasure-readfile.go b/cmd/erasure-decode.go
similarity index 83%
rename from cmd/erasure-readfile.go
rename to cmd/erasure-decode.go
index d42620b7b..93583f3ec 100644
--- a/cmd/erasure-readfile.go
+++ b/cmd/erasure-decode.go
@@ -127,8 +127,8 @@ func (p *parallelReader) Read() ([][]byte, error) {
     return nil, errXLReadQuorum
 }

-// ReadFile reads from readers, reconstructs data if needed and writes the data to the writer.
-func (s ErasureStorage) ReadFile(ctx context.Context, writer io.Writer, readers []*bitrotReader, offset, length, totalLength int64) error {
+// Decode reads from readers, reconstructs data if needed and writes the data to the writer.
+func (e Erasure) Decode(ctx context.Context, writer io.Writer, readers []*bitrotReader, offset, length, totalLength int64) error {
     if offset < 0 || length < 0 {
         logger.LogIf(ctx, errInvalidArgument)
         return errInvalidArgument
@@ -141,27 +141,27 @@ func (s ErasureStorage) ReadFile(ctx context.Context, writer io.Writer, readers
         return nil
     }

-    reader := newParallelReader(readers, s.dataBlocks, offset, totalLength, s.blockSize)
+    reader := newParallelReader(readers, e.dataBlocks, offset, totalLength, e.blockSize)

-    startBlock := offset / s.blockSize
-    endBlock := (offset + length) / s.blockSize
+    startBlock := offset / e.blockSize
+    endBlock := (offset + length) / e.blockSize

     var bytesWritten int64
     for block := startBlock; block <= endBlock; block++ {
         var blockOffset, blockLength int64
         switch {
         case startBlock == endBlock:
-            blockOffset = offset % s.blockSize
+            blockOffset = offset % e.blockSize
             blockLength = length
         case block == startBlock:
-            blockOffset = offset % s.blockSize
-            blockLength = s.blockSize - blockOffset
+            blockOffset = offset % e.blockSize
+            blockLength = e.blockSize - blockOffset
         case block == endBlock:
             blockOffset = 0
-            blockLength = (offset + length) % s.blockSize
+            blockLength = (offset + length) % e.blockSize
         default:
             blockOffset = 0
-            blockLength = s.blockSize
+            blockLength = e.blockSize
         }
         if blockLength == 0 {
             break
@@ -170,11 +170,11 @@ func (s ErasureStorage) ReadFile(ctx context.Context, writer io.Writer, readers
         if err != nil {
             return err
         }
-        if err = s.ErasureDecodeDataBlocks(bufs); err != nil {
+        if err = e.DecodeDataBlocks(bufs); err != nil {
             logger.LogIf(ctx, err)
             return err
         }
-        n, err := writeDataBlocks(ctx, writer, bufs, s.dataBlocks, blockOffset, blockLength)
+        n, err := writeDataBlocks(ctx, writer, bufs, e.dataBlocks, blockOffset, blockLength)
         if err != nil {
             return err
         }
diff --git a/cmd/erasure-readfile_test.go b/cmd/erasure-decode_test.go
similarity index 80%
rename from cmd/erasure-readfile_test.go
rename to cmd/erasure-decode_test.go
index e18e1a6b8..e506b6a7e 100644
--- a/cmd/erasure-readfile_test.go
+++ b/cmd/erasure-decode_test.go
@@ -32,7 +32,7 @@ func (d badDisk) ReadFile(volume string, path string, offset int64, buf []byte,
     return 0, errFaultyDisk
 }

-var erasureReadFileTests = []struct {
+var erasureDecodeTests = []struct {
     dataBlocks        int
     onDisks, offDisks int
     blocksize, data   int64
@@ -81,13 +81,13 @@ var erasureReadFileTests = []struct {
     {dataBlocks: 8, onDisks: 12, offDisks: 4, blocksize: int64(blockSizeV1), data: int64(2 * blockSizeV1), offset: 11, length: int64(blockSizeV1) + 2*1024, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 37
 }

-func TestErasureReadFile(t *testing.T) {
-    for i, test := range erasureReadFileTests {
+func TestErasureDecode(t *testing.T) {
+    for i, test := range erasureDecodeTests {
         setup, err := newErasureTestSetup(test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
         if err != nil {
             t.Fatalf("Test %d: failed to create test setup: %v", i, err)
         }
-        storage, err := NewErasureStorage(context.Background(), test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
+        erasure, err := NewErasure(context.Background(), test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
         if err != nil {
             setup.Remove()
             t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
@@ -108,7 +108,7 @@ func TestErasureReadFile(t *testing.T) {
         for i, disk := range disks {
             writers[i] = newBitrotWriter(disk, "testbucket", "object", writeAlgorithm)
         }
-        n, err := storage.CreateFile(context.Background(), bytes.NewReader(data[:]), writers, buffer, storage.dataBlocks+1)
+        n, err := erasure.Encode(context.Background(), bytes.NewReader(data[:]), writers, buffer, erasure.dataBlocks+1)
         if err != nil {
             setup.Remove()
             t.Fatalf("Test %d: failed to create erasure test file: %v", i, err)
@@ -129,12 +129,12 @@ func TestErasureReadFile(t *testing.T) {
             if disk == OfflineDisk {
                 continue
             }
-            endOffset := getErasureShardFileEndOffset(test.offset, test.length, test.data, test.blocksize, storage.dataBlocks)
+            endOffset := getErasureShardFileEndOffset(test.offset, test.length, test.data, test.blocksize, erasure.dataBlocks)
             bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", writeAlgorithm, endOffset, writers[index].Sum())
         }
         writer := bytes.NewBuffer(nil)
-        err = storage.ReadFile(context.Background(), writer, bitrotReaders, test.offset, test.length, test.data)
+        err = erasure.Decode(context.Background(), writer, bitrotReaders, test.offset, test.length, test.data)
         if err != nil && !test.shouldFail {
             t.Errorf("Test %d: should pass but failed with: %v", i, err)
         }
@@ -157,7 +157,7 @@ func TestErasureReadFile(t *testing.T) {
             if disk == OfflineDisk {
                 continue
             }
-            endOffset := getErasureShardFileEndOffset(test.offset, test.length, test.data, test.blocksize, storage.dataBlocks)
+            endOffset := getErasureShardFileEndOffset(test.offset, test.length, test.data, test.blocksize, erasure.dataBlocks)
             bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", writeAlgorithm, endOffset, writers[index].Sum())
         }
         for j := range disks[:test.offDisks] {
             bitrotReaders[j] = nil
         }
         if test.offDisks > 0 {
             bitrotReaders[0] = nil
         }
         writer.Reset()
-        err = storage.ReadFile(context.Background(), writer, bitrotReaders, test.offset, test.length, test.data)
+        err = erasure.Decode(context.Background(), writer, bitrotReaders, test.offset, test.length, test.data)
         if err != nil && !test.shouldFailQuorum {
             t.Errorf("Test %d: should pass but failed with: %v", i, err)
         }
@@ -184,10 +184,10 @@ func TestErasureReadFile(t *testing.T) {
     }
 }

-// Test erasureReadFile with random offset and lengths.
+// Test erasureDecode with random offset and lengths.
 // This test is t.Skip()ed as it a long time to run, hence should be run
 // explicitly after commenting out t.Skip()
-func TestErasureReadFileRandomOffsetLength(t *testing.T) {
+func TestErasureDecodeRandomOffsetLength(t *testing.T) {
     // Comment the following line to run this test.
     t.SkipNow()
     // Initialize environment needed for the test.
@@ -201,7 +201,7 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {
     }
     defer setup.Remove()
     disks := setup.disks
-    storage, err := NewErasureStorage(context.Background(), dataBlocks, parityBlocks, blockSize)
+    erasure, err := NewErasure(context.Background(), dataBlocks, parityBlocks, blockSize)
     if err != nil {
         t.Fatalf("failed to create ErasureStorage: %v", err)
     }
@@ -226,7 +226,7 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {

     // Create a test file to read from.
     buffer := make([]byte, blockSize, 2*blockSize)
-    n, err := storage.CreateFile(context.Background(), bytes.NewReader(data), writers, buffer, storage.dataBlocks+1)
+    n, err := erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
     if err != nil {
         t.Fatal(err)
     }
@@ -239,7 +239,7 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {

     buf := &bytes.Buffer{}

-    // Verify erasureReadFile() for random offsets and lengths.
+    // Verify erasure.Decode() for random offsets and lengths.
     for i := 0; i < iterations; i++ {
         offset := r.Int63n(length)
         readLen := r.Int63n(length - offset)
@@ -252,10 +252,10 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {
             if disk == OfflineDisk {
                 continue
             }
-            endOffset := getErasureShardFileEndOffset(offset, readLen, length, blockSize, storage.dataBlocks)
+            endOffset := getErasureShardFileEndOffset(offset, readLen, length, blockSize, erasure.dataBlocks)
             bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", DefaultBitrotAlgorithm, endOffset, writers[index].Sum())
         }
-        err = storage.ReadFile(context.Background(), buf, bitrotReaders, offset, readLen, length)
+        err = erasure.Decode(context.Background(), buf, bitrotReaders, offset, readLen, length)
         if err != nil {
             t.Fatal(err, offset, readLen)
         }
@@ -269,14 +269,14 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {

 // Benchmarks

-func benchmarkErasureRead(data, parity, dataDown, parityDown int, size int64, b *testing.B) {
+func benchmarkErasureDecode(data, parity, dataDown, parityDown int, size int64, b *testing.B) {
     setup, err := newErasureTestSetup(data, parity, blockSizeV1)
     if err != nil {
         b.Fatalf("failed to create test setup: %v", err)
     }
     defer setup.Remove()
     disks := setup.disks
-    storage, err := NewErasureStorage(context.Background(), data, parity, blockSizeV1)
+    erasure, err := NewErasure(context.Background(), data, parity, blockSizeV1)
     if err != nil {
         b.Fatalf("failed to create ErasureStorage: %v", err)
     }
@@ -291,7 +291,7 @@ func benchmarkErasureRead(data, parity, dataDown, parityDown int, size int64, b

     content := make([]byte, size)
     buffer := make([]byte, blockSizeV1, 2*blockSizeV1)
-    _, err = storage.CreateFile(context.Background(), bytes.NewReader(content), writers, buffer, storage.dataBlocks+1)
+    _, err = erasure.Encode(context.Background(), bytes.NewReader(content), writers, buffer, erasure.dataBlocks+1)
     if err != nil {
         b.Fatalf("failed to create erasure test file: %v", err)
     }
@@ -312,62 +312,62 @@ func benchmarkErasureRead(data, parity, dataDown, parityDown int, size int64, b
         if writers[index] == nil {
             continue
         }
-        endOffset := getErasureShardFileEndOffset(0, size, size, storage.blockSize, storage.dataBlocks)
+        endOffset := getErasureShardFileEndOffset(0, size, size, erasure.blockSize, erasure.dataBlocks)
         bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", DefaultBitrotAlgorithm, endOffset, writers[index].Sum())
     }
-    if err = storage.ReadFile(context.Background(), bytes.NewBuffer(content[:0]), bitrotReaders, 0, size, size); err != nil {
+    if err = erasure.Decode(context.Background(), bytes.NewBuffer(content[:0]), bitrotReaders, 0, size, size); err != nil {
         panic(err)
     }
 }

-func BenchmarkErasureReadQuick(b *testing.B) {
+func BenchmarkErasureDecodeQuick(b *testing.B) {
     const size = 12 * 1024 * 1024
-    b.Run(" 00|00 ", func(b *testing.B) { benchmarkErasureRead(2, 2, 0, 0, size, b) })
-    b.Run(" 00|X0 ", func(b *testing.B) { benchmarkErasureRead(2, 2, 0, 1, size, b) })
-    b.Run(" X0|00 ", func(b *testing.B) { benchmarkErasureRead(2, 2, 1, 0, size, b) })
-    b.Run(" X0|X0 ", func(b *testing.B) { benchmarkErasureRead(2, 2, 1, 1, size, b) })
+    b.Run(" 00|00 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 0, 0, size, b) })
+    b.Run(" 00|X0 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 0, 1, size, b) })
+    b.Run(" X0|00 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 1, 0, size, b) })
+    b.Run(" X0|X0 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 1, 1, size, b) })
 }
-func BenchmarkErasureRead_4_64KB(b *testing.B) {
+func BenchmarkErasureDecode_4_64KB(b *testing.B) {
     const size = 64 * 1024
-    b.Run(" 00|00 ", func(b *testing.B) { benchmarkErasureRead(2, 2, 0, 0, size, b) })
-    b.Run(" 00|X0 ", func(b *testing.B) { benchmarkErasureRead(2, 2, 0, 1, size, b) })
-    b.Run(" X0|00 ", func(b *testing.B) { benchmarkErasureRead(2, 2, 1, 0, size, b) })
-    b.Run(" X0|X0 ", func(b *testing.B) { benchmarkErasureRead(2, 2, 1, 1, size, b) })
-    b.Run(" 00|XX ", func(b *testing.B) { benchmarkErasureRead(2, 2, 0, 2, size, b) })
-    b.Run(" XX|00 ", func(b *testing.B) { benchmarkErasureRead(2, 2, 2, 0, size, b) })
+    b.Run(" 00|00 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 0, 0, size, b) })
+    b.Run(" 00|X0 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 0, 1, size, b) })
+    b.Run(" X0|00 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 1, 0, size, b) })
+    b.Run(" X0|X0 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 1, 1, size, b) })
+    b.Run(" 00|XX ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 0, 2, size, b) })
+    b.Run(" XX|00 ", func(b *testing.B) { benchmarkErasureDecode(2, 2, 2, 0, size, b) })
 }

-func BenchmarkErasureRead_8_20MB(b *testing.B) {
+func BenchmarkErasureDecode_8_20MB(b *testing.B) {
     const size = 20 * 1024 * 1024
-    b.Run(" 0000|0000 ", func(b *testing.B) { benchmarkErasureRead(4, 4, 0, 0, size, b) })
-    b.Run(" 0000|X000 ", func(b *testing.B) { benchmarkErasureRead(4, 4, 0, 1, size, b) })
-    b.Run(" X000|0000 ", func(b *testing.B) { benchmarkErasureRead(4, 4, 1, 0, size, b) })
-    b.Run(" X000|X000 ", func(b *testing.B) { benchmarkErasureRead(4, 4, 1, 1, size, b) })
-    b.Run(" 0000|XXXX ", func(b *testing.B) { benchmarkErasureRead(4, 4, 0, 4, size, b) })
-    b.Run(" XX00|XX00 ", func(b *testing.B) { benchmarkErasureRead(4, 4, 2, 2, size, b) })
-    b.Run(" XXXX|0000 ", func(b *testing.B) { benchmarkErasureRead(4, 4, 4, 0, size, b) })
+    b.Run(" 0000|0000 ", func(b *testing.B) { benchmarkErasureDecode(4, 4, 0, 0, size, b) })
+    b.Run(" 0000|X000 ", func(b *testing.B) { benchmarkErasureDecode(4, 4, 0, 1, size, b) })
+    b.Run(" X000|0000 ", func(b *testing.B) { benchmarkErasureDecode(4, 4, 1, 0, size, b) })
+    b.Run(" X000|X000 ", func(b *testing.B) { benchmarkErasureDecode(4, 4, 1, 1, size, b) })
+    b.Run(" 0000|XXXX ", func(b *testing.B) { benchmarkErasureDecode(4, 4, 0, 4, size, b) })
+    b.Run(" XX00|XX00 ", func(b *testing.B) { benchmarkErasureDecode(4, 4, 2, 2, size, b) })
+    b.Run(" XXXX|0000 ", func(b *testing.B) { benchmarkErasureDecode(4, 4, 4, 0, size, b) })
 }

-func BenchmarkErasureRead_12_30MB(b *testing.B) {
+func BenchmarkErasureDecode_12_30MB(b *testing.B) {
     const size = 30 * 1024 * 1024
-    b.Run(" 000000|000000 ", func(b *testing.B) { benchmarkErasureRead(6, 6, 0, 0, size, b) })
-    b.Run(" 000000|X00000 ", func(b *testing.B) { benchmarkErasureRead(6, 6, 0, 1, size, b) })
-    b.Run(" X00000|000000 ", func(b *testing.B) { benchmarkErasureRead(6, 6, 1, 0, size, b) })
-    b.Run(" X00000|X00000 ", func(b *testing.B) { benchmarkErasureRead(6, 6, 1, 1, size, b) })
-    b.Run(" 000000|XXXXXX ", func(b *testing.B) { benchmarkErasureRead(6, 6, 0, 6, size, b) })
-    b.Run(" XXX000|XXX000 ", func(b *testing.B) { benchmarkErasureRead(6, 6, 3, 3, size, b) })
-    b.Run(" XXXXXX|000000 ", func(b *testing.B) { benchmarkErasureRead(6, 6, 6, 0, size, b) })
+    b.Run(" 000000|000000 ", func(b *testing.B) { benchmarkErasureDecode(6, 6, 0, 0, size, b) })
+    b.Run(" 000000|X00000 ", func(b *testing.B) { benchmarkErasureDecode(6, 6, 0, 1, size, b) })
+    b.Run(" X00000|000000 ", func(b *testing.B) { benchmarkErasureDecode(6, 6, 1, 0, size, b) })
+    b.Run(" X00000|X00000 ", func(b *testing.B) { benchmarkErasureDecode(6, 6, 1, 1, size, b) })
+    b.Run(" 000000|XXXXXX ", func(b *testing.B) { benchmarkErasureDecode(6, 6, 0, 6, size, b) })
+    b.Run(" XXX000|XXX000 ", func(b *testing.B) { benchmarkErasureDecode(6, 6, 3, 3, size, b) })
+    b.Run(" XXXXXX|000000 ", func(b *testing.B) { benchmarkErasureDecode(6, 6, 6, 0, size, b) })
 }

-func BenchmarkErasureRead_16_40MB(b *testing.B) {
+func BenchmarkErasureDecode_16_40MB(b *testing.B) {
     const size = 40 * 1024 * 1024
-    b.Run(" 00000000|00000000 ", func(b *testing.B) { benchmarkErasureRead(8, 8, 0, 0, size, b) })
-    b.Run(" 00000000|X0000000 ", func(b *testing.B) { benchmarkErasureRead(8, 8, 0, 1, size, b) })
-    b.Run(" X0000000|00000000 ", func(b *testing.B) { benchmarkErasureRead(8, 8, 1, 0, size, b) })
-    b.Run(" X0000000|X0000000 ", func(b *testing.B) { benchmarkErasureRead(8, 8, 1, 1, size, b) })
-    b.Run(" 00000000|XXXXXXXX ", func(b *testing.B) { benchmarkErasureRead(8, 8, 0, 8, size, b) })
-    b.Run(" XXXX0000|XXXX0000 ", func(b *testing.B) { benchmarkErasureRead(8, 8, 4, 4, size, b) })
-    b.Run(" XXXXXXXX|00000000 ", func(b *testing.B) { benchmarkErasureRead(8, 8, 8, 0, size, b) })
+    b.Run(" 00000000|00000000 ", func(b *testing.B) { benchmarkErasureDecode(8, 8, 0, 0, size, b) })
+    b.Run(" 00000000|X0000000 ", func(b *testing.B) { benchmarkErasureDecode(8, 8, 0, 1, size, b) })
+    b.Run(" X0000000|00000000 ", func(b *testing.B) { benchmarkErasureDecode(8, 8, 1, 0, size, b) })
+    b.Run(" X0000000|X0000000 ", func(b *testing.B) { benchmarkErasureDecode(8, 8, 1, 1, size, b) })
+    b.Run(" 00000000|XXXXXXXX ", func(b *testing.B) { benchmarkErasureDecode(8, 8, 0, 8, size, b) })
+    b.Run(" XXXX0000|XXXX0000 ", func(b *testing.B) { benchmarkErasureDecode(8, 8, 4, 4, size, b) })
+    b.Run(" XXXXXXXX|00000000 ", func(b *testing.B) { benchmarkErasureDecode(8, 8, 8, 0, size, b) })
 }
diff --git a/cmd/erasure-createfile.go b/cmd/erasure-encode.go
similarity index 89%
rename from cmd/erasure-createfile.go
rename to cmd/erasure-encode.go
index 1d0e28361..c3ac0e951 100644
--- a/cmd/erasure-createfile.go
+++ b/cmd/erasure-encode.go
@@ -69,8 +69,8 @@ func (p *parallelWriter) Append(ctx context.Context, blocks [][]byte) error {
     return reduceWriteQuorumErrs(ctx, p.errs, objectOpIgnoredErrs, p.writeQuorum)
 }

-// CreateFile reads from the reader, erasure-encodes the data and writes to the writers.
-func (s *ErasureStorage) CreateFile(ctx context.Context, src io.Reader, writers []*bitrotWriter, buf []byte, quorum int) (total int64, err error) {
+// Encode reads from the reader, erasure-encodes the data and writes to the writers.
+func (e *Erasure) Encode(ctx context.Context, src io.Reader, writers []*bitrotWriter, buf []byte, quorum int) (total int64, err error) {
     writer := &parallelWriter{
         writers:     writers,
         writeQuorum: quorum,
@@ -90,7 +90,7 @@ func (s *ErasureStorage) CreateFile(ctx context.Context, src io.Reader, writers
             break
         }
         // We take care of the situation where if n == 0 and total == 0 by creating empty data and parity files.
-        blocks, err = s.ErasureEncode(ctx, buf[:n])
+        blocks, err = e.EncodeData(ctx, buf[:n])
         if err != nil {
             logger.LogIf(ctx, err)
             return 0, err
diff --git a/cmd/erasure-createfile_test.go b/cmd/erasure-encode_test.go
similarity index 76%
rename from cmd/erasure-createfile_test.go
rename to cmd/erasure-encode_test.go
index 15accf800..87cf935bc 100644
--- a/cmd/erasure-createfile_test.go
+++ b/cmd/erasure-encode_test.go
@@ -34,7 +34,7 @@ func (a badDisk) AppendFile(volume string, path string, buf []byte) error {

 const oneMiByte = 1 * humanize.MiByte

-var erasureCreateFileTests = []struct {
+var erasureEncodeTests = []struct {
     dataBlocks        int
     onDisks, offDisks int
     blocksize, data   int64
@@ -64,14 +64,14 @@ var erasureCreateFileTests = []struct {
     {dataBlocks: 10, onDisks: 16, offDisks: 8, blocksize: int64(blockSizeV1), data: oneMiByte, offset: 0, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 19
 }

-func TestErasureCreateFile(t *testing.T) {
-    for i, test := range erasureCreateFileTests {
+func TestErasureEncode(t *testing.T) {
+    for i, test := range erasureEncodeTests {
         setup, err := newErasureTestSetup(test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
         if err != nil {
             t.Fatalf("Test %d: failed to create test setup: %v", i, err)
         }
         disks := setup.disks
-        storage, err := NewErasureStorage(context.Background(), test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
+        erasure, err := NewErasure(context.Background(), test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
         if err != nil {
             setup.Remove()
             t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
@@ -90,7 +90,7 @@ func TestErasureCreateFile(t *testing.T) {
             }
             writers[i] = newBitrotWriter(disk, "testbucket", "object", test.algorithm)
         }
-        n, err := storage.CreateFile(context.Background(), bytes.NewReader(data[test.offset:]), writers, buffer, storage.dataBlocks+1)
+        n, err := erasure.Encode(context.Background(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1)
         if err != nil && !test.shouldFail {
             t.Errorf("Test %d: should pass but failed with: %v", i, err)
         }
@@ -119,7 +119,7 @@ func TestErasureCreateFile(t *testing.T) {
         if test.offDisks > 0 {
             writers[0] = nil
         }
-        n, err = storage.CreateFile(context.Background(), bytes.NewReader(data[test.offset:]), writers, buffer, storage.dataBlocks+1)
+        n, err = erasure.Encode(context.Background(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1)
         if err != nil && !test.shouldFailQuorum {
             t.Errorf("Test %d: should pass but failed with: %v", i, err)
         }
@@ -138,13 +138,13 @@ func TestErasureCreateFile(t *testing.T) {

 // Benchmarks

-func benchmarkErasureWrite(data, parity, dataDown, parityDown int, size int64, b *testing.B) {
+func benchmarkErasureEncode(data, parity, dataDown, parityDown int, size int64, b *testing.B) {
     setup, err := newErasureTestSetup(data, parity, blockSizeV1)
     if err != nil {
         b.Fatalf("failed to create test setup: %v", err)
     }
     defer setup.Remove()
-    storage, err := NewErasureStorage(context.Background(), data, parity, blockSizeV1)
+    erasure, err := NewErasure(context.Background(), data, parity, blockSizeV1)
     if err != nil {
         b.Fatalf("failed to create ErasureStorage: %v", err)
     }
@@ -170,50 +170,50 @@ func benchmarkErasureWrite(data, parity, dataDown, parityDown int, size int64, b
         }
             writers[i] = newBitrotWriter(disk, "testbucket", "object", DefaultBitrotAlgorithm)
         }
-        _, err := storage.CreateFile(context.Background(), bytes.NewReader(content), writers, buffer, storage.dataBlocks+1)
+        _, err := erasure.Encode(context.Background(), bytes.NewReader(content), writers, buffer, erasure.dataBlocks+1)
         if err != nil {
             panic(err)
         }
     }
 }

-func BenchmarkErasureWriteQuick(b *testing.B) {
+func BenchmarkErasureEncodeQuick(b *testing.B) {
     const size = 12 * 1024 * 1024
-    b.Run(" 00|00 ", func(b *testing.B) { benchmarkErasureWrite(2, 2, 0, 0, size, b) })
-    b.Run(" 00|X0 ", func(b *testing.B) { benchmarkErasureWrite(2, 2, 0, 1, size, b) })
-    b.Run(" X0|00 ", func(b *testing.B) { benchmarkErasureWrite(2, 2, 1, 0, size, b) })
+    b.Run(" 00|00 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 0, 0, size, b) })
+    b.Run(" 00|X0 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 0, 1, size, b) })
+    b.Run(" X0|00 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 1, 0, size, b) })
 }

-func BenchmarkErasureWrite_4_64KB(b *testing.B) {
+func BenchmarkErasureEncode_4_64KB(b *testing.B) {
     const size = 64 * 1024
-    b.Run(" 00|00 ", func(b *testing.B) { benchmarkErasureWrite(2, 2, 0, 0, size, b) })
-    b.Run(" 00|X0 ", func(b *testing.B) { benchmarkErasureWrite(2, 2, 0, 1, size, b) })
-    b.Run(" X0|00 ", func(b *testing.B) { benchmarkErasureWrite(2, 2, 1, 0, size, b) })
+    b.Run(" 00|00 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 0, 0, size, b) })
+    b.Run(" 00|X0 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 0, 1, size, b) })
+    b.Run(" X0|00 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 1, 0, size, b) })
 }

-func BenchmarkErasureWrite_8_20MB(b *testing.B) {
+func BenchmarkErasureEncode_8_20MB(b *testing.B) {
     const size = 20 * 1024 * 1024
-    b.Run(" 0000|0000 ", func(b *testing.B) { benchmarkErasureWrite(4, 4, 0, 0, size, b) })
-    b.Run(" 0000|X000 ", func(b *testing.B) { benchmarkErasureWrite(4, 4, 0, 1, size, b) })
-    b.Run(" X000|0000 ", func(b *testing.B) { benchmarkErasureWrite(4, 4, 1, 0, size, b) })
-    b.Run(" 0000|XXX0 ", func(b *testing.B) { benchmarkErasureWrite(4, 4, 0, 3, size, b) })
-    b.Run(" XXX0|0000 ", func(b *testing.B) { benchmarkErasureWrite(4, 4, 3, 0, size, b) })
+    b.Run(" 0000|0000 ", func(b *testing.B) { benchmarkErasureEncode(4, 4, 0, 0, size, b) })
+    b.Run(" 0000|X000 ", func(b *testing.B) { benchmarkErasureEncode(4, 4, 0, 1, size, b) })
+    b.Run(" X000|0000 ", func(b *testing.B) { benchmarkErasureEncode(4, 4, 1, 0, size, b) })
+    b.Run(" 0000|XXX0 ", func(b *testing.B) { benchmarkErasureEncode(4, 4, 0, 3, size, b) })
+    b.Run(" XXX0|0000 ", func(b *testing.B) { benchmarkErasureEncode(4, 4, 3, 0, size, b) })
 }

-func BenchmarkErasureWrite_12_30MB(b *testing.B) {
+func BenchmarkErasureEncode_12_30MB(b *testing.B) {
     const size = 30 * 1024 * 1024
-    b.Run(" 000000|000000 ", func(b *testing.B) { benchmarkErasureWrite(6, 6, 0, 0, size, b) })
-    b.Run(" 000000|X00000 ", func(b *testing.B) { benchmarkErasureWrite(6, 6, 0, 1, size, b) })
-    b.Run(" X00000|000000 ", func(b *testing.B) { benchmarkErasureWrite(6, 6, 1, 0, size, b) })
-    b.Run(" 000000|XXXXX0 ", func(b *testing.B) { benchmarkErasureWrite(6, 6, 0, 5, size, b) })
-    b.Run(" XXXXX0|000000 ", func(b *testing.B) { benchmarkErasureWrite(6, 6, 5, 0, size, b) })
+    b.Run(" 000000|000000 ", func(b *testing.B) { benchmarkErasureEncode(6, 6, 0, 0, size, b) })
+    b.Run(" 000000|X00000 ", func(b *testing.B) { benchmarkErasureEncode(6, 6, 0, 1, size, b) })
+    b.Run(" X00000|000000 ", func(b *testing.B) { benchmarkErasureEncode(6, 6, 1, 0, size, b) })
+    b.Run(" 000000|XXXXX0 ", func(b *testing.B) { benchmarkErasureEncode(6, 6, 0, 5, size, b) })
+    b.Run(" XXXXX0|000000 ", func(b *testing.B) { benchmarkErasureEncode(6, 6, 5, 0, size, b) })
 }

-func BenchmarkErasureWrite_16_40MB(b *testing.B) {
+func BenchmarkErasureEncode_16_40MB(b *testing.B) {
     const size = 40 * 1024 * 1024
-    b.Run(" 00000000|00000000 ", func(b *testing.B) { benchmarkErasureWrite(8, 8, 0, 0, size, b) })
-    b.Run(" 00000000|X0000000 ", func(b *testing.B) { benchmarkErasureWrite(8, 8, 0, 1, size, b) })
-    b.Run(" X0000000|00000000 ", func(b *testing.B) { benchmarkErasureWrite(8, 8, 1, 0, size, b) })
-    b.Run(" 00000000|XXXXXXX0 ", func(b *testing.B) { benchmarkErasureWrite(8, 8, 0, 7, size, b) })
-    b.Run(" XXXXXXX0|00000000 ", func(b *testing.B) { benchmarkErasureWrite(8, 8, 7, 0, size, b) })
+    b.Run(" 00000000|00000000 ", func(b *testing.B) { benchmarkErasureEncode(8, 8, 0, 0, size, b) })
+    b.Run(" 00000000|X0000000 ", func(b *testing.B) { benchmarkErasureEncode(8, 8, 0, 1, size, b) })
+    b.Run(" X0000000|00000000 ", func(b *testing.B) { benchmarkErasureEncode(8, 8, 1, 0, size, b) })
+    b.Run(" 00000000|XXXXXXX0 ", func(b *testing.B) { benchmarkErasureEncode(8, 8, 0, 7, size, b) })
+    b.Run(" XXXXXXX0|00000000 ", func(b *testing.B) { benchmarkErasureEncode(8, 8, 7, 0, size, b) })
 }
diff --git a/cmd/erasure-healfile.go b/cmd/erasure-heal.go
similarity index 75%
rename from cmd/erasure-healfile.go
rename to cmd/erasure-heal.go
index 7c652798a..344ce0f02 100644
--- a/cmd/erasure-healfile.go
+++ b/cmd/erasure-heal.go
@@ -23,20 +23,20 @@ import (
     "github.com/minio/minio/cmd/logger"
 )

-// HealFile heals the shard files on non-nil writers. Note that the quorum passed is 1
+// Heal heals the shard files on non-nil writers. Note that the quorum passed is 1
 // as healing should continue even if it has been successful healing only one shard file.
-func (s ErasureStorage) HealFile(ctx context.Context, readers []*bitrotReader, writers []*bitrotWriter, size int64) error {
+func (e Erasure) Heal(ctx context.Context, readers []*bitrotReader, writers []*bitrotWriter, size int64) error {
     r, w := io.Pipe()
     go func() {
-        if err := s.ReadFile(ctx, w, readers, 0, size, size); err != nil {
+        if err := e.Decode(ctx, w, readers, 0, size, size); err != nil {
             w.CloseWithError(err)
             return
         }
         w.Close()
     }()
-    buf := make([]byte, s.blockSize)
+    buf := make([]byte, e.blockSize)
     // quorum is 1 because CreateFile should continue writing as long as we are writing to even 1 disk.
-    n, err := s.CreateFile(ctx, r, writers, buf, 1)
+    n, err := e.Encode(ctx, r, writers, buf, 1)
     if err != nil {
         return err
     }
diff --git a/cmd/erasure-healfile_test.go b/cmd/erasure-heal_test.go
similarity index 93%
rename from cmd/erasure-healfile_test.go
rename to cmd/erasure-heal_test.go
index d70740893..5a52d6a6d 100644
--- a/cmd/erasure-healfile_test.go
+++ b/cmd/erasure-heal_test.go
@@ -24,7 +24,7 @@ import (
     "testing"
 )

-var erasureHealFileTests = []struct {
+var erasureHealTests = []struct {
     dataBlocks, disks int

     // number of offline disks is also number of staleDisks for
@@ -60,8 +60,8 @@ var erasureHealFileTests = []struct {
     {dataBlocks: 2, disks: 4, offDisks: 1, badDisks: 0, badStaleDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte * 64, algorithm: SHA256, shouldFail: false}, // 19
 }

-func TestErasureHealFile(t *testing.T) {
-    for i, test := range erasureHealFileTests {
+func TestErasureHeal(t *testing.T) {
+    for i, test := range erasureHealTests {
         if test.offDisks < test.badStaleDisks {
             // test case sanity check
             t.Fatalf("Test %d: Bad test case - number of stale disks cannot be less than number of badstale disks", i)
@@ -73,7 +73,7 @@ func TestErasureHealFile(t *testing.T) {
             t.Fatalf("Test %d: failed to setup XL environment: %v", i, err)
         }
         disks := setup.disks
-        storage, err := NewErasureStorage(context.Background(), test.dataBlocks, test.disks-test.dataBlocks, test.blocksize)
+        erasure, err := NewErasure(context.Background(), test.dataBlocks, test.disks-test.dataBlocks, test.blocksize)
         if err != nil {
             setup.Remove()
             t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
@@ -88,7 +88,7 @@ func TestErasureHealFile(t *testing.T) {
         for i, disk := range disks {
             writers[i] = newBitrotWriter(disk, "testbucket", "testobject", test.algorithm)
         }
-        _, err = storage.CreateFile(context.Background(), bytes.NewReader(data), writers, buffer, storage.dataBlocks+1)
+        _, err = erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
         if err != nil {
             setup.Remove()
             t.Fatalf("Test %d: failed to create random test data: %v", i, err)
@@ -96,7 +96,7 @@ func TestErasureHealFile(t *testing.T) {

         readers := make([]*bitrotReader, len(disks))
         for i, disk := range disks {
-            shardFilesize := getErasureShardFileSize(test.blocksize, test.size, storage.dataBlocks)
+            shardFilesize := getErasureShardFileSize(test.blocksize, test.size, erasure.dataBlocks)
             readers[i] = newBitrotReader(disk, "testbucket", "testobject", test.algorithm, shardFilesize, writers[i].Sum())
         }
@@ -126,7 +126,7 @@ func TestErasureHealFile(t *testing.T) {
         }

         // test case setup is complete - now call Healfile()
-        err = storage.HealFile(context.Background(), readers, staleWriters, test.size)
+        err = erasure.Heal(context.Background(), readers, staleWriters, test.size)
         if err != nil && !test.shouldFail {
             t.Errorf("Test %d: should pass but it failed with: %v", i, err)
         }
diff --git a/cmd/erasure.go b/cmd/erasure.go
index 59afbdd2f..229b8b310 100644
--- a/cmd/erasure.go
+++ b/cmd/erasure.go
@@ -23,26 +23,23 @@ import (
     "github.com/minio/minio/cmd/logger"
 )

-// OfflineDisk represents an unavailable disk.
-var OfflineDisk StorageAPI // zero value is nil
-
-// ErasureStorage - erasure encoding details.
-type ErasureStorage struct {
-    erasure                  reedsolomon.Encoder
+// Erasure - erasure encoding details.
+type Erasure struct {
+    encoder                  reedsolomon.Encoder
     dataBlocks, parityBlocks int
     blockSize                int64
 }

-// NewErasureStorage creates a new ErasureStorage.
-func NewErasureStorage(ctx context.Context, dataBlocks, parityBlocks int, blockSize int64) (s ErasureStorage, err error) {
+// NewErasure creates a new Erasure.
+func NewErasure(ctx context.Context, dataBlocks, parityBlocks int, blockSize int64) (e Erasure, err error) {
     shardsize := int(ceilFrac(blockSize, int64(dataBlocks)))
     erasure, err := reedsolomon.New(dataBlocks, parityBlocks, reedsolomon.WithAutoGoroutines(shardsize))
     if err != nil {
         logger.LogIf(ctx, err)
-        return s, err
+        return e, err
     }
-    s = ErasureStorage{
-        erasure:      erasure,
+    e = Erasure{
+        encoder:      erasure,
         dataBlocks:   dataBlocks,
         parityBlocks: parityBlocks,
         blockSize:    blockSize,
@@ -50,30 +47,30 @@ func NewErasureStorage(ctx context.Context, dataBlocks, parityBlocks int, blockS
     return
 }

-// ErasureEncode encodes the given data and returns the erasure-coded data.
+// EncodeData encodes the given data and returns the erasure-coded data.
 // It returns an error if the erasure coding failed.
-func (s *ErasureStorage) ErasureEncode(ctx context.Context, data []byte) ([][]byte, error) {
+func (e *Erasure) EncodeData(ctx context.Context, data []byte) ([][]byte, error) {
     if len(data) == 0 {
-        return make([][]byte, s.dataBlocks+s.parityBlocks), nil
+        return make([][]byte, e.dataBlocks+e.parityBlocks), nil
     }
-    encoded, err := s.erasure.Split(data)
+    encoded, err := e.encoder.Split(data)
     if err != nil {
         logger.LogIf(ctx, err)
         return nil, err
     }
-    if err = s.erasure.Encode(encoded); err != nil {
+    if err = e.encoder.Encode(encoded); err != nil {
         logger.LogIf(ctx, err)
         return nil, err
     }
     return encoded, nil
 }

-// ErasureDecodeDataBlocks decodes the given erasure-coded data.
+// DecodeDataBlocks decodes the given erasure-coded data.
 // It only decodes the data blocks but does not verify them.
 // It returns an error if the decoding failed.
-func (s *ErasureStorage) ErasureDecodeDataBlocks(data [][]byte) error {
+func (e *Erasure) DecodeDataBlocks(data [][]byte) error {
     needsReconstruction := false
-    for _, b := range data[:s.dataBlocks] {
+    for _, b := range data[:e.dataBlocks] {
         if b == nil {
             needsReconstruction = true
             break
@@ -82,16 +79,16 @@ func (s *ErasureStorage) ErasureDecodeDataBlocks(data [][]byte) error {
     if !needsReconstruction {
         return nil
     }
-    if err := s.erasure.ReconstructData(data); err != nil {
+    if err := e.encoder.ReconstructData(data); err != nil {
         return err
     }
     return nil
 }

-// ErasureDecodeDataAndParityBlocks decodes the given erasure-coded data and verifies it.
+// DecodeDataAndParityBlocks decodes the given erasure-coded data and verifies it.
 // It returns an error if the decoding failed.
-func (s *ErasureStorage) ErasureDecodeDataAndParityBlocks(ctx context.Context, data [][]byte) error {
-    if err := s.erasure.Reconstruct(data); err != nil {
+func (e *Erasure) DecodeDataAndParityBlocks(ctx context.Context, data [][]byte) error {
+    if err := e.encoder.Reconstruct(data); err != nil {
         logger.LogIf(ctx, err)
         return err
     }
diff --git a/cmd/erasure_test.go b/cmd/erasure_test.go
index ff1e89ee8..132f33858 100644
--- a/cmd/erasure_test.go
+++ b/cmd/erasure_test.go
@@ -25,7 +25,7 @@ import (
     "testing"
 )

-var erasureDecodeTests = []struct {
+var erasureEncodeDecodeTests = []struct {
     dataBlocks, parityBlocks   int
     missingData, missingParity int
     reconstructParity          bool
@@ -43,20 +43,20 @@ var erasureDecodeTests = []struct {
     {dataBlocks: 8, parityBlocks: 4, missingData: 2, missingParity: 2, reconstructParity: false, shouldFail: false},
 }

-func TestErasureDecode(t *testing.T) {
+func TestErasureEncodeDecode(t *testing.T) {
     data := make([]byte, 256)
     if _, err := io.ReadFull(rand.Reader, data); err != nil {
         t.Fatalf("Failed to read random data: %v", err)
     }
-    for i, test := range erasureDecodeTests {
+    for i, test := range erasureEncodeDecodeTests {
         buffer := make([]byte, len(data), 2*len(data))
         copy(buffer, data)

-        storage, err := NewErasureStorage(context.Background(), test.dataBlocks, test.parityBlocks, blockSizeV1)
+        erasure, err := NewErasure(context.Background(), test.dataBlocks, test.parityBlocks, blockSizeV1)
         if err != nil {
-            t.Fatalf("Test %d: failed to create erasure storage: %v", i, err)
+            t.Fatalf("Test %d: failed to create erasure: %v", i, err)
         }
-        encoded, err := storage.ErasureEncode(context.Background(), buffer)
+        encoded, err := erasure.EncodeData(context.Background(), buffer)
         if err != nil {
             t.Fatalf("Test %d: failed to encode data: %v", i, err)
         }
@@ -69,9 +69,9 @@ func TestErasureDecode(t *testing.T) {
         }

         if test.reconstructParity {
-            err = storage.ErasureDecodeDataAndParityBlocks(context.Background(), encoded)
+            err = erasure.DecodeDataAndParityBlocks(context.Background(), encoded)
         } else {
-            err = storage.ErasureDecodeDataBlocks(encoded)
+            err = erasure.DecodeDataBlocks(encoded)
         }

         if err == nil && test.shouldFail {
diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go
index c096c617a..d7ea10b59 100644
--- a/cmd/fs-v1.go
+++ b/cmd/fs-v1.go
@@ -261,7 +261,7 @@ func (fs *FSObjects) StorageInfo(ctx context.Context) StorageInfo {
     storageInfo := StorageInfo{
         Used: used,
     }
-    storageInfo.Backend.Type = FS
+    storageInfo.Backend.Type = BackendFS
     return storageInfo
 }

diff --git a/cmd/metrics.go b/cmd/metrics.go
index 7853499bb..9473af174 100644
--- a/cmd/metrics.go
+++ b/cmd/metrics.go
@@ -124,7 +124,7 @@ func (c *minioCollector) Collect(ch chan<- prometheus.Metric) {
     var totalDisks, offlineDisks int
     // Setting totalDisks to 1 and offlineDisks to 0 in FS mode
-    if s.Backend.Type == FS {
+    if s.Backend.Type == BackendFS {
         totalDisks = 1
         offlineDisks = 0
     } else {
diff --git a/cmd/object-api-datatypes.go b/cmd/object-api-datatypes.go
index ac72a19f9..835e61460 100644
--- a/cmd/object-api-datatypes.go
+++ b/cmd/object-api-datatypes.go
@@ -31,9 +31,9 @@ type BackendType int
 const (
     Unknown BackendType = iota
     // Filesystem backend.
-    FS
-    // Multi disk Erasure (single, distributed) backend.
-    Erasure
+    BackendFS
+    // Multi disk erasure-coded (single, distributed) backend.
+    BackendErasure
     // Add your own backend.
 )
diff --git a/cmd/server-startup-msg.go b/cmd/server-startup-msg.go
index 6cbe0c656..7733e8d75 100644
--- a/cmd/server-startup-msg.go
+++ b/cmd/server-startup-msg.go
@@ -174,7 +174,7 @@ func printObjectAPIMsg() {
 // Get formatted disk/storage info message.
 func getStorageInfoMsg(storageInfo StorageInfo) string {
     var msg string
-    if storageInfo.Backend.Type == Erasure {
+    if storageInfo.Backend.Type == BackendErasure {
         diskInfo := fmt.Sprintf(" %d Online, %d Offline. ", storageInfo.Backend.OnlineDisks, storageInfo.Backend.OfflineDisks)
         msg += colorBlue("Status:") + fmt.Sprintf(getFormatStr(len(diskInfo), 8), diskInfo)
     }
diff --git a/cmd/server-startup-msg_test.go b/cmd/server-startup-msg_test.go
index bb7e5ccf8..9739e3a3b 100644
--- a/cmd/server-startup-msg_test.go
+++ b/cmd/server-startup-msg_test.go
@@ -30,7 +30,7 @@ import (
 // Tests if we generate storage info.
 func TestStorageInfoMsg(t *testing.T) {
     infoStorage := StorageInfo{}
-    infoStorage.Backend.Type = Erasure
+    infoStorage.Backend.Type = BackendErasure
     infoStorage.Backend.OnlineDisks = 7
     infoStorage.Backend.OfflineDisks = 1

diff --git a/cmd/storage-rpc_test.go b/cmd/storage-rpc_test.go
index 68c3fbfb6..634595bed 100644
--- a/cmd/storage-rpc_test.go
+++ b/cmd/storage-rpc_test.go
@@ -17,7 +17,6 @@
 package cmd

 import (
-    "fmt"
     "io/ioutil"
     "net/http"
     "net/http/httptest"
@@ -355,7 +354,6 @@ func testStorageAPIReadFile(t *testing.T, storage StorageAPI) {

         expectErr := (err != nil)
         if expectErr != testCase.expectErr {
-            fmt.Println(err)
             t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
         }

diff --git a/cmd/xl-sets.go b/cmd/xl-sets.go
index 68dde9ec6..ee65adfff 100644
--- a/cmd/xl-sets.go
+++ b/cmd/xl-sets.go
@@ -272,7 +272,7 @@ func newXLSets(endpoints EndpointList, format *formatXLV3, setCount int, drivesP
 // StorageInfo - combines output of StorageInfo across all erasure coded object sets.
 func (s *xlSets) StorageInfo(ctx context.Context) StorageInfo {
     var storageInfo StorageInfo
-    storageInfo.Backend.Type = Erasure
+    storageInfo.Backend.Type = BackendErasure
     for _, set := range s.sets {
         lstorageInfo := set.StorageInfo(ctx)
         storageInfo.Used = storageInfo.Used + lstorageInfo.Used
diff --git a/cmd/xl-v1-healing.go b/cmd/xl-v1-healing.go
index 9b6fa5865..fa58db62c 100644
--- a/cmd/xl-v1-healing.go
+++ b/cmd/xl-v1-healing.go
@@ -446,7 +446,7 @@ func healObject(ctx context.Context, storageDisks []StorageAPI, bucket string, o
     // Heal each part. erasureHealFile() will write the healed
     // part to .minio/tmp/uuid/ which needs to be renamed later to
     // the final location.
-    storage, err := NewErasureStorage(ctx, latestMeta.Erasure.DataBlocks,
+    erasure, err := NewErasure(ctx, latestMeta.Erasure.DataBlocks,
         latestMeta.Erasure.ParityBlocks, latestMeta.Erasure.BlockSize)
     if err != nil {
         return result, toObjectErr(err, bucket, object)
@@ -455,7 +455,7 @@ func healObject(ctx context.Context, storageDisks []StorageAPI, bucket string, o
     for partIndex := 0; partIndex < len(latestMeta.Parts); partIndex++ {
         partName := latestMeta.Parts[partIndex].Name
         partSize := latestMeta.Parts[partIndex].Size
-        erasure := latestMeta.Erasure
+        erasureInfo := latestMeta.Erasure
         var algorithm BitrotAlgorithm
         bitrotReaders := make([]*bitrotReader, len(latestDisks))
         for i, disk := range latestDisks {
@@ -464,7 +464,7 @@ func healObject(ctx context.Context, storageDisks []StorageAPI, bucket string, o
             }
             info := partsMetadata[i].Erasure.GetChecksumInfo(partName)
             algorithm = info.Algorithm
-            endOffset := getErasureShardFileEndOffset(0, partSize, partSize, erasure.BlockSize, storage.dataBlocks)
+            endOffset := getErasureShardFileEndOffset(0, partSize, partSize, erasureInfo.BlockSize, erasure.dataBlocks)
             bitrotReaders[i] = newBitrotReader(disk, bucket, pathJoin(object, partName), algorithm, endOffset, info.Hash)
         }
         bitrotWriters := make([]*bitrotWriter, len(outDatedDisks))
@@ -474,7 +474,7 @@ func healObject(ctx context.Context, storageDisks []StorageAPI, bucket string, o
             }
             bitrotWriters[i] = newBitrotWriter(disk, minioMetaTmpBucket, pathJoin(tmpID, partName), algorithm)
         }
-        hErr := storage.HealFile(ctx, bitrotReaders, bitrotWriters, partSize)
+        hErr := erasure.Heal(ctx, bitrotReaders, bitrotWriters, partSize)
         if hErr != nil {
             return result, toObjectErr(hErr, bucket, object)
         }
diff --git a/cmd/xl-v1-multipart.go b/cmd/xl-v1-multipart.go
index ed46d9373..8b376f3aa 100644
--- a/cmd/xl-v1-multipart.go
+++ b/cmd/xl-v1-multipart.go
@@ -369,7 +369,7 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
         }
     }

-    storage, err := NewErasureStorage(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize)
+    erasure, err := NewErasure(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize)
     if err != nil {
         return pi, toObjectErr(err, bucket, object)
     }
@@ -397,7 +397,7 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
         }
         writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tmpPartPath, DefaultBitrotAlgorithm)
     }
-    n, err := storage.CreateFile(ctx, data, writers, buffer, storage.dataBlocks+1)
+    n, err := erasure.Encode(ctx, data, writers, buffer, erasure.dataBlocks+1)
     if err != nil {
         return pi, toObjectErr(err, bucket, object)
     }
diff --git a/cmd/xl-v1-object.go b/cmd/xl-v1-object.go
index c4a110528..d056fb30b 100644
--- a/cmd/xl-v1-object.go
+++ b/cmd/xl-v1-object.go
@@ -262,7 +262,7 @@ func (xl xlObjects) getObject(ctx context.Context, bucket, object string, startO
     }

     var totalBytesRead int64
-    storage, err := NewErasureStorage(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize)
+    erasure, err := NewErasure(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize)
     if err != nil {
         return toObjectErr(err, bucket, object)
     }
@@ -292,7 +292,7 @@ func (xl xlObjects) getObject(ctx context.Context, bucket, object string, startO
             bitrotReaders[index] = newBitrotReader(disk, bucket, pathJoin(object, partName), checksumInfo.Algorithm, endOffset, checksumInfo.Hash)
         }

-        err := storage.ReadFile(ctx, writer, bitrotReaders, partOffset, partLength, partSize)
+        err := erasure.Decode(ctx, writer, bitrotReaders, partOffset, partLength, partSize)
         if err != nil {
             return toObjectErr(err, bucket, object)
         }
@@ -608,7 +608,7 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string,
     // Total size of the written object
     var sizeWritten int64

-    storage, err := NewErasureStorage(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize)
+    erasure, err := NewErasure(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize)
     if err != nil {
         return ObjectInfo{}, toObjectErr(err, bucket, object)
     }
@@ -667,7 +667,7 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string,
         }
         writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tempErasureObj, DefaultBitrotAlgorithm)
     }
-    n, erasureErr := storage.CreateFile(ctx, curPartReader, writers, buffer, storage.dataBlocks+1)
+    n, erasureErr := erasure.Encode(ctx, curPartReader, writers, buffer, erasure.dataBlocks+1)
     if erasureErr != nil {
         return ObjectInfo{}, toObjectErr(erasureErr, minioMetaTmpBucket, tempErasureObj)
     }
diff --git a/cmd/xl-v1.go b/cmd/xl-v1.go
index 21e21fabd..09a302231 100644
--- a/cmd/xl-v1.go
+++ b/cmd/xl-v1.go
@@ -30,6 +30,9 @@ const (
     xlMetaJSONFile = "xl.json"
 )

+// OfflineDisk represents an unavailable disk.
+var OfflineDisk StorageAPI // zero value is nil
+
 // xlObjects - Implements XL object layer.
 type xlObjects struct {
     // name space mutex for object layer.
@@ -137,7 +140,7 @@ func getStorageInfo(disks []StorageAPI) StorageInfo {
     }

     storageInfo.Used = used
-    storageInfo.Backend.Type = Erasure
+    storageInfo.Backend.Type = BackendErasure
     storageInfo.Backend.OnlineDisks = onlineDisks
     storageInfo.Backend.OfflineDisks = offlineDisks
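
Note on the renamed API: Erasure is a thin wrapper over a Reed-Solomon coder, and the standalone sketch below shows the encode/reconstruct round trip that EncodeData and DecodeDataBlocks drive. It is illustrative only, not part of this patch: the shard counts, the payload, the simulated shard loss, and the import path of the reedsolomon package used above are all assumptions for the example.

package main

import (
    "bytes"
    "fmt"
    "log"

    "github.com/klauspost/reedsolomon" // assumed import path of the reedsolomon package used above
)

func main() {
    // Same layout parameters NewErasure receives: data and parity shard counts (assumed values).
    const dataBlocks, parityBlocks = 4, 2

    enc, err := reedsolomon.New(dataBlocks, parityBlocks)
    if err != nil {
        log.Fatal(err)
    }

    payload := []byte("round-trip payload for the erasure coder")

    // What EncodeData does: split into data shards, then compute parity shards.
    shards, err := enc.Split(payload)
    if err != nil {
        log.Fatal(err)
    }
    if err = enc.Encode(shards); err != nil {
        log.Fatal(err)
    }

    // Simulate losing one data shard and one parity shard (at most parityBlocks may be lost).
    shards[0], shards[dataBlocks] = nil, nil

    // What DecodeDataBlocks does: rebuild only the missing data shards.
    if err = enc.ReconstructData(shards); err != nil {
        log.Fatal(err)
    }

    // Roughly what writeDataBlocks does: join the data shards and trim the padding.
    var out bytes.Buffer
    if err = enc.Join(&out, shards, len(payload)); err != nil {
        log.Fatal(err)
    }
    fmt.Println(out.String())
}

As the DecodeDataBlocks body above shows, reconstruction is skipped entirely when every data shard is present, so the common read path pays no Reed-Solomon cost; Decode only falls back to ReconstructData when a shard is missing because a disk was offline or a reader failed.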