Automatically set goroutines based on shardSize (#5346)

Update the reedsolomon library to enable the feature that automatically
sets the number of goroutines based on the input shard size, since the
shard size is effectively a constant in Minio for objects > 10MiB (the
default block size).

klauspost reported around a 15-20% performance improvement on older
systems, such as those limited to SSSE3 or AVX instructions:

```
name                  old speed      new speed      delta
Encode10x2x10000-8    5.45GB/s ± 1%  6.22GB/s ± 1%  +14.20%    (p=0.000 n=9+9)
Encode100x20x10000-8  1.44GB/s ± 1%  1.64GB/s ± 1%  +13.77%  (p=0.000 n=10+10)
Encode17x3x1M-8       10.0GB/s ± 5%  12.0GB/s ± 1%  +19.88%  (p=0.000 n=10+10)
Encode10x4x16M-8      7.81GB/s ± 5%  8.56GB/s ± 5%   +9.58%   (p=0.000 n=10+9)
Encode5x2x1M-8        15.3GB/s ± 2%  19.6GB/s ± 2%  +28.57%   (p=0.000 n=9+10)
Encode10x2x1M-8       12.2GB/s ± 5%  15.0GB/s ± 5%  +22.45%  (p=0.000 n=10+10)
Encode10x4x1M-8       7.84GB/s ± 1%  9.03GB/s ± 1%  +15.19%    (p=0.000 n=9+9)
Encode50x20x1M-8      1.73GB/s ± 4%  2.09GB/s ± 4%  +20.59%   (p=0.000 n=10+9)
Encode17x3x16M-8      10.6GB/s ± 1%  11.7GB/s ± 4%  +10.12%   (p=0.000 n=8+10)
```
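For context, a minimal sketch of how a caller opts into this behaviour with the updated library; the shard counts and size below are hypothetical, and MinIO derives its own from the erasure block size as the cmd/erasure.go change further down shows:

```go
package main

import (
	"log"

	"github.com/klauspost/reedsolomon"
)

func main() {
	// Hypothetical layout: 10 data and 2 parity shards, ~1MiB per shard.
	dataShards, parityShards := 10, 2
	shardSize := 1 << 20

	// WithAutoGoroutines tunes the encoder's goroutine count for the
	// shard size we expect to pass to Encode/Verify/Reconstruct.
	enc, err := reedsolomon.New(dataShards, parityShards,
		reedsolomon.WithAutoGoroutines(shardSize))
	if err != nil {
		log.Fatalf("failed to create encoder: %v", err)
	}
	_ = enc
}
```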
Branch: master
Author: Harshavardhana (committed by kannappanr)
Parent: b1fb550d5c
Commit: c0721164be
13 changed files (lines changed):
  1. cmd/erasure-createfile_test.go (4)
  2. cmd/erasure-healfile_test.go (2)
  3. cmd/erasure-readfile_test.go (6)
  4. cmd/erasure.go (5)
  5. cmd/erasure_test.go (2)
  6. cmd/xl-v1-healing.go (2)
  7. cmd/xl-v1-multipart.go (2)
  8. cmd/xl-v1-object.go (6)
  9. vendor/github.com/klauspost/reedsolomon/README.md (4)
  10. vendor/github.com/klauspost/reedsolomon/galois_amd64.s (56)
  11. vendor/github.com/klauspost/reedsolomon/options.go (15)
  12. vendor/github.com/klauspost/reedsolomon/reedsolomon.go (34)
  13. vendor/vendor.json (54)

cmd/erasure-createfile_test.go
@@ -70,7 +70,7 @@ func TestErasureCreateFile(t *testing.T) {
if err != nil {
t.Fatalf("Test %d: failed to create test setup: %v", i, err)
}
-storage, err := NewErasureStorage(setup.disks, test.dataBlocks, test.onDisks-test.dataBlocks)
+storage, err := NewErasureStorage(setup.disks, test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
if err != nil {
setup.Remove()
t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
@@ -125,7 +125,7 @@ func benchmarkErasureWrite(data, parity, dataDown, parityDown int, size int64, b
b.Fatalf("failed to create test setup: %v", err)
}
defer setup.Remove()
-storage, err := NewErasureStorage(setup.disks, data, parity)
+storage, err := NewErasureStorage(setup.disks, data, parity, blockSizeV1)
if err != nil {
b.Fatalf("failed to create ErasureStorage: %v", err)
}

cmd/erasure-healfile_test.go
@@ -74,7 +74,7 @@ func TestErasureHealFile(t *testing.T) {
if err != nil {
t.Fatalf("Test %d: failed to setup XL environment: %v", i, err)
}
-storage, err := NewErasureStorage(setup.disks, test.dataBlocks, test.disks-test.dataBlocks)
+storage, err := NewErasureStorage(setup.disks, test.dataBlocks, test.disks-test.dataBlocks, test.blocksize)
if err != nil {
setup.Remove()
t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)

cmd/erasure-readfile_test.go
@@ -86,7 +86,7 @@ func TestErasureReadFile(t *testing.T) {
if err != nil {
t.Fatalf("Test %d: failed to create test setup: %v", i, err)
}
-storage, err := NewErasureStorage(setup.disks, test.dataBlocks, test.onDisks-test.dataBlocks)
+storage, err := NewErasureStorage(setup.disks, test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
if err != nil {
setup.Remove()
t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
@@ -174,7 +174,7 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {
}
defer setup.Remove()
-storage, err := NewErasureStorage(setup.disks, dataBlocks, parityBlocks)
+storage, err := NewErasureStorage(setup.disks, dataBlocks, parityBlocks, blockSize)
if err != nil {
t.Fatalf("failed to create ErasureStorage: %v", err)
}
@@ -231,7 +231,7 @@ func benchmarkErasureRead(data, parity, dataDown, parityDown int, size int64, b
b.Fatalf("failed to create test setup: %v", err)
}
defer setup.Remove()
-storage, err := NewErasureStorage(setup.disks, data, parity)
+storage, err := NewErasureStorage(setup.disks, data, parity, blockSizeV1)
if err != nil {
b.Fatalf("failed to create ErasureStorage: %v", err)
}

cmd/erasure.go
@@ -44,8 +44,9 @@ type ErasureStorage struct {
// NewErasureStorage creates a new ErasureStorage. The storage erasure codes and protects all data written to
// the disks.
-func NewErasureStorage(disks []StorageAPI, dataBlocks, parityBlocks int) (s ErasureStorage, err error) {
-erasure, err := reedsolomon.New(dataBlocks, parityBlocks)
+func NewErasureStorage(disks []StorageAPI, dataBlocks, parityBlocks int, blockSize int64) (s ErasureStorage, err error) {
+shardsize := (int(blockSize) + dataBlocks - 1) / dataBlocks
+erasure, err := reedsolomon.New(dataBlocks, parityBlocks, reedsolomon.WithAutoGoroutines(shardsize))
if err != nil {
return s, errors.Tracef("failed to create erasure coding: %v", err)
}
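To make the shardsize computation above concrete, here is a small stand-alone example; the 10MiB block size is the default mentioned in the commit message, the data-shard count is hypothetical:

```go
package main

import "fmt"

func main() {
	blockSize := int64(10 * 1024 * 1024) // default erasure block size (10MiB)
	dataBlocks := 6                      // hypothetical data-shard count

	// Ceiling division, exactly as in NewErasureStorage above.
	shardsize := (int(blockSize) + dataBlocks - 1) / dataBlocks
	fmt.Println(shardsize) // 1747627: rounded up so dataBlocks shards cover the whole block
}
```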

cmd/erasure_test.go
@@ -52,7 +52,7 @@ func TestErasureDecode(t *testing.T) {
copy(buffer, data)
disks := make([]StorageAPI, test.dataBlocks+test.parityBlocks)
-storage, err := NewErasureStorage(disks, test.dataBlocks, test.parityBlocks)
+storage, err := NewErasureStorage(disks, test.dataBlocks, test.parityBlocks, blockSizeV1)
if err != nil {
t.Fatalf("Test %d: failed to create erasure storage: %v", i, err)
}

cmd/xl-v1-healing.go
@@ -447,7 +447,7 @@ func healObject(storageDisks []StorageAPI, bucket, object string, quorum int) (i
// part to .minio/tmp/uuid/ which needs to be renamed later to
// the final location.
storage, err := NewErasureStorage(latestDisks,
-latestMeta.Erasure.DataBlocks, latestMeta.Erasure.ParityBlocks)
+latestMeta.Erasure.DataBlocks, latestMeta.Erasure.ParityBlocks, latestMeta.Erasure.BlockSize)
if err != nil {
return 0, 0, toObjectErr(err, bucket, object)
}

cmd/xl-v1-multipart.go
@@ -691,7 +691,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
}
}
-storage, err := NewErasureStorage(onlineDisks, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks)
+storage, err := NewErasureStorage(onlineDisks, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize)
if err != nil {
return pi, toObjectErr(err, bucket, object)
}

cmd/xl-v1-object.go
@@ -262,7 +262,7 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i
}
var totalBytesRead int64
-storage, err := NewErasureStorage(onlineDisks, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks)
+storage, err := NewErasureStorage(onlineDisks, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize)
if err != nil {
return toObjectErr(err, bucket, object)
}
@@ -535,13 +535,13 @@ func (xl xlObjects) PutObject(bucket string, object string, data *hash.Reader, m
// Total size of the written object
var sizeWritten int64
-storage, err := NewErasureStorage(onlineDisks, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks)
+storage, err := NewErasureStorage(onlineDisks, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize)
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
// Alloc additional space for parity blocks created while erasure coding
-buffer := make([]byte, partsMetadata[0].Erasure.BlockSize, 2*partsMetadata[0].Erasure.BlockSize)
+buffer := make([]byte, xlMeta.Erasure.BlockSize, 2*xlMeta.Erasure.BlockSize)
// Read data and split into parts - similar to multipart mechanism
for partIdx := 1; ; partIdx++ {

vendor/github.com/klauspost/reedsolomon/README.md
@@ -24,6 +24,10 @@ go get -u github.com/klauspost/reedsolomon
# Changes
+## November 18, 2017
+Added [WithAutoGoroutines](https://godoc.org/github.com/klauspost/reedsolomon#WithAutoGoroutines) which will attempt to calculate the optimal number of goroutines to use based on your expected shard size and detected CPU.
## October 1, 2017
* [Cauchy Matrix](https://godoc.org/github.com/klauspost/reedsolomon#WithCauchyMatrix) is now an option. Thanks to [templexxx](https://github.com/templexxx) for the basis of this.

vendor/github.com/klauspost/reedsolomon/galois_amd64.s
@@ -19,8 +19,35 @@ TEXT ·galMulSSSE3Xor(SB), 7, $0
MOVQ out+72(FP), DX // DX: &out
PSHUFB X5, X8 // X8: lomask (unpacked)
SHRQ $4, R9 // len(in) / 16
+MOVQ SI, AX
+MOVQ DX, BX
+ANDQ $15, AX
+ANDQ $15, BX
CMPQ R9, $0
JEQ done_xor
+ORQ AX, BX
+CMPQ BX, $0
+JNZ loopback_xor
+loopback_xor_aligned:
+MOVOA (SI), X0 // in[x]
+MOVOA (DX), X4 // out[x]
+MOVOA X0, X1 // in[x]
+MOVOA X6, X2 // low copy
+MOVOA X7, X3 // high copy
+PSRLQ $4, X1 // X1: high input
+PAND X8, X0 // X0: low input
+PAND X8, X1 // X0: high input
+PSHUFB X0, X2 // X2: mul low part
+PSHUFB X1, X3 // X3: mul high part
+PXOR X2, X3 // X3: Result
+PXOR X4, X3 // X3: Result xor existing out
+MOVOA X3, (DX) // Store
+ADDQ $16, SI // in+=16
+ADDQ $16, DX // out+=16
+SUBQ $1, R9
+JNZ loopback_xor_aligned
+JMP done_xor
loopback_xor:
MOVOU (SI), X0 // in[x]
@@ -57,15 +84,40 @@ TEXT ·galMulSSSE3(SB), 7, $0
MOVQ in_len+56(FP), R9 // R9: len(in)
MOVQ out+72(FP), DX // DX: &out
PSHUFB X5, X8 // X8: lomask (unpacked)
+MOVQ SI, AX
+MOVQ DX, BX
SHRQ $4, R9 // len(in) / 16
+ANDQ $15, AX
+ANDQ $15, BX
CMPQ R9, $0
JEQ done
+ORQ AX, BX
+CMPQ BX, $0
+JNZ loopback
+loopback_aligned:
+MOVOA (SI), X0 // in[x]
+MOVOA X0, X1 // in[x]
+MOVOA X6, X2 // low copy
+MOVOA X7, X3 // high copy
+PSRLQ $4, X1 // X1: high input
+PAND X8, X0 // X0: low input
+PAND X8, X1 // X0: high input
+PSHUFB X0, X2 // X2: mul low part
+PSHUFB X1, X3 // X3: mul high part
+PXOR X2, X3 // X3: Result
+MOVOA X3, (DX) // Store
+ADDQ $16, SI // in+=16
+ADDQ $16, DX // out+=16
+SUBQ $1, R9
+JNZ loopback_aligned
+JMP done
loopback:
MOVOU (SI), X0 // in[x]
MOVOU X0, X1 // in[x]
-MOVOU X6, X2 // low copy
-MOVOU X7, X3 // high copy
+MOVOA X6, X2 // low copy
+MOVOA X7, X3 // high copy
PSRLQ $4, X1 // X1: high input
PAND X8, X0 // X0: low input
PAND X8, X1 // X0: high input
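The added assembly computes in%16 and out%16 (the ANDQ $15 instructions), ORs them together, and only takes the new MOVOA-based aligned loop when the result is zero, i.e. when both buffers start on a 16-byte boundary. A rough Go-level sketch of that check, purely illustrative:

```go
package main

import (
	"fmt"
	"unsafe"
)

// aligned16 reports whether both buffers start on a 16-byte boundary,
// mirroring the ANDQ $15 / ORQ test in the assembly above. Only then is
// it safe to use the aligned loads/stores (MOVOA) of the new fast loop.
func aligned16(in, out []byte) bool {
	a := uintptr(unsafe.Pointer(&in[0])) & 15
	b := uintptr(unsafe.Pointer(&out[0])) & 15
	return a|b == 0
}

func main() {
	in := make([]byte, 64)
	out := make([]byte, 64)
	// Go heap allocations of this size are normally 16-byte aligned, so this
	// usually prints true; the assembly cannot assume it and must check.
	fmt.Println(aligned16(in, out))
}
```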

vendor/github.com/klauspost/reedsolomon/options.go
@@ -15,11 +15,12 @@ type options struct {
useAVX2, useSSSE3, useSSE2 bool
usePAR1Matrix bool
useCauchy bool
+shardSize int
}
var defaultOptions = options{
maxGoroutines: 384,
-minSplitSize: 512,
+minSplitSize: 1024,
}
func init() {
@@ -46,6 +47,18 @@ func WithMaxGoroutines(n int) Option {
}
}
+// WithAutoGoroutines will adjust the number of goroutines for optimal speed with a
+// specific shard size.
+// Send in the shard size you expect to send. Other shard sizes will work, but may not
+// run at the optimal speed.
+// Overwrites WithMaxGoroutines.
+// If shardSize <= 0, it is ignored.
+func WithAutoGoroutines(shardSize int) Option {
+return func(o *options) {
+o.shardSize = shardSize
+}
+}
// WithMinSplitSize is the minimum encoding size in bytes per goroutine.
// See WithMaxGoroutines on how jobs are split.
// If n <= 0, it is ignored.

vendor/github.com/klauspost/reedsolomon/reedsolomon.go
@@ -15,7 +15,10 @@ import (
"bytes"
"errors"
"io"
"runtime"
"sync"
"github.com/klauspost/cpuid"
)
// Encoder is an interface to encode Reed-Salomon parity sets for your data.
@@ -239,6 +242,33 @@ func New(dataShards, parityShards int, opts ...Option) (Encoder, error) {
if err != nil {
return nil, err
}
+if r.o.shardSize > 0 {
+cacheSize := cpuid.CPU.Cache.L2
+if cacheSize <= 0 {
+// Set to 128K if undetectable.
+cacheSize = 128 << 10
+}
+p := runtime.NumCPU()
+// 1 input + parity must fit in cache, and we add one more to be safer.
+shards := 1 + parityShards
+g := (r.o.shardSize * shards) / (cacheSize - (cacheSize >> 4))
+if cpuid.CPU.ThreadsPerCore > 1 {
+// If multiple threads per core, make sure they don't contend for cache.
+g *= cpuid.CPU.ThreadsPerCore
+}
+g *= 2
+if g < p {
+g = p
+}
+// Have g be multiple of p
+g += p - 1
+g -= g % p
+r.o.maxGoroutines = g
+}
// Inverted matrices are cached in a tree keyed by the indices
// of the invalid rows of the data to reconstruct.
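To make the goroutine heuristic above concrete, here is a stand-alone sketch of the same arithmetic with the CPU and cache figures passed in explicitly (the real code reads them through the cpuid package); all the input numbers below are hypothetical:

```go
package main

import "fmt"

// autoGoroutines mirrors the heuristic added to New() above, with the CPU and
// cache parameters supplied by the caller instead of detected via cpuid.
func autoGoroutines(shardSize, parityShards, numCPU, threadsPerCore, l2Cache int) int {
	if l2Cache <= 0 {
		l2Cache = 128 << 10 // fall back to 128K if undetectable
	}
	shards := 1 + parityShards // 1 input + parity must fit in cache
	g := (shardSize * shards) / (l2Cache - (l2Cache >> 4))
	if threadsPerCore > 1 {
		g *= threadsPerCore // sibling hardware threads share the cache
	}
	g *= 2
	if g < numCPU {
		g = numCPU
	}
	// Round g up to a multiple of numCPU.
	g += numCPU - 1
	g -= g % numCPU
	return g
}

func main() {
	// Hypothetical: 1.25MiB shards, 8 parity shards, 8 CPUs, 2 threads/core, 256KiB L2.
	fmt.Println(autoGoroutines(1310720, 8, 8, 2, 256<<10)) // prints 192
}
```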
@@ -431,6 +461,8 @@ func (r reedSolomon) codeSomeShardsP(matrixRows, inputs, outputs [][]byte, outpu
if do < r.o.minSplitSize {
do = r.o.minSplitSize
}
+// Make sizes divisible by 16
+do = (do + 15) & (^15)
start := 0
for start < byteCount {
if start+do > byteCount {
@@ -490,6 +522,8 @@ func (r reedSolomon) checkSomeShardsP(matrixRows, inputs, toCheck [][]byte, outp
if do < r.o.minSplitSize {
do = r.o.minSplitSize
}
+// Make sizes divisible by 16
+do = (do + 15) & (^15)
start := 0
for start < byteCount {
if start+do > byteCount {
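The `(do + 15) & (^15)` expression added in both hunks rounds the per-goroutine byte count up to the next multiple of 16, so each goroutine processes whole 16-byte SIMD blocks. For example:

```go
package main

import "fmt"

func main() {
	// Round a per-goroutine split size up to a multiple of 16, as above.
	for _, do := range []int{1000, 1024, 4097} {
		fmt.Println(do, "->", (do+15)&(^15)) // 1000 -> 1008, 1024 -> 1024, 4097 -> 4112
	}
}
```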

vendor/vendor.json

@@ -267,30 +267,6 @@
"revision": "7e3c02b30806fa5779d3bdfc152ce4c6f40e7b38",
"revisionTime": "2016-01-19T13:13:26-08:00"
},
-{
-"checksumSHA1": "NYs0qvjZwsMZAXMtg2HRiED2cb4=",
-"path": "github.com/joyent/triton-go",
-"revision": "8365851ee7afcbb4cc1c7ba2e414b242ce0574f1",
-"revisionTime": "2017-12-15T19:09:06Z"
-},
-{
-"checksumSHA1": "Cth7NCLH/HaeKh9ZMRpQtudTEQQ=",
-"path": "github.com/joyent/triton-go/authentication",
-"revision": "8365851ee7afcbb4cc1c7ba2e414b242ce0574f1",
-"revisionTime": "2017-12-15T19:09:06Z"
-},
-{
-"checksumSHA1": "3ju04DVaxotpCKBF3Q/0vCSOlec=",
-"path": "github.com/joyent/triton-go/client",
-"revision": "8365851ee7afcbb4cc1c7ba2e414b242ce0574f1",
-"revisionTime": "2017-12-15T19:09:06Z"
-},
-{
-"checksumSHA1": "/WtyDZMgstGbBYtQ0f+ZfKMS4v8=",
-"path": "github.com/joyent/triton-go/storage",
-"revision": "8365851ee7afcbb4cc1c7ba2e414b242ce0574f1",
-"revisionTime": "2017-12-15T19:09:06Z"
-},
{
"checksumSHA1": "K6exl2ouL7d8cR2i378EzZOdRVI=",
"path": "github.com/howeyc/gopass",
@@ -315,6 +291,30 @@
"revision": "8152e7eb6ccf8679a64582a66b78519688d156ad",
"revisionTime": "2016-01-12T19:33:35Z"
},
+{
+"checksumSHA1": "NYs0qvjZwsMZAXMtg2HRiED2cb4=",
+"path": "github.com/joyent/triton-go",
+"revision": "8365851ee7afcbb4cc1c7ba2e414b242ce0574f1",
+"revisionTime": "2017-12-15T19:09:06Z"
+},
+{
+"checksumSHA1": "Cth7NCLH/HaeKh9ZMRpQtudTEQQ=",
+"path": "github.com/joyent/triton-go/authentication",
+"revision": "8365851ee7afcbb4cc1c7ba2e414b242ce0574f1",
+"revisionTime": "2017-12-15T19:09:06Z"
+},
+{
+"checksumSHA1": "3ju04DVaxotpCKBF3Q/0vCSOlec=",
+"path": "github.com/joyent/triton-go/client",
+"revision": "8365851ee7afcbb4cc1c7ba2e414b242ce0574f1",
+"revisionTime": "2017-12-15T19:09:06Z"
+},
+{
+"checksumSHA1": "/WtyDZMgstGbBYtQ0f+ZfKMS4v8=",
+"path": "github.com/joyent/triton-go/storage",
+"revision": "8365851ee7afcbb4cc1c7ba2e414b242ce0574f1",
+"revisionTime": "2017-12-15T19:09:06Z"
+},
{
"path": "github.com/klauspost/cpuid",
"revision": "349c675778172472f5e8f3a3e0fe187e302e5a10",
@@ -327,10 +327,10 @@
"revisionTime": "2016-10-16T15:41:25Z"
},
{
"checksumSHA1": "Nlhq327ZGeEh1gkPEBNPJKFlwds=",
"checksumSHA1": "ehsrWipiGIWqa4To8TmelIx06vI=",
"path": "github.com/klauspost/reedsolomon",
"revision": "e52c150f961e65ab9538bf1276b33bf469f919d8",
"revisionTime": "2017-11-18T15:17:31Z"
"revision": "0b30fa71cc8e4e9010c9aba6d0320e2e5b163b29",
"revisionTime": "2017-12-19T13:34:37Z"
},
{
"checksumSHA1": "dNYxHiBLalTqluak2/Z8c3RsSEM=",
