Use humanize constants for KiB, MiB and GiB units. (#3322)

master
Bala FA 8 years ago committed by Harshavardhana
parent c1ebcbcda2
commit 825000bc34
  1. 4
      cmd/benchmark-utils_test.go
  2. 3
      cmd/bucket-policy-handlers.go
  3. 5
      cmd/erasure-createfile_test.go
  4. 12
      cmd/erasure-healfile_test.go
  5. 20
      cmd/erasure-readfile_test.go
  6. 3
      cmd/generic-handlers.go
  7. 3
      cmd/globals.go
  8. 64
      cmd/object-api-getobject_test.go
  9. 20
      cmd/object-api-multipart_test.go
  10. 58
      cmd/object-api-putobject_test.go
  11. 6
      cmd/object-common.go
  12. 30
      cmd/object-handlers_test.go
  13. 6
      cmd/object_api_suite_test.go
  14. 5
      cmd/posix.go
  15. 4
      cmd/post-policy_test.go
  16. 6
      cmd/server-startup-msg_test.go
  17. 6
      cmd/server_test.go
  18. 3
      cmd/streaming-signature-v4.go
  19. 7
      cmd/utils.go
  20. 7
      cmd/web-handlers_test.go
  21. 20
      cmd/xl-v1-metadata_test.go
  22. 10
      cmd/xl-v1-object_test.go
  23. 3
      cmd/xl-v1.go

@ -24,6 +24,8 @@ import (
"strconv"
"testing"
"time"
humanize "github.com/dustin/go-humanize"
)
// Prepare benchmark backend
@ -107,7 +109,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
b.Fatal(err)
}
objSize := 128 * 1024 * 1024
objSize := 128 * humanize.MiByte
// PutObjectPart returns md5Sum of the object inserted.
// md5Sum variable is assigned with that value.

@ -23,13 +23,14 @@ import (
"io/ioutil"
"net/http"
humanize "github.com/dustin/go-humanize"
mux "github.com/gorilla/mux"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio/pkg/wildcard"
)
// maximum supported access policy size.
const maxAccessPolicySize = 20 * 1024 // 20KiB.
const maxAccessPolicySize = 20 * humanize.KiByte
// Verify if a given action is valid for the url path based on the
// existing bucket access policy.

@ -21,6 +21,7 @@ import (
"crypto/rand"
"testing"
humanize "github.com/dustin/go-humanize"
"github.com/klauspost/reedsolomon"
)
@ -48,8 +49,8 @@ func TestErasureCreateFile(t *testing.T) {
disks := setup.disks
// Prepare a slice of 1MB with random data.
data := make([]byte, 1*1024*1024)
// Prepare a slice of 1MiB with random data.
data := make([]byte, 1*humanize.MiByte)
_, err = rand.Read(data)
if err != nil {
t.Fatal(err)

@ -22,6 +22,8 @@ import (
"os"
"path"
"testing"
humanize "github.com/dustin/go-humanize"
)
// Test erasureHealFile()
@ -39,8 +41,8 @@ func TestErasureHealFile(t *testing.T) {
disks := setup.disks
// Prepare a slice of 1MB with random data.
data := make([]byte, 1*1024*1024)
// Prepare a slice of 1MiB with random data.
data := make([]byte, 1*humanize.MiByte)
_, err = rand.Read(data)
if err != nil {
t.Fatal(err)
@ -67,7 +69,7 @@ func TestErasureHealFile(t *testing.T) {
latest[0] = nil
outDated[0] = disks[0]
healCheckSums, err := erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*1024*1024, blockSize, dataBlocks, parityBlocks, bitRotAlgo)
healCheckSums, err := erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*humanize.MiByte, blockSize, dataBlocks, parityBlocks, bitRotAlgo)
if err != nil {
t.Fatal(err)
}
@ -90,7 +92,7 @@ func TestErasureHealFile(t *testing.T) {
outDated[index] = disks[index]
}
healCheckSums, err = erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*1024*1024, blockSize, dataBlocks, parityBlocks, bitRotAlgo)
healCheckSums, err = erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*humanize.MiByte, blockSize, dataBlocks, parityBlocks, bitRotAlgo)
if err != nil {
t.Fatal(err)
}
@ -120,7 +122,7 @@ func TestErasureHealFile(t *testing.T) {
latest[index] = nil
outDated[index] = disks[index]
}
_, err = erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*1024*1024, blockSize, dataBlocks, parityBlocks, bitRotAlgo)
_, err = erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*humanize.MiByte, blockSize, dataBlocks, parityBlocks, bitRotAlgo)
if err == nil {
t.Error("Expected erasureHealFile() to fail when the number of available disks <= parityBlocks")
}

@ -22,9 +22,11 @@ import (
"testing"
"time"
"reflect"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/bpool"
)
import "reflect"
// Tests getReadDisks which returns readable disks slice from which we can
// read parallelly.
@ -260,8 +262,8 @@ func TestErasureReadFileDiskFail(t *testing.T) {
disks := setup.disks
// Prepare a slice of 1MB with random data.
data := make([]byte, 1*1024*1024)
// Prepare a slice of 1MiB with random data.
data := make([]byte, 1*humanize.MiByte)
length := int64(len(data))
_, err = rand.Read(data)
if err != nil {
@ -333,7 +335,7 @@ func TestErasureReadFileOffsetLength(t *testing.T) {
// Initialize environment needed for the test.
dataBlocks := 7
parityBlocks := 7
blockSize := int64(1 * 1024 * 1024)
blockSize := int64(1 * humanize.MiByte)
setup, err := newErasureTestSetup(dataBlocks, parityBlocks, blockSize)
if err != nil {
t.Error(err)
@ -343,8 +345,8 @@ func TestErasureReadFileOffsetLength(t *testing.T) {
disks := setup.disks
// Prepare a slice of 5MB with random data.
data := make([]byte, 5*1024*1024)
// Prepare a slice of 5MiB with random data.
data := make([]byte, 5*humanize.MiByte)
length := int64(len(data))
_, err = rand.Read(data)
if err != nil {
@ -409,7 +411,7 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {
// Initialize environment needed for the test.
dataBlocks := 7
parityBlocks := 7
blockSize := int64(1 * 1024 * 1024)
blockSize := int64(1 * humanize.MiByte)
setup, err := newErasureTestSetup(dataBlocks, parityBlocks, blockSize)
if err != nil {
t.Error(err)
@ -419,8 +421,8 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {
disks := setup.disks
// Prepare a slice of 5MB with random data.
data := make([]byte, 5*1024*1024)
// Prepare a slice of 5MiB with random data.
data := make([]byte, 5*humanize.MiByte)
length := int64(len(data))
_, err = rand.Read(data)
if err != nil {

@ -23,6 +23,7 @@ import (
"strings"
"time"
humanize "github.com/dustin/go-humanize"
router "github.com/gorilla/mux"
"github.com/rs/cors"
)
@ -43,7 +44,7 @@ func registerHandlers(mux *router.Router, handlerFns ...HandlerFunc) http.Handle
// Set the body size limit to 6 GiB = Maximum object size + other possible data
// in the same request
const requestMaxBodySize = 1024 * 1024 * 1024 * (5 + 1)
const requestMaxBodySize = (5 + 1) * humanize.GiByte
type requestSizeLimitHandler struct {
handler http.Handler

@ -20,6 +20,7 @@ import (
"crypto/x509"
"time"
humanize "github.com/dustin/go-humanize"
"github.com/fatih/color"
"github.com/minio/minio/pkg/objcache"
)
@ -70,7 +71,7 @@ var (
var (
// Limit fields size (except file) to 1Mib since Policy document
// can reach that size according to https://aws.amazon.com/articles/1434
maxFormFieldSize = int64(1024 * 1024)
maxFormFieldSize = int64(1 * humanize.MiByte)
)
var (

@ -24,6 +24,8 @@ import (
"runtime"
"strings"
"testing"
humanize "github.com/dustin/go-humanize"
)
// Wrapper for calling GetObject tests for both XL multiple disks and single node setup.
@ -50,7 +52,7 @@ func testGetObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
bytesData := []struct {
byteData []byte
}{
{generateBytesData(6 * 1024 * 1024)},
{generateBytesData(6 * humanize.MiByte)},
}
// set of inputs for uploading the objects before tests for downloading is done.
putObjectInputs := []struct {
@ -199,7 +201,7 @@ func testGetObjectPermissionDenied(obj ObjectLayer, instanceType string, disks [
bytesData := []struct {
byteData []byte
}{
{generateBytesData(6 * 1024 * 1024)},
{generateBytesData(6 * humanize.MiByte)},
}
// set of inputs for uploading the objects before tests for downloading is done.
putObjectInputs := []struct {
@ -314,7 +316,7 @@ func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []str
bytesData := []struct {
byteData []byte
}{
{generateBytesData(6 * 1024 * 1024)},
{generateBytesData(6 * humanize.MiByte)},
}
// set of inputs for uploading the objects before tests for downloading is done.
putObjectInputs := []struct {
@ -463,73 +465,73 @@ func BenchmarkGetObjectVerySmallXL(b *testing.B) {
// BenchmarkGetObject10KbFS - Benchmark FS.GetObject() for object size of 10KB.
func BenchmarkGetObject10KbFS(b *testing.B) {
benchmarkGetObject(b, "FS", 10*1024)
benchmarkGetObject(b, "FS", 10*humanize.KiByte)
}
// BenchmarkGetObject10KbXL - Benchmark XL.GetObject() for object size of 10KB.
func BenchmarkGetObject10KbXL(b *testing.B) {
benchmarkGetObject(b, "XL", 10*1024)
benchmarkGetObject(b, "XL", 10*humanize.KiByte)
}
// BenchmarkGetObject100KbFS - Benchmark FS.GetObject() for object size of 100KB.
func BenchmarkGetObject100KbFS(b *testing.B) {
benchmarkGetObject(b, "FS", 100*1024)
benchmarkGetObject(b, "FS", 100*humanize.KiByte)
}
// BenchmarkGetObject100KbXL - Benchmark XL.GetObject() for object size of 100KB.
func BenchmarkGetObject100KbXL(b *testing.B) {
benchmarkGetObject(b, "XL", 100*1024)
benchmarkGetObject(b, "XL", 100*humanize.KiByte)
}
// BenchmarkGetObject1MbFS - Benchmark FS.GetObject() for object size of 1MB.
func BenchmarkGetObject1MbFS(b *testing.B) {
benchmarkGetObject(b, "FS", 1024*1024)
benchmarkGetObject(b, "FS", 1*humanize.MiByte)
}
// BenchmarkGetObject1MbXL - Benchmark XL.GetObject() for object size of 1MB.
func BenchmarkGetObject1MbXL(b *testing.B) {
benchmarkGetObject(b, "XL", 1024*1024)
benchmarkGetObject(b, "XL", 1*humanize.MiByte)
}
// BenchmarkGetObject5MbFS - Benchmark FS.GetObject() for object size of 5MB.
func BenchmarkGetObject5MbFS(b *testing.B) {
benchmarkGetObject(b, "FS", 5*1024*1024)
benchmarkGetObject(b, "FS", 5*humanize.MiByte)
}
// BenchmarkGetObject5MbXL - Benchmark XL.GetObject() for object size of 5MB.
func BenchmarkGetObject5MbXL(b *testing.B) {
benchmarkGetObject(b, "XL", 5*1024*1024)
benchmarkGetObject(b, "XL", 5*humanize.MiByte)
}
// BenchmarkGetObject10MbFS - Benchmark FS.GetObject() for object size of 10MB.
func BenchmarkGetObject10MbFS(b *testing.B) {
benchmarkGetObject(b, "FS", 10*1024*1024)
benchmarkGetObject(b, "FS", 10*humanize.MiByte)
}
// BenchmarkGetObject10MbXL - Benchmark XL.GetObject() for object size of 10MB.
func BenchmarkGetObject10MbXL(b *testing.B) {
benchmarkGetObject(b, "XL", 10*1024*1024)
benchmarkGetObject(b, "XL", 10*humanize.MiByte)
}
// BenchmarkGetObject25MbFS - Benchmark FS.GetObject() for object size of 25MB.
func BenchmarkGetObject25MbFS(b *testing.B) {
benchmarkGetObject(b, "FS", 25*1024*1024)
benchmarkGetObject(b, "FS", 25*humanize.MiByte)
}
// BenchmarkGetObject25MbXL - Benchmark XL.GetObject() for object size of 25MB.
func BenchmarkGetObject25MbXL(b *testing.B) {
benchmarkGetObject(b, "XL", 25*1024*1024)
benchmarkGetObject(b, "XL", 25*humanize.MiByte)
}
// BenchmarkGetObject50MbFS - Benchmark FS.GetObject() for object size of 50MB.
func BenchmarkGetObject50MbFS(b *testing.B) {
benchmarkGetObject(b, "FS", 50*1024*1024)
benchmarkGetObject(b, "FS", 50*humanize.MiByte)
}
// BenchmarkGetObject50MbXL - Benchmark XL.GetObject() for object size of 50MB.
func BenchmarkGetObject50MbXL(b *testing.B) {
benchmarkGetObject(b, "XL", 50*1024*1024)
benchmarkGetObject(b, "XL", 50*humanize.MiByte)
}
// parallel benchmarks for ObjectLayer.GetObject() .
@ -546,71 +548,71 @@ func BenchmarkGetObjectParallelVerySmallXL(b *testing.B) {
// BenchmarkGetObjectParallel10KbFS - Benchmark FS.GetObject() for object size of 10KB.
func BenchmarkGetObjectParallel10KbFS(b *testing.B) {
benchmarkGetObjectParallel(b, "FS", 10*1024)
benchmarkGetObjectParallel(b, "FS", 10*humanize.KiByte)
}
// BenchmarkGetObjectParallel10KbXL - Benchmark XL.GetObject() for object size of 10KB.
func BenchmarkGetObjectParallel10KbXL(b *testing.B) {
benchmarkGetObjectParallel(b, "XL", 10*1024)
benchmarkGetObjectParallel(b, "XL", 10*humanize.KiByte)
}
// BenchmarkGetObjectParallel100KbFS - Benchmark FS.GetObject() for object size of 100KB.
func BenchmarkGetObjectParallel100KbFS(b *testing.B) {
benchmarkGetObjectParallel(b, "FS", 100*1024)
benchmarkGetObjectParallel(b, "FS", 100*humanize.KiByte)
}
// BenchmarkGetObjectParallel100KbXL - Benchmark XL.GetObject() for object size of 100KB.
func BenchmarkGetObjectParallel100KbXL(b *testing.B) {
benchmarkGetObjectParallel(b, "XL", 100*1024)
benchmarkGetObjectParallel(b, "XL", 100*humanize.KiByte)
}
// BenchmarkGetObjectParallel1MbFS - Benchmark FS.GetObject() for object size of 1MB.
func BenchmarkGetObjectParallel1MbFS(b *testing.B) {
benchmarkGetObjectParallel(b, "FS", 1024*1024)
benchmarkGetObjectParallel(b, "FS", 1*humanize.MiByte)
}
// BenchmarkGetObjectParallel1MbXL - Benchmark XL.GetObject() for object size of 1MB.
func BenchmarkGetObjectParallel1MbXL(b *testing.B) {
benchmarkGetObjectParallel(b, "XL", 1024*1024)
benchmarkGetObjectParallel(b, "XL", 1*humanize.MiByte)
}
// BenchmarkGetObjectParallel5MbFS - Benchmark FS.GetObject() for object size of 5MB.
func BenchmarkGetObjectParallel5MbFS(b *testing.B) {
benchmarkGetObjectParallel(b, "FS", 5*1024*1024)
benchmarkGetObjectParallel(b, "FS", 5*humanize.MiByte)
}
// BenchmarkGetObjectParallel5MbXL - Benchmark XL.GetObject() for object size of 5MB.
func BenchmarkGetObjectParallel5MbXL(b *testing.B) {
benchmarkGetObjectParallel(b, "XL", 5*1024*1024)
benchmarkGetObjectParallel(b, "XL", 5*humanize.MiByte)
}
// BenchmarkGetObjectParallel10MbFS - Benchmark FS.GetObject() for object size of 10MB.
func BenchmarkGetObjectParallel10MbFS(b *testing.B) {
benchmarkGetObjectParallel(b, "FS", 10*1024*1024)
benchmarkGetObjectParallel(b, "FS", 10*humanize.MiByte)
}
// BenchmarkGetObjectParallel10MbXL - Benchmark XL.GetObject() for object size of 10MB.
func BenchmarkGetObjectParallel10MbXL(b *testing.B) {
benchmarkGetObjectParallel(b, "XL", 10*1024*1024)
benchmarkGetObjectParallel(b, "XL", 10*humanize.MiByte)
}
// BenchmarkGetObjectParallel25MbFS - Benchmark FS.GetObject() for object size of 25MB.
func BenchmarkGetObjectParallel25MbFS(b *testing.B) {
benchmarkGetObjectParallel(b, "FS", 25*1024*1024)
benchmarkGetObjectParallel(b, "FS", 25*humanize.MiByte)
}
// BenchmarkGetObjectParallel25MbXL - Benchmark XL.GetObject() for object size of 25MB.
func BenchmarkGetObjectParallel25MbXL(b *testing.B) {
benchmarkGetObjectParallel(b, "XL", 25*1024*1024)
benchmarkGetObjectParallel(b, "XL", 25*humanize.MiByte)
}
// BenchmarkGetObjectParallel50MbFS - Benchmark FS.GetObject() for object size of 50MB.
func BenchmarkGetObjectParallel50MbFS(b *testing.B) {
benchmarkGetObjectParallel(b, "FS", 50*1024*1024)
benchmarkGetObjectParallel(b, "FS", 50*humanize.MiByte)
}
// BenchmarkGetObjectParallel50MbXL - Benchmark XL.GetObject() for object size of 50MB.
func BenchmarkGetObjectParallel50MbXL(b *testing.B) {
benchmarkGetObjectParallel(b, "XL", 50*1024*1024)
benchmarkGetObjectParallel(b, "XL", 50*humanize.MiByte)
}

@ -21,6 +21,8 @@ import (
"fmt"
"strings"
"testing"
humanize "github.com/dustin/go-humanize"
)
// Wrapper for calling NewMultipartUpload tests for both XL multiple disks and single node setup.
@ -1782,7 +1784,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
uploadIDs = append(uploadIDs, uploadID)
// Parts with size greater than 5 MB.
// Generating a 6MB byte array.
validPart := bytes.Repeat([]byte("abcdef"), 1024*1024)
validPart := bytes.Repeat([]byte("abcdef"), 1*humanize.MiByte)
validPartMD5 := getMD5Hash(validPart)
// Create multipart parts.
// Need parts to be uploaded before CompleteMultiPartUpload can be called tested.
@ -1941,41 +1943,41 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
// BenchmarkPutObjectPart5MbFS - Benchmark FS.PutObjectPart() for object size of 5MB.
func BenchmarkPutObjectPart5MbFS(b *testing.B) {
benchmarkPutObjectPart(b, "FS", 5*1024*1024)
benchmarkPutObjectPart(b, "FS", 5*humanize.MiByte)
}
// BenchmarkPutObjectPart5MbXL - Benchmark XL.PutObjectPart() for object size of 5MB.
func BenchmarkPutObjectPart5MbXL(b *testing.B) {
benchmarkPutObjectPart(b, "XL", 5*1024*1024)
benchmarkPutObjectPart(b, "XL", 5*humanize.MiByte)
}
// BenchmarkPutObjectPart10MbFS - Benchmark FS.PutObjectPart() for object size of 10MB.
func BenchmarkPutObjectPart10MbFS(b *testing.B) {
benchmarkPutObjectPart(b, "FS", 10*1024*1024)
benchmarkPutObjectPart(b, "FS", 10*humanize.MiByte)
}
// BenchmarkPutObjectPart10MbXL - Benchmark XL.PutObjectPart() for object size of 10MB.
func BenchmarkPutObjectPart10MbXL(b *testing.B) {
benchmarkPutObjectPart(b, "XL", 10*1024*1024)
benchmarkPutObjectPart(b, "XL", 10*humanize.MiByte)
}
// BenchmarkPutObjectPart25MbFS - Benchmark FS.PutObjectPart() for object size of 25MB.
func BenchmarkPutObjectPart25MbFS(b *testing.B) {
benchmarkPutObjectPart(b, "FS", 25*1024*1024)
benchmarkPutObjectPart(b, "FS", 25*humanize.MiByte)
}
// BenchmarkPutObjectPart25MbXL - Benchmark XL.PutObjectPart() for object size of 25MB.
func BenchmarkPutObjectPart25MbXL(b *testing.B) {
benchmarkPutObjectPart(b, "XL", 25*1024*1024)
benchmarkPutObjectPart(b, "XL", 25*humanize.MiByte)
}
// BenchmarkPutObjectPart50MbFS - Benchmark FS.PutObjectPart() for object size of 50MB.
func BenchmarkPutObjectPart50MbFS(b *testing.B) {
benchmarkPutObjectPart(b, "FS", 50*1024*1024)
benchmarkPutObjectPart(b, "FS", 50*humanize.MiByte)
}
// BenchmarkPutObjectPart50MbXL - Benchmark XL.PutObjectPart() for object size of 50MB.
func BenchmarkPutObjectPart50MbXL(b *testing.B) {
benchmarkPutObjectPart(b, "XL", 50*1024*1024)
benchmarkPutObjectPart(b, "XL", 50*humanize.MiByte)
}

@ -25,6 +25,8 @@ import (
"path"
"runtime"
"testing"
humanize "github.com/dustin/go-humanize"
)
func md5Header(data []byte) map[string]string {
@ -59,7 +61,7 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
var (
nilBytes []byte
data = []byte("hello")
fiveMBBytes = bytes.Repeat([]byte("a"), 5*1024*124)
fiveMBBytes = bytes.Repeat([]byte("a"), 5*humanize.MiByte)
)
invalidMD5 := getMD5Hash([]byte("meh"))
invalidMD5Header := md5Header([]byte("meh"))
@ -354,7 +356,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
}
// Upload part1.
fiveMBBytes := bytes.Repeat([]byte("a"), 5*1024*1024)
fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte)
md5Writer := md5.New()
md5Writer.Write(fiveMBBytes)
etag1 := hex.EncodeToString(md5Writer.Sum(nil))
@ -422,73 +424,73 @@ func BenchmarkPutObjectVerySmallXL(b *testing.B) {
// BenchmarkPutObject10KbFS - Benchmark FS.PutObject() for object size of 10KB.
func BenchmarkPutObject10KbFS(b *testing.B) {
benchmarkPutObject(b, "FS", 10*1024)
benchmarkPutObject(b, "FS", 10*humanize.KiByte)
}
// BenchmarkPutObject10KbXL - Benchmark XL.PutObject() for object size of 10KB.
func BenchmarkPutObject10KbXL(b *testing.B) {
benchmarkPutObject(b, "XL", 10*1024)
benchmarkPutObject(b, "XL", 10*humanize.KiByte)
}
// BenchmarkPutObject100KbFS - Benchmark FS.PutObject() for object size of 100KB.
func BenchmarkPutObject100KbFS(b *testing.B) {
benchmarkPutObject(b, "FS", 100*1024)
benchmarkPutObject(b, "FS", 100*humanize.KiByte)
}
// BenchmarkPutObject100KbXL - Benchmark XL.PutObject() for object size of 100KB.
func BenchmarkPutObject100KbXL(b *testing.B) {
benchmarkPutObject(b, "XL", 100*1024)
benchmarkPutObject(b, "XL", 100*humanize.KiByte)
}
// BenchmarkPutObject1MbFS - Benchmark FS.PutObject() for object size of 1MB.
func BenchmarkPutObject1MbFS(b *testing.B) {
benchmarkPutObject(b, "FS", 1024*1024)
benchmarkPutObject(b, "FS", 1*humanize.MiByte)
}
// BenchmarkPutObject1MbXL - Benchmark XL.PutObject() for object size of 1MB.
func BenchmarkPutObject1MbXL(b *testing.B) {
benchmarkPutObject(b, "XL", 1024*1024)
benchmarkPutObject(b, "XL", 1*humanize.MiByte)
}
// BenchmarkPutObject5MbFS - Benchmark FS.PutObject() for object size of 5MB.
func BenchmarkPutObject5MbFS(b *testing.B) {
benchmarkPutObject(b, "FS", 5*1024*1024)
benchmarkPutObject(b, "FS", 5*humanize.MiByte)
}
// BenchmarkPutObject5MbXL - Benchmark XL.PutObject() for object size of 5MB.
func BenchmarkPutObject5MbXL(b *testing.B) {
benchmarkPutObject(b, "XL", 5*1024*1024)
benchmarkPutObject(b, "XL", 5*humanize.MiByte)
}
// BenchmarkPutObject10MbFS - Benchmark FS.PutObject() for object size of 10MB.
func BenchmarkPutObject10MbFS(b *testing.B) {
benchmarkPutObject(b, "FS", 10*1024*1024)
benchmarkPutObject(b, "FS", 10*humanize.MiByte)
}
// BenchmarkPutObject10MbXL - Benchmark XL.PutObject() for object size of 10MB.
func BenchmarkPutObject10MbXL(b *testing.B) {
benchmarkPutObject(b, "XL", 10*1024*1024)
benchmarkPutObject(b, "XL", 10*humanize.MiByte)
}
// BenchmarkPutObject25MbFS - Benchmark FS.PutObject() for object size of 25MB.
func BenchmarkPutObject25MbFS(b *testing.B) {
benchmarkPutObject(b, "FS", 25*1024*1024)
benchmarkPutObject(b, "FS", 25*humanize.MiByte)
}
// BenchmarkPutObject25MbXL - Benchmark XL.PutObject() for object size of 25MB.
func BenchmarkPutObject25MbXL(b *testing.B) {
benchmarkPutObject(b, "XL", 25*1024*1024)
benchmarkPutObject(b, "XL", 25*humanize.MiByte)
}
// BenchmarkPutObject50MbFS - Benchmark FS.PutObject() for object size of 50MB.
func BenchmarkPutObject50MbFS(b *testing.B) {
benchmarkPutObject(b, "FS", 50*1024*1024)
benchmarkPutObject(b, "FS", 50*humanize.MiByte)
}
// BenchmarkPutObject50MbXL - Benchmark XL.PutObject() for object size of 50MB.
func BenchmarkPutObject50MbXL(b *testing.B) {
benchmarkPutObject(b, "XL", 50*1024*1024)
benchmarkPutObject(b, "XL", 50*humanize.MiByte)
}
// parallel benchmarks for ObjectLayer.PutObject() .
@ -505,61 +507,61 @@ func BenchmarkParallelPutObjectVerySmallXL(b *testing.B) {
// BenchmarkParallelPutObject10KbFS - BenchmarkParallel FS.PutObject() for object size of 10KB.
func BenchmarkParallelPutObject10KbFS(b *testing.B) {
benchmarkPutObjectParallel(b, "FS", 10*1024)
benchmarkPutObjectParallel(b, "FS", 10*humanize.KiByte)
}
// BenchmarkParallelPutObject10KbXL - BenchmarkParallel XL.PutObject() for object size of 10KB.
func BenchmarkParallelPutObject10KbXL(b *testing.B) {
benchmarkPutObjectParallel(b, "XL", 10*1024)
benchmarkPutObjectParallel(b, "XL", 10*humanize.KiByte)
}
// BenchmarkParallelPutObject100KbFS - BenchmarkParallel FS.PutObject() for object size of 100KB.
func BenchmarkParallelPutObject100KbFS(b *testing.B) {
benchmarkPutObjectParallel(b, "FS", 100*1024)
benchmarkPutObjectParallel(b, "FS", 100*humanize.KiByte)
}
// BenchmarkParallelPutObject100KbXL - BenchmarkParallel XL.PutObject() for object size of 100KB.
func BenchmarkParallelPutObject100KbXL(b *testing.B) {
benchmarkPutObjectParallel(b, "XL", 100*1024)
benchmarkPutObjectParallel(b, "XL", 100*humanize.KiByte)
}
// BenchmarkParallelPutObject1MbFS - BenchmarkParallel FS.PutObject() for object size of 1MB.
func BenchmarkParallelPutObject1MbFS(b *testing.B) {
benchmarkPutObjectParallel(b, "FS", 1024*1024)
benchmarkPutObjectParallel(b, "FS", 1*humanize.MiByte)
}
// BenchmarkParallelPutObject1MbXL - BenchmarkParallel XL.PutObject() for object size of 1MB.
func BenchmarkParallelPutObject1MbXL(b *testing.B) {
benchmarkPutObjectParallel(b, "XL", 1024*1024)
benchmarkPutObjectParallel(b, "XL", 1*humanize.MiByte)
}
// BenchmarkParallelPutObject5MbFS - BenchmarkParallel FS.PutObject() for object size of 5MB.
func BenchmarkParallelPutObject5MbFS(b *testing.B) {
benchmarkPutObjectParallel(b, "FS", 5*1024*1024)
benchmarkPutObjectParallel(b, "FS", 5*humanize.MiByte)
}
// BenchmarkParallelPutObject5MbXL - BenchmarkParallel XL.PutObject() for object size of 5MB.
func BenchmarkParallelPutObject5MbXL(b *testing.B) {
benchmarkPutObjectParallel(b, "XL", 5*1024*1024)
benchmarkPutObjectParallel(b, "XL", 5*humanize.MiByte)
}
// BenchmarkParallelPutObject10MbFS - BenchmarkParallel FS.PutObject() for object size of 10MB.
func BenchmarkParallelPutObject10MbFS(b *testing.B) {
benchmarkPutObjectParallel(b, "FS", 10*1024*1024)
benchmarkPutObjectParallel(b, "FS", 10*humanize.MiByte)
}
// BenchmarkParallelPutObject10MbXL - BenchmarkParallel XL.PutObject() for object size of 10MB.
func BenchmarkParallelPutObject10MbXL(b *testing.B) {
benchmarkPutObjectParallel(b, "XL", 10*1024*1024)
benchmarkPutObjectParallel(b, "XL", 10*humanize.MiByte)
}
// BenchmarkParallelPutObject25MbFS - BenchmarkParallel FS.PutObject() for object size of 25MB.
func BenchmarkParallelPutObject25MbFS(b *testing.B) {
benchmarkPutObjectParallel(b, "FS", 25*1024*1024)
benchmarkPutObjectParallel(b, "FS", 25*humanize.MiByte)
}
// BenchmarkParallelPutObject25MbXL - BenchmarkParallel XL.PutObject() for object size of 25MB.
func BenchmarkParallelPutObject25MbXL(b *testing.B) {
benchmarkPutObjectParallel(b, "XL", 25*1024*1024)
benchmarkPutObjectParallel(b, "XL", 25*humanize.MiByte)
}

@ -22,14 +22,16 @@ import (
"runtime"
"strings"
"sync"
humanize "github.com/dustin/go-humanize"
)
const (
// Block size used for all internal operations version 1.
blockSizeV1 = 10 * 1024 * 1024 // 10MiB.
blockSizeV1 = 10 * humanize.MiByte
// Staging buffer read size for all internal operations version 1.
readSizeV1 = 1 * 1024 * 1024 // 1MiB.
readSizeV1 = 1 * humanize.MiByte
// Buckets meta prefix.
bucketMetaPrefix = "buckets"

@ -28,6 +28,8 @@ import (
"strconv"
"sync"
"testing"
humanize "github.com/dustin/go-humanize"
)
// Type to capture different modifications to API request to simulate failure cases.
@ -58,7 +60,7 @@ func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string,
bytesData := []struct {
byteData []byte
}{
{generateBytesData(6 * 1024 * 1024)},
{generateBytesData(6 * humanize.MiByte)},
}
// set of inputs for uploading the objects before tests for downloading is done.
putObjectInputs := []struct {
@ -205,7 +207,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
bytesData := []struct {
byteData []byte
}{
{generateBytesData(6 * 1024 * 1024)},
{generateBytesData(6 * humanize.MiByte)},
}
// set of inputs for uploading the objects before tests for downloading is done.
putObjectInputs := []struct {
@ -421,9 +423,9 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
credentials credential, t *testing.T) {
objectName := "test-object"
bytesDataLen := 65 * 1024
bytesDataLen := 65 * humanize.KiByte
bytesData := bytes.Repeat([]byte{'a'}, bytesDataLen)
oneKData := bytes.Repeat([]byte("a"), 1024)
oneKData := bytes.Repeat([]byte("a"), 1*humanize.KiByte)
err := initEventNotifier(obj)
if err != nil {
@ -465,7 +467,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
objectName: objectName,
data: bytesData,
dataLen: len(bytesData),
chunkSize: 64 * 1024, // 64k
chunkSize: 64 * humanize.KiByte,
expectedContent: []byte{},
expectedRespStatus: http.StatusOK,
accessKey: credentials.AccessKeyID,
@ -479,7 +481,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
objectName: objectName,
data: bytesData,
dataLen: len(bytesData),
chunkSize: 1 * 1024, // 1k
chunkSize: 1 * humanize.KiByte,
expectedContent: []byte{},
expectedRespStatus: http.StatusOK,
accessKey: credentials.AccessKeyID,
@ -493,7 +495,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
objectName: objectName,
data: bytesData,
dataLen: len(bytesData),
chunkSize: 64 * 1024, // 64k
chunkSize: 64 * humanize.KiByte,
expectedContent: []byte{},
expectedRespStatus: http.StatusForbidden,
accessKey: "",
@ -507,7 +509,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
objectName: objectName,
data: bytesData,
dataLen: len(bytesData),
chunkSize: 64 * 1024, // 64k
chunkSize: 64 * humanize.KiByte,
expectedContent: []byte{},
expectedRespStatus: http.StatusBadRequest,
accessKey: credentials.AccessKeyID,
@ -522,7 +524,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
objectName: objectName,
data: bytesData,
dataLen: len(bytesData),
chunkSize: 100 * 1024, // 100k
chunkSize: 100 * humanize.KiByte,
expectedContent: []byte{},
expectedRespStatus: http.StatusOK,
accessKey: credentials.AccessKeyID,
@ -696,7 +698,7 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
}
objectName := "test-object"
// byte data for PutObject.
bytesData := generateBytesData(6 * 1024 * 1024)
bytesData := generateBytesData(6 * humanize.MiByte)
copySourceHeader := http.Header{}
copySourceHeader.Set("X-Amz-Copy-Source", "somewhere")
@ -940,7 +942,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
bytesData := []struct {
byteData []byte
}{
{generateBytesData(6 * 1024 * 1024)},
{generateBytesData(6 * humanize.MiByte)}
}
buffers := []*bytes.Buffer{
@ -1404,7 +1406,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
// Parts with size greater than 5 MB.
// Generating a 6MB byte array.
validPart := bytes.Repeat([]byte("abcdef"), 1024*1024)
validPart := bytes.Repeat([]byte("abcdef"), 1*humanize.MiByte)
validPartMD5 := getMD5Hash(validPart)
// Create multipart parts.
// Need parts to be uploaded before CompleteMultiPartUpload can be called tested.
@ -1759,7 +1761,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
// Parts with size greater than 5 MB.
// Generating a 6MB byte array.
validPart := bytes.Repeat([]byte("abcdef"), 1024*1024)
validPart := bytes.Repeat([]byte("abcdef"), 1*humanize.MiByte)
validPartMD5 := getMD5Hash(validPart)
// Create multipart parts.
// Need parts to be uploaded before AbortMultiPartUpload can be called tested.
@ -1914,7 +1916,7 @@ func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string
bytesData := []struct {
byteData []byte
}{
{generateBytesData(6 * 1024 * 1024)},
{generateBytesData(6 * humanize.MiByte)},
}
// set of inputs for uploading the objects before tests for deleting them is done.

@ -22,6 +22,8 @@ import (
"math/rand"
"strconv"
humanize "github.com/dustin/go-humanize"
. "gopkg.in/check.v1"
)
@ -98,8 +100,8 @@ func testMultipartObjectCreation(obj ObjectLayer, instanceType string, c TestErr
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
// Create a byte array of 5MB.
data := bytes.Repeat([]byte("0123456789abcdef"), 5*1024*1024/16)
// Create a byte array of 5MiB.
data := bytes.Repeat([]byte("0123456789abcdef"), 5*humanize.MiByte/16)
completedParts := completeMultipartUpload{}
for i := 1; i <= 10; i++ {
expectedMD5Sumhex := getMD5Hash(data)

@ -29,12 +29,13 @@ import (
"sync/atomic"
"syscall"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/disk"
)
const (
fsMinFreeSpace = 1024 * 1024 * 1024 // Min 1GiB free space.
fsMinFreeInodes = 10000 // Min 10000.
fsMinFreeSpace = 1 * humanize.GiByte // Min 1GiB free space.
fsMinFreeInodes = 10000 // Min 10000.
maxAllowedIOError = 5
)

@ -26,6 +26,8 @@ import (
"net/http/httptest"
"testing"
"time"
humanize "github.com/dustin/go-humanize"
)
const (
@ -264,7 +266,7 @@ func testPostPolicyHandler(obj ObjectLayer, instanceType string, t TestErrHandle
// Failed with entity too large.
{
objectName: "test",
data: bytes.Repeat([]byte("a"), 1024*1024+1),
data: bytes.Repeat([]byte("a"), (1*humanize.MiByte)+1),
expectedRespStatus: http.StatusBadRequest,
accessKey: credentials.AccessKeyID,
secretKey: credentials.SecretAccessKey,

@ -23,13 +23,15 @@ import (
"strings"
"testing"
"time"
humanize "github.com/dustin/go-humanize"
)
// Tests if we generate storage info.
func TestStorageInfoMsg(t *testing.T) {
infoStorage := StorageInfo{
Total: 1024 * 1024 * 1024 * 10,
Free: 1024 * 1024 * 1024 * 2,
Total: 10 * humanize.GiByte,
Free: 2 * humanize.GiByte,
Backend: struct {
Type BackendType
OnlineDisks int

@ -31,6 +31,8 @@ import (
"sync"
"time"
humanize "github.com/dustin/go-humanize"
. "gopkg.in/check.v1"
)
@ -2367,7 +2369,7 @@ func (s *TestSuiteCommon) TestObjectValidMD5(c *C) {
// Create a byte array of 5MB.
// content for the object to be uploaded.
data := bytes.Repeat([]byte("0123456789abcdef"), 5*1024*1024/16)
data := bytes.Repeat([]byte("0123456789abcdef"), 5*humanize.MiByte/16)
// calculate md5Sum of the data.
md5SumBase64 := getMD5HashBase64(data)
@ -2440,7 +2442,7 @@ func (s *TestSuiteCommon) TestObjectMultipart(c *C) {
// content for the part to be uploaded.
// Create a byte array of 5MB.
data := bytes.Repeat([]byte("0123456789abcdef"), 5*1024*1024/16)
data := bytes.Repeat([]byte("0123456789abcdef"), 5*humanize.MiByte/16)
// calculate md5Sum of the data.
md5SumBase64 := getMD5HashBase64(data)

@ -28,6 +28,7 @@ import (
"net/http"
"time"
humanize "github.com/dustin/go-humanize"
"github.com/minio/sha256-simd"
)
@ -151,7 +152,7 @@ func calculateSeedSignature(r *http.Request) (signature string, date time.Time,
return newSignature, date, ErrNone
}
const maxLineLength = 4096 // assumed <= bufio.defaultBufSize 4KiB.
const maxLineLength = 4 * humanize.KiByte // assumed <= bufio.defaultBufSize 4KiB
// lineTooLong is generated as chunk header is bigger than 4KiB.
var errLineTooLong = errors.New("header line too long")

@ -27,6 +27,7 @@ import (
"encoding/json"
humanize "github.com/dustin/go-humanize"
"github.com/pkg/profile"
)
@ -112,9 +113,9 @@ func checkValidMD5(md5 string) ([]byte, error) {
/// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
const (
// maximum object size per PUT request is 5GiB
maxObjectSize = 1024 * 1024 * 1024 * 5
// minimum Part size for multipart upload is 5MB
minPartSize = 1024 * 1024 * 5
maxObjectSize = 5 * humanize.GiByte
// minimum Part size for multipart upload is 5MiB
minPartSize = 5 * humanize.MiByte
// maximum Part ID for multipart upload is 10000 (Acceptable values range from 1 to 10000 inclusive)
maxPartID = 10000
)

@ -27,6 +27,7 @@ import (
"strings"
"testing"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/set"
)
@ -403,7 +404,7 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa
bucketName := getRandomBucketName()
objectName := "object"
objectSize := 1024
objectSize := 1 * humanize.KiByte
// Create bucket.
err = obj.MakeBucket(bucketName)
@ -474,7 +475,7 @@ func testRemoveObjectWebHandler(obj ObjectLayer, instanceType string, t TestErrH
bucketName := getRandomBucketName()
objectName := "object"
objectSize := 1024
objectSize := 1 * humanize.KiByte
// Create bucket.
err = obj.MakeBucket(bucketName)
@ -823,7 +824,7 @@ func testWebPresignedGetHandler(obj ObjectLayer, instanceType string, t TestErrH
bucketName := getRandomBucketName()
objectName := "object"
objectSize := 1024
objectSize := 1 * humanize.KiByte
// Create bucket.
err = obj.MakeBucket(bucketName)

@ -21,9 +21,9 @@ import (
"strconv"
"testing"
"time"
)
const MiB = 1024 * 1024
humanize "github.com/dustin/go-humanize"
)
// Test xlMetaV1.AddObjectPart()
func TestAddObjectPart(t *testing.T) {
@ -54,7 +54,7 @@ func TestAddObjectPart(t *testing.T) {
for _, testCase := range testCases {
if testCase.expectedIndex > -1 {
partNumString := strconv.Itoa(testCase.partNum)
xlMeta.AddObjectPart(testCase.partNum, "part."+partNumString, "etag."+partNumString, int64(testCase.partNum+MiB))
xlMeta.AddObjectPart(testCase.partNum, "part."+partNumString, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte))
}
if index := objectPartIndex(xlMeta.Parts, testCase.partNum); index != testCase.expectedIndex {
@ -86,7 +86,7 @@ func TestObjectPartIndex(t *testing.T) {
// Add some parts for testing.
for _, testCase := range testCases {
partNumString := strconv.Itoa(testCase.partNum)
xlMeta.AddObjectPart(testCase.partNum, "part."+partNumString, "etag."+partNumString, int64(testCase.partNum+MiB))
xlMeta.AddObjectPart(testCase.partNum, "part."+partNumString, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte))
}
// Add failure test case.
@ -115,7 +115,7 @@ func TestObjectToPartOffset(t *testing.T) {
// Total size of all parts is 5,242,899 bytes.
for _, partNum := range []int{1, 2, 4, 5, 7} {
partNumString := strconv.Itoa(partNum)
xlMeta.AddObjectPart(partNum, "part."+partNumString, "etag."+partNumString, int64(partNum+MiB))
xlMeta.AddObjectPart(partNum, "part."+partNumString, "etag."+partNumString, int64(partNum+humanize.MiByte))
}
testCases := []struct {
@ -125,15 +125,15 @@ func TestObjectToPartOffset(t *testing.T) {
expectedErr error
}{
{0, 0, 0, nil},
{MiB, 0, MiB, nil},
{1 + MiB, 1, 0, nil},
{2 + MiB, 1, 1, nil},
{1 * humanize.MiByte, 0, 1 * humanize.MiByte, nil},
{1 + humanize.MiByte, 1, 0, nil},
{2 + humanize.MiByte, 1, 1, nil},
// It's valid for a zero-sized object.
{-1, 0, -1, nil},
// Max offset is always (size - 1).
{(1 + 2 + 4 + 5 + 7) + (5 * MiB) - 1, 4, 1048582, nil},
{(1 + 2 + 4 + 5 + 7) + (5 * humanize.MiByte) - 1, 4, 1048582, nil},
// Error if offset is size.
{(1 + 2 + 4 + 5 + 7) + (5 * MiB), 0, 0, InvalidRange{}},
{(1 + 2 + 4 + 5 + 7) + (5 * humanize.MiByte), 0, 0, InvalidRange{}},
}
// Test them.

@ -25,6 +25,8 @@ import (
"reflect"
"testing"
"time"
humanize "github.com/dustin/go-humanize"
)
func TestRepeatPutObjectPart(t *testing.T) {
@ -49,14 +51,14 @@ func TestRepeatPutObjectPart(t *testing.T) {
if err != nil {
t.Fatal(err)
}
fiveMBBytes := bytes.Repeat([]byte("a"), 5*1024*1024)
fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte)
md5Hex := getMD5Hash(fiveMBBytes)
_, err = objLayer.PutObjectPart("bucket1", "mpartObj1", uploadID, 1, 5*1024*1024, bytes.NewReader(fiveMBBytes), md5Hex, "")
_, err = objLayer.PutObjectPart("bucket1", "mpartObj1", uploadID, 1, 5*humanize.MiByte, bytes.NewReader(fiveMBBytes), md5Hex, "")
if err != nil {
t.Fatal(err)
}
// PutObjectPart should succeed even if part already exists. ref: https://github.com/minio/minio/issues/1930
_, err = objLayer.PutObjectPart("bucket1", "mpartObj1", uploadID, 1, 5*1024*1024, bytes.NewReader(fiveMBBytes), md5Hex, "")
_, err = objLayer.PutObjectPart("bucket1", "mpartObj1", uploadID, 1, 5*humanize.MiByte, bytes.NewReader(fiveMBBytes), md5Hex, "")
if err != nil {
t.Fatal(err)
}
@ -279,7 +281,7 @@ func TestHealing(t *testing.T) {
bucket := "bucket"
object := "object"
data := make([]byte, 1*1024*1024)
data := make([]byte, 1*humanize.MiByte)
length := int64(len(data))
_, err = rand.Read(data)
if err != nil {

@ -23,6 +23,7 @@ import (
"strings"
"sync"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/objcache"
)
@ -42,7 +43,7 @@ const (
uploadsJSONFile = "uploads.json"
// 8GiB cache by default.
maxCacheSize = 8 * 1024 * 1024 * 1024
maxCacheSize = 8 * humanize.GiByte
// Maximum erasure blocks.
maxErasureBlocks = 16

Loading…
Cancel
Save