Move storageclass config handling into cmd/config/storageclass (#8360)
Continuation of the changes done in PR #8351 to refactor, add tests, and move global handling into a more idiomatic style for Go as packages.
parent
002ac82631
commit
3b8adf7528
@ -0,0 +1,230 @@ |
||||
/* |
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package storageclass |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"fmt" |
||||
"strconv" |
||||
"strings" |
||||
|
||||
"github.com/minio/minio/cmd/config" |
||||
"github.com/minio/minio/pkg/env" |
||||
) |
||||
|
||||
// Standard constants for all storage class
const (
	// RRS - reduced redundancy storage class
	RRS = "REDUCED_REDUNDANCY"
	// STANDARD - standard storage class
	STANDARD = "STANDARD"
)

// Standard constants for config info storage class
const (
	// RRSEnv - reduced redundancy storage class environment variable
	RRSEnv = "MINIO_STORAGE_CLASS_RRS"
	// StandardEnv - standard storage class environment variable
	StandardEnv = "MINIO_STORAGE_CLASS_STANDARD"

	// schemePrefix - the only supported storage class scheme is EC (erasure coding).
	schemePrefix = "EC"

	// minParityDisks - minimum number of parity disks a class may configure.
	minParityDisks = 2

	// defaultRRSParity - default RRS parity is always the minimum parity.
	defaultRRSParity = minParityDisks
)
||||
|
||||
// StorageClass - holds storage class information
type StorageClass struct {
	// Parity is the number of parity drives; zero means "not configured".
	Parity int
}

// Config storage class configuration, as serialized in the "standard"
// and "rrs" JSON fields of the server configuration.
type Config struct {
	Standard StorageClass `json:"standard"`
	RRS StorageClass `json:"rrs"`
}
||||
|
||||
// UnmarshalJSON - Validate SS and RRS parity when unmarshalling JSON.
|
||||
func (sCfg *Config) UnmarshalJSON(data []byte) error { |
||||
type Alias Config |
||||
aux := &struct { |
||||
*Alias |
||||
}{ |
||||
Alias: (*Alias)(sCfg), |
||||
} |
||||
return json.Unmarshal(data, &aux) |
||||
} |
||||
|
||||
// IsValid - returns true if input string is a valid
|
||||
// storage class kind supported.
|
||||
func IsValid(sc string) bool { |
||||
return sc == RRS || sc == STANDARD |
||||
} |
||||
|
||||
// UnmarshalText unmarshals storage class from its textual form into
|
||||
// storageClass structure.
|
||||
func (sc *StorageClass) UnmarshalText(b []byte) error { |
||||
scStr := string(b) |
||||
if scStr == "" { |
||||
return nil |
||||
} |
||||
s, err := parseStorageClass(scStr) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
sc.Parity = s.Parity |
||||
return nil |
||||
} |
||||
|
||||
// MarshalText - marshals storage class string.
|
||||
func (sc *StorageClass) MarshalText() ([]byte, error) { |
||||
if sc.Parity != 0 { |
||||
return []byte(fmt.Sprintf("%s:%d", schemePrefix, sc.Parity)), nil |
||||
} |
||||
return []byte(""), nil |
||||
} |
||||
|
||||
func (sc *StorageClass) String() string { |
||||
if sc.Parity != 0 { |
||||
return fmt.Sprintf("%s:%d", schemePrefix, sc.Parity) |
||||
} |
||||
return "" |
||||
} |
||||
|
||||
// Parses given storageClassEnv and returns a storageClass structure.
|
||||
// Supported Storage Class format is "Scheme:Number of parity disks".
|
||||
// Currently only supported scheme is "EC".
|
||||
func parseStorageClass(storageClassEnv string) (sc StorageClass, err error) { |
||||
s := strings.Split(storageClassEnv, ":") |
||||
|
||||
// only two elements allowed in the string - "scheme" and "number of parity disks"
|
||||
if len(s) > 2 { |
||||
return StorageClass{}, config.ErrStorageClassValue(nil).Msg("Too many sections in " + storageClassEnv) |
||||
} else if len(s) < 2 { |
||||
return StorageClass{}, config.ErrStorageClassValue(nil).Msg("Too few sections in " + storageClassEnv) |
||||
} |
||||
|
||||
// only allowed scheme is "EC"
|
||||
if s[0] != schemePrefix { |
||||
return StorageClass{}, config.ErrStorageClassValue(nil).Msg("Unsupported scheme " + s[0] + ". Supported scheme is EC") |
||||
} |
||||
|
||||
// Number of parity disks should be integer
|
||||
parityDisks, err := strconv.Atoi(s[1]) |
||||
if err != nil { |
||||
return StorageClass{}, config.ErrStorageClassValue(err) |
||||
} |
||||
|
||||
return StorageClass{ |
||||
Parity: parityDisks, |
||||
}, nil |
||||
} |
||||
|
||||
// Validates the parity disks.
|
||||
func validateParity(ssParity, rrsParity, drivesPerSet int) (err error) { |
||||
if ssParity == 0 && rrsParity == 0 { |
||||
return nil |
||||
} |
||||
|
||||
// SS parity disks should be greater than or equal to minParityDisks.
|
||||
// Parity below minParityDisks is not supported.
|
||||
if ssParity < minParityDisks { |
||||
return fmt.Errorf("Standard storage class parity %d should be greater than or equal to %d", |
||||
ssParity, minParityDisks) |
||||
} |
||||
|
||||
// RRS parity disks should be greater than or equal to minParityDisks.
|
||||
// Parity below minParityDisks is not supported.
|
||||
if rrsParity < minParityDisks { |
||||
return fmt.Errorf("Reduced redundancy storage class parity %d should be greater than or equal to %d", rrsParity, minParityDisks) |
||||
} |
||||
|
||||
if ssParity > drivesPerSet/2 { |
||||
return fmt.Errorf("Standard storage class parity %d should be less than or equal to %d", ssParity, drivesPerSet/2) |
||||
} |
||||
|
||||
if rrsParity > drivesPerSet/2 { |
||||
return fmt.Errorf("Reduced redundancy storage class parity %d should be less than or equal to %d", rrsParity, drivesPerSet/2) |
||||
} |
||||
|
||||
if ssParity > 0 && rrsParity > 0 { |
||||
if ssParity < rrsParity { |
||||
return fmt.Errorf("Standard storage class parity disks %d should be greater than or equal to Reduced redundancy storage class parity disks %d", ssParity, rrsParity) |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// GetParityForSC - returns the parity drive count configured for the
// given storage class name sc (leading/trailing whitespace ignored).
// For REDUCED_REDUNDANCY the configured RRS parity is returned, falling
// back to defaultRRSParity when unset. Any other value - including
// STANDARD and the empty string - returns the configured standard
// parity, which may be zero if no standard class was configured
// (LookupConfig normally defaults it to half the drives in a set).
func (sCfg Config) GetParityForSC(sc string) (parity int) {
	switch strings.TrimSpace(sc) {
	case RRS:
		// Use the configured RRS parity, or the fixed default when unset.
		if sCfg.RRS.Parity == 0 {
			return defaultRRSParity
		}
		return sCfg.RRS.Parity
	default:
		// STANDARD, empty, and unknown classes all use the standard parity.
		return sCfg.Standard.Parity
	}
}
||||
|
||||
// LookupConfig - lookup storage class config and override with valid environment settings if any.
|
||||
func LookupConfig(cfg Config, drivesPerSet int) (Config, error) { |
||||
var err error |
||||
|
||||
// Check for environment variables and parse into storageClass struct
|
||||
if ssc := env.Get(StandardEnv, cfg.Standard.String()); ssc != "" { |
||||
cfg.Standard, err = parseStorageClass(ssc) |
||||
if err != nil { |
||||
return cfg, err |
||||
} |
||||
if cfg.Standard.Parity == 0 { |
||||
cfg.Standard.Parity = drivesPerSet / 2 |
||||
} |
||||
} |
||||
|
||||
if rrsc := env.Get(RRSEnv, cfg.RRS.String()); rrsc != "" { |
||||
cfg.RRS, err = parseStorageClass(rrsc) |
||||
if err != nil { |
||||
return cfg, err |
||||
} |
||||
if cfg.RRS.Parity == 0 { |
||||
cfg.RRS.Parity = defaultRRSParity |
||||
} |
||||
} |
||||
|
||||
// Validation is done after parsing both the storage classes. This is needed because we need one
|
||||
// storage class value to deduce the correct value of the other storage class.
|
||||
if err = validateParity(cfg.Standard.Parity, cfg.RRS.Parity, drivesPerSet); err != nil { |
||||
return cfg, err |
||||
} |
||||
|
||||
return cfg, nil |
||||
} |
@ -0,0 +1,163 @@ |
||||
/* |
||||
* MinIO Cloud Storage, (C) 2017-2019 MinIO, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package storageclass |
||||
|
||||
import ( |
||||
"errors" |
||||
"reflect" |
||||
"testing" |
||||
) |
||||
|
||||
func TestParseStorageClass(t *testing.T) { |
||||
tests := []struct { |
||||
storageClassEnv string |
||||
wantSc StorageClass |
||||
expectedError error |
||||
}{ |
||||
{"EC:3", StorageClass{ |
||||
Parity: 3}, |
||||
nil}, |
||||
{"EC:4", StorageClass{ |
||||
Parity: 4}, |
||||
nil}, |
||||
{"AB:4", StorageClass{ |
||||
Parity: 4}, |
||||
errors.New("Unsupported scheme AB. Supported scheme is EC")}, |
||||
{"EC:4:5", StorageClass{ |
||||
Parity: 4}, |
||||
errors.New("Too many sections in EC:4:5")}, |
||||
{"EC:A", StorageClass{ |
||||
Parity: 4}, |
||||
errors.New(`strconv.Atoi: parsing "A": invalid syntax`)}, |
||||
{"AB", StorageClass{ |
||||
Parity: 4}, |
||||
errors.New("Too few sections in AB")}, |
||||
} |
||||
for i, tt := range tests { |
||||
gotSc, err := parseStorageClass(tt.storageClassEnv) |
||||
if err != nil && tt.expectedError == nil { |
||||
t.Errorf("Test %d, Expected %s, got %s", i+1, tt.expectedError, err) |
||||
return |
||||
} |
||||
if err == nil && tt.expectedError != nil { |
||||
t.Errorf("Test %d, Expected %s, got %s", i+1, tt.expectedError, err) |
||||
return |
||||
} |
||||
if tt.expectedError == nil && !reflect.DeepEqual(gotSc, tt.wantSc) { |
||||
t.Errorf("Test %d, Expected %v, got %v", i+1, tt.wantSc, gotSc) |
||||
return |
||||
} |
||||
if tt.expectedError != nil && err.Error() != tt.expectedError.Error() { |
||||
t.Errorf("Test %d, Expected `%v`, got `%v`", i+1, tt.expectedError, err) |
||||
} |
||||
} |
||||
} |
||||
|
||||
func TestValidateParity(t *testing.T) { |
||||
tests := []struct { |
||||
rrsParity int |
||||
ssParity int |
||||
success bool |
||||
drivesPerSet int |
||||
}{ |
||||
{2, 4, true, 16}, |
||||
{3, 3, true, 16}, |
||||
{0, 0, true, 16}, |
||||
{1, 4, false, 16}, |
||||
{7, 6, false, 16}, |
||||
{9, 0, false, 16}, |
||||
{9, 9, false, 16}, |
||||
{2, 9, false, 16}, |
||||
{9, 2, false, 16}, |
||||
} |
||||
for i, tt := range tests { |
||||
err := validateParity(tt.ssParity, tt.rrsParity, tt.drivesPerSet) |
||||
if err != nil && tt.success { |
||||
t.Errorf("Test %d, Expected success, got %s", i+1, err) |
||||
} |
||||
if err == nil && !tt.success { |
||||
t.Errorf("Test %d, Expected failure, got success", i+1) |
||||
} |
||||
} |
||||
} |
||||
|
||||
// TestParityCount verifies GetParityForSC for RRS, STANDARD and empty
// class names, including the RRS default and per-case config overrides.
func TestParityCount(t *testing.T) {
	tests := []struct {
		sc             string
		disksCount     int
		expectedData   int
		expectedParity int
	}{
		{RRS, 16, 14, 2},
		{STANDARD, 16, 8, 8},
		{"", 16, 8, 8},
		{RRS, 16, 9, 7},
		{STANDARD, 16, 10, 6},
		{"", 16, 9, 7},
	}
	for i, tt := range tests {
		scfg := Config{
			Standard: StorageClass{
				Parity: 8,
			},
			RRS: StorageClass{
				Parity: 0,
			},
		}
		// Override the RRS config parity for test case 4
		// (the original comments said "env var", but these set Config fields).
		if i+1 == 4 {
			scfg.RRS.Parity = 7
		}
		// Override the standard config parity for test case 5
		if i+1 == 5 {
			scfg.Standard.Parity = 6
		}
		// Override the standard config parity for test case 6
		if i+1 == 6 {
			scfg.Standard.Parity = 7
		}
		parity := scfg.GetParityForSC(tt.sc)
		if (tt.disksCount - parity) != tt.expectedData {
			t.Errorf("Test %d, Expected data disks %d, got %d", i+1, tt.expectedData, tt.disksCount-parity)
			continue
		}
		if parity != tt.expectedParity {
			t.Errorf("Test %d, Expected parity disks %d, got %d", i+1, tt.expectedParity, parity)
		}
	}
}
||||
|
||||
// Test IsValid method with valid and invalid inputs
|
||||
func TestIsValidStorageClassKind(t *testing.T) { |
||||
tests := []struct { |
||||
sc string |
||||
want bool |
||||
}{ |
||||
{"STANDARD", true}, |
||||
{"REDUCED_REDUNDANCY", true}, |
||||
{"", false}, |
||||
{"INVALID", false}, |
||||
{"123", false}, |
||||
{"MINIO_STORAGE_CLASS_RRS", false}, |
||||
{"MINIO_STORAGE_CLASS_STANDARD", false}, |
||||
} |
||||
for i, tt := range tests { |
||||
if got := IsValid(tt.sc); got != tt.want { |
||||
t.Errorf("Test %d, Expected Storage Class to be %t, got %t", i+1, tt.want, got) |
||||
} |
||||
} |
||||
} |
@ -1,213 +0,0 @@ |
||||
/* |
||||
* MinIO Cloud Storage, (C) 2017 MinIO, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package cmd |
||||
|
||||
import ( |
||||
"context" |
||||
"encoding/json" |
||||
"fmt" |
||||
"strconv" |
||||
"strings" |
||||
|
||||
"github.com/minio/minio/cmd/config" |
||||
) |
||||
|
||||
// Legacy storage class constants from package cmd (superseded by
// cmd/config/storageclass in this change).
const (
	// metadata entry for storage class
	amzStorageClass = "x-amz-storage-class"
	// Canonical metadata entry for storage class
	amzStorageClassCanonical = "X-Amz-Storage-Class"
	// Reduced redundancy storage class
	reducedRedundancyStorageClass = "REDUCED_REDUNDANCY"
	// Standard storage class
	standardStorageClass = "STANDARD"
	// Reduced redundancy storage class environment variable
	reducedRedundancyStorageClassEnv = "MINIO_STORAGE_CLASS_RRS"
	// Standard storage class environment variable
	standardStorageClassEnv = "MINIO_STORAGE_CLASS_STANDARD"
	// Supported storage class scheme is EC
	supportedStorageClassScheme = "EC"
	// Minimum parity disks
	minimumParityDisks = 2
	// Default RRS parity; equals the minimum parity disks.
	defaultRRSParity = 2
)
||||
|
||||
// Struct to hold storage class
type storageClass struct {
	Scheme string // erasure-coding scheme label; only "EC" is ever parsed
	Parity int    // number of parity disks
}

// storageClassConfig groups the standard and reduced redundancy classes
// as serialized in config.json.
type storageClassConfig struct {
	Standard storageClass `json:"standard"`
	RRS storageClass `json:"rrs"`
}
||||
|
||||
// Validate SS and RRS parity when unmarshalling JSON.
func (sCfg *storageClassConfig) UnmarshalJSON(data []byte) error {
	// Alias strips this method from the type so Unmarshal does not recurse.
	type Alias storageClassConfig
	aux := &struct {
		*Alias
	}{
		Alias: (*Alias)(sCfg),
	}
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}
	// Unlike the new package's UnmarshalJSON, this rejects invalid
	// parity combinations right at decode time.
	return validateParity(aux.Standard.Parity, aux.RRS.Parity)
}
||||
|
||||
// Validate if storage class in metadata
// Only Standard and RRS Storage classes are supported
func isValidStorageClassMeta(sc string) bool {
	return sc == reducedRedundancyStorageClass || sc == standardStorageClass
}
||||
|
||||
// UnmarshalText parses the "Scheme:Parity" textual form into sc.
// An empty value is accepted and leaves sc unchanged.
func (sc *storageClass) UnmarshalText(b []byte) error {
	scStr := string(b)
	if scStr == "" {
		return nil
	}
	s, err := parseStorageClass(scStr)
	if err != nil {
		return err
	}
	sc.Parity = s.Parity
	sc.Scheme = s.Scheme
	return nil
}
||||
|
||||
// MarshalText renders the "Scheme:Parity" form; a zero-value class
// (no scheme or no parity) marshals to the empty string.
func (sc *storageClass) MarshalText() ([]byte, error) {
	if sc.Scheme != "" && sc.Parity != 0 {
		return []byte(fmt.Sprintf("%s:%d", sc.Scheme, sc.Parity)), nil
	}
	return []byte(""), nil
}
||||
|
||||
// Parses given storageClassEnv and returns a storageClass structure.
// Supported Storage Class format is "Scheme:Number of parity disks".
// Currently only supported scheme is "EC".
func parseStorageClass(storageClassEnv string) (sc storageClass, err error) {
	s := strings.Split(storageClassEnv, ":")

	// only two elements allowed in the string - "scheme" and "number of parity disks"
	if len(s) > 2 {
		return storageClass{}, config.ErrStorageClassValue(nil).Msg("Too many sections in " + storageClassEnv)
	} else if len(s) < 2 {
		return storageClass{}, config.ErrStorageClassValue(nil).Msg("Too few sections in " + storageClassEnv)
	}

	// only allowed scheme is "EC"
	if s[0] != supportedStorageClassScheme {
		return storageClass{}, config.ErrStorageClassValue(nil).Msg("Unsupported scheme " + s[0] + ". Supported scheme is EC")
	}

	// Number of parity disks should be integer
	parityDisks, err := strconv.Atoi(s[1])
	if err != nil {
		return storageClass{}, config.ErrStorageClassValue(err)
	}

	// Unlike the replacement in cmd/config/storageclass, this version
	// also records the scheme label.
	sc = storageClass{
		Scheme: s[0],
		Parity: parityDisks,
	}

	return sc, nil
}
||||
|
||||
// Validates the parity disks.
// NOTE(review): this legacy version reads the package globals globalIsXL
// and globalXLSetDriveCount rather than taking the drive count as a
// parameter, which is what motivated the refactor.
func validateParity(ssParity, rrsParity int) (err error) {
	if ssParity == 0 && rrsParity == 0 {
		return nil
	}

	// Storage classes only make sense in erasure-coding (XL) mode.
	if !globalIsXL {
		return fmt.Errorf("Setting storage class only allowed for erasure coding mode")
	}

	// SS parity disks should be greater than or equal to minimumParityDisks. Parity below minimumParityDisks is not recommended.
	if ssParity > 0 && ssParity < minimumParityDisks {
		return fmt.Errorf("Standard storage class parity %d should be greater than or equal to %d", ssParity, minimumParityDisks)
	}

	// RRS parity disks should be greater than or equal to minimumParityDisks. Parity below minimumParityDisks is not recommended.
	if rrsParity > 0 && rrsParity < minimumParityDisks {
		return fmt.Errorf("Reduced redundancy storage class parity %d should be greater than or equal to %d", rrsParity, minimumParityDisks)
	}

	if ssParity > globalXLSetDriveCount/2 {
		return fmt.Errorf("Standard storage class parity %d should be less than or equal to %d", ssParity, globalXLSetDriveCount/2)
	}

	if rrsParity > globalXLSetDriveCount/2 {
		return fmt.Errorf("Reduced redundancy storage class parity %d should be less than or equal to %d", rrsParity, globalXLSetDriveCount/2)
	}

	// When both are set, SS must protect at least as strongly as RRS.
	if ssParity > 0 && rrsParity > 0 {
		if ssParity < rrsParity {
			return fmt.Errorf("Standard storage class parity disks %d should be greater than or equal to Reduced redundancy storage class parity disks %d", ssParity, rrsParity)
		}
	}
	return nil
}
||||
|
||||
// Returns the data and parity drive count based on storage class
// If storage class is set using the env vars MINIO_STORAGE_CLASS_RRS and MINIO_STORAGE_CLASS_STANDARD
// or config.json fields
// -- corresponding values are returned
// If storage class is not set during startup, default values are returned
// -- Default for Reduced Redundancy Storage class is, parity = 2 and data = N-Parity
// -- Default for Standard Storage class is, parity = N/2, data = N/2
// If storage class is empty
// -- standard storage class is assumed and corresponding data and parity is returned
// NOTE(review): reads globalRRStorageClass/globalStandardStorageClass;
// the replacement Config.GetParityForSC removes this global state.
func getRedundancyCount(sc string, totalDisks int) (data, parity int) {
	// Default: split drives evenly between data and parity.
	parity = totalDisks / 2
	switch sc {
	case reducedRedundancyStorageClass:
		if globalRRStorageClass.Parity != 0 {
			// set the rrs parity if available
			parity = globalRRStorageClass.Parity
		} else {
			// else fall back to default value
			parity = defaultRRSParity
		}
	case standardStorageClass, "":
		if globalStandardStorageClass.Parity != 0 {
			// set the standard parity if available
			parity = globalStandardStorageClass.Parity
		}
	}
	// data is always totalDisks - parity
	return totalDisks - parity, parity
}
||||
|
||||
// Returns per object readQuorum and writeQuorum
// readQuorum is the minimum required disks to read data.
// writeQuorum is the minimum required disks to write data.
func objectQuorumFromMeta(ctx context.Context, xl xlObjects, partsMetaData []xlMetaV1, errs []error) (objectReadQuorum, objectWriteQuorum int, err error) {
	// get the latest updated Metadata and a count of all the latest updated xlMeta(s)
	latestXLMeta, err := getLatestXLMeta(ctx, partsMetaData, errs)

	if err != nil {
		return 0, 0, err
	}

	// Since all the valid erasure code meta updated at the same time are equivalent, pass dataBlocks
	// from latestXLMeta to get the quorum
	// Write quorum is one more than the data block count (a bare majority
	// for the default N/2 data / N/2 parity configuration).
	return latestXLMeta.Erasure.DataBlocks, latestXLMeta.Erasure.DataBlocks + 1, nil
}
@ -1,346 +0,0 @@ |
||||
/* |
||||
* MinIO Cloud Storage, (C) 2017 MinIO, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package cmd |
||||
|
||||
import ( |
||||
"bytes" |
||||
"context" |
||||
"errors" |
||||
"reflect" |
||||
"testing" |
||||
) |
||||
|
||||
// TestParseStorageClass runs the parse tests under the cmd object-layer
// test harness (the replacement test in cmd/config/storageclass drops
// the harness since parsing needs no object layer).
func TestParseStorageClass(t *testing.T) {
	ExecObjectLayerTest(t, testParseStorageClass)
}

// testParseStorageClass is a table-driven check of parseStorageClass
// covering valid EC values and malformed-input error messages.
func testParseStorageClass(obj ObjectLayer, instanceType string, t TestErrHandler) {
	tests := []struct {
		storageClassEnv string
		wantSc          storageClass
		expectedError   error
	}{
		{"EC:3", storageClass{
			Scheme: "EC",
			Parity: 3},
			nil},
		{"EC:4", storageClass{
			Scheme: "EC",
			Parity: 4},
			nil},
		{"AB:4", storageClass{
			Scheme: "EC",
			Parity: 4},
			errors.New("Unsupported scheme AB. Supported scheme is EC")},
		{"EC:4:5", storageClass{
			Scheme: "EC",
			Parity: 4},
			errors.New("Too many sections in EC:4:5")},
		{"AB", storageClass{
			Scheme: "EC",
			Parity: 4},
			errors.New("Too few sections in AB")},
	}
	for i, tt := range tests {
		gotSc, err := parseStorageClass(tt.storageClassEnv)
		if err != nil && tt.expectedError == nil {
			t.Errorf("Test %d, Expected %s, got %s", i+1, tt.expectedError, err)
			return
		}
		if err == nil && tt.expectedError != nil {
			t.Errorf("Test %d, Expected %s, got %s", i+1, tt.expectedError, err)
			return
		}
		if tt.expectedError == nil && !reflect.DeepEqual(gotSc, tt.wantSc) {
			t.Errorf("Test %d, Expected %v, got %v", i+1, tt.wantSc, gotSc)
			return
		}
		if tt.expectedError != nil && err.Error() != tt.expectedError.Error() {
			t.Errorf("Test %d, Expected `%v`, got `%v`", i+1, tt.expectedError, err)
		}
	}
}
||||
|
||||
// TestValidateParity runs the parity validation tests with real object
// layer directories so the XL globals can be configured.
func TestValidateParity(t *testing.T) {
	ExecObjectLayerTestWithDirs(t, testValidateParity)
}

// testValidateParity checks validateParity success/failure across
// parity combinations, temporarily forcing XL mode via globals.
func testValidateParity(obj ObjectLayer, instanceType string, dirs []string, t TestErrHandler) {
	// Reset global storage class flags
	resetGlobalStorageEnvs()

	// Set proper envs for a single node XL setup.
	saveIsXL := globalIsXL
	defer func() {
		globalIsXL = saveIsXL
	}()
	globalIsXL = true
	saveSetDriveCount := globalXLSetDriveCount
	defer func() {
		globalXLSetDriveCount = saveSetDriveCount
	}()
	// NOTE(review): the save/restore above targets globalXLSetDriveCount
	// but this assigns globalXLSetCount - looks like a typo; confirm
	// which global was intended.
	globalXLSetCount = len(dirs)

	tests := []struct {
		rrsParity int
		ssParity  int
		success   bool
	}{
		{2, 4, true},
		{3, 3, true},
		{1, 4, false},
		{7, 6, false},
		{9, 0, false},
		{9, 9, false},
		{2, 9, false},
	}
	for i, tt := range tests {
		err := validateParity(tt.ssParity, tt.rrsParity)
		if err != nil && tt.success {
			t.Errorf("Test %d, Expected success, got %s", i+1, err)
		}
		if err == nil && !tt.success {
			t.Errorf("Test %d, Expected failure, got success", i+1)
		}
	}
}
||||
|
||||
// TestRedundancyCount runs the redundancy count tests against a real
// XL object layer.
func TestRedundancyCount(t *testing.T) {
	ExecObjectLayerTestWithDirs(t, testGetRedundancyCount)
}

// testGetRedundancyCount checks getRedundancyCount's data/parity split
// for RRS, STANDARD and empty class names, mutating the storage class
// globals for the later cases.
func testGetRedundancyCount(obj ObjectLayer, instanceType string, dirs []string, t TestErrHandler) {
	// Reset global storage class flags
	resetGlobalStorageEnvs()
	xl := obj.(*xlObjects)

	tests := []struct {
		sc             string
		disksCount     int
		expectedData   int
		expectedParity int
	}{
		{reducedRedundancyStorageClass, len(xl.storageDisks), 14, 2},
		{standardStorageClass, len(xl.storageDisks), 8, 8},
		{"", len(xl.storageDisks), 8, 8},
		{reducedRedundancyStorageClass, len(xl.storageDisks), 9, 7},
		{standardStorageClass, len(xl.storageDisks), 10, 6},
		{"", len(xl.storageDisks), 9, 7},
	}
	for i, tt := range tests {
		// Override the RRS global parity for test case 4
		// (comment said "env var" but these set globals directly).
		if i+1 == 4 {
			globalRRStorageClass.Parity = 7
		}
		// Override the standard global parity for test case 5
		if i+1 == 5 {
			globalStandardStorageClass.Parity = 6
		}
		// Override the standard global parity for test case 6
		if i+1 == 6 {
			globalStandardStorageClass.Parity = 7
		}
		data, parity := getRedundancyCount(tt.sc, tt.disksCount)
		if data != tt.expectedData {
			t.Errorf("Test %d, Expected data disks %d, got %d", i+1, tt.expectedData, data)
			return
		}
		if parity != tt.expectedParity {
			t.Errorf("Test %d, Expected parity disks %d, got %d", i+1, tt.expectedParity, parity)
			return
		}
	}
}
||||
|
||||
// TestObjectQuorumFromMeta runs the quorum tests against a real XL
// object layer with on-disk directories.
func TestObjectQuorumFromMeta(t *testing.T) {
	ExecObjectLayerTestWithDirs(t, testObjectQuorumFromMeta)
}

// testObjectQuorumFromMeta puts seven objects under different storage
// class configurations (via metadata and the storage class globals),
// then verifies objectQuorumFromMeta derives the expected read/write
// quorums from the stored xlMeta of each object.
func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []string, t TestErrHandler) {
	// Reset global storage class flags
	resetGlobalStorageEnvs()
	bucket := getRandomBucketName()

	var opts ObjectOptions
	// make data with more than one part
	partCount := 3
	data := bytes.Repeat([]byte("a"), int(globalPutPartSize)*partCount)
	xl := obj.(*xlObjects)
	xlDisks := xl.storageDisks

	err := obj.MakeBucketWithLocation(context.Background(), bucket, globalMinioDefaultRegion)
	if err != nil {
		t.Fatalf("Failed to make a bucket %v", err)
	}

	// Object for test case 1 - No StorageClass defined, no MetaData in PutObject
	object1 := "object1"
	_, err = obj.PutObject(context.Background(), bucket, object1, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
	if err != nil {
		t.Fatalf("Failed to putObject %v", err)
	}

	parts1, errs1 := readAllXLMetadata(context.Background(), xlDisks, bucket, object1)

	// Object for test case 2 - No StorageClass defined, MetaData in PutObject requesting RRS Class
	object2 := "object2"
	metadata2 := make(map[string]string)
	metadata2["x-amz-storage-class"] = reducedRedundancyStorageClass
	_, err = obj.PutObject(context.Background(), bucket, object2, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata2})
	if err != nil {
		t.Fatalf("Failed to putObject %v", err)
	}

	parts2, errs2 := readAllXLMetadata(context.Background(), xlDisks, bucket, object2)

	// Object for test case 3 - No StorageClass defined, MetaData in PutObject requesting Standard Storage Class
	object3 := "object3"
	metadata3 := make(map[string]string)
	metadata3["x-amz-storage-class"] = standardStorageClass
	_, err = obj.PutObject(context.Background(), bucket, object3, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata3})
	if err != nil {
		t.Fatalf("Failed to putObject %v", err)
	}

	parts3, errs3 := readAllXLMetadata(context.Background(), xlDisks, bucket, object3)

	// Object for test case 4 - Standard StorageClass defined as Parity 6, MetaData in PutObject requesting Standard Storage Class
	object4 := "object4"
	metadata4 := make(map[string]string)
	metadata4["x-amz-storage-class"] = standardStorageClass
	globalStandardStorageClass = storageClass{
		Parity: 6,
		Scheme: "EC",
	}

	_, err = obj.PutObject(context.Background(), bucket, object4, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata4})
	if err != nil {
		t.Fatalf("Failed to putObject %v", err)
	}

	parts4, errs4 := readAllXLMetadata(context.Background(), xlDisks, bucket, object4)

	// Object for test case 5 - RRS StorageClass defined as Parity 2, MetaData in PutObject requesting RRS Class
	// Reset global storage class flags
	resetGlobalStorageEnvs()
	object5 := "object5"
	metadata5 := make(map[string]string)
	metadata5["x-amz-storage-class"] = reducedRedundancyStorageClass
	globalRRStorageClass = storageClass{
		Parity: 2,
		Scheme: "EC",
	}

	_, err = obj.PutObject(context.Background(), bucket, object5, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata5})
	if err != nil {
		t.Fatalf("Failed to putObject %v", err)
	}

	parts5, errs5 := readAllXLMetadata(context.Background(), xlDisks, bucket, object5)

	// Object for test case 6 - RRS StorageClass defined as Parity 2, MetaData in PutObject requesting Standard Storage Class
	// Reset global storage class flags
	resetGlobalStorageEnvs()
	object6 := "object6"
	metadata6 := make(map[string]string)
	metadata6["x-amz-storage-class"] = standardStorageClass
	globalRRStorageClass = storageClass{
		Parity: 2,
		Scheme: "EC",
	}

	_, err = obj.PutObject(context.Background(), bucket, object6, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata6})
	if err != nil {
		t.Fatalf("Failed to putObject %v", err)
	}

	parts6, errs6 := readAllXLMetadata(context.Background(), xlDisks, bucket, object6)

	// Object for test case 7 - Standard StorageClass defined as Parity 5, MetaData in PutObject requesting RRS Class
	// Reset global storage class flags
	resetGlobalStorageEnvs()
	object7 := "object7"
	metadata7 := make(map[string]string)
	metadata7["x-amz-storage-class"] = reducedRedundancyStorageClass
	globalStandardStorageClass = storageClass{
		Parity: 5,
		Scheme: "EC",
	}

	_, err = obj.PutObject(context.Background(), bucket, object7, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata7})
	if err != nil {
		t.Fatalf("Failed to putObject %v", err)
	}

	parts7, errs7 := readAllXLMetadata(context.Background(), xlDisks, bucket, object7)

	tests := []struct {
		parts               []xlMetaV1
		errs                []error
		expectedReadQuorum  int
		expectedWriteQuorum int
		expectedError       error
	}{
		{parts1, errs1, 8, 9, nil},
		{parts2, errs2, 14, 15, nil},
		{parts3, errs3, 8, 9, nil},
		{parts4, errs4, 10, 11, nil},
		{parts5, errs5, 14, 15, nil},
		{parts6, errs6, 8, 9, nil},
		{parts7, errs7, 14, 15, nil},
	}
	for i, tt := range tests {
		actualReadQuorum, actualWriteQuorum, err := objectQuorumFromMeta(context.Background(), *xl, tt.parts, tt.errs)
		if tt.expectedError != nil && err == nil {
			t.Errorf("Test %d, Expected %s, got %s", i+1, tt.expectedError, err)
			return
		}
		if tt.expectedError == nil && err != nil {
			t.Errorf("Test %d, Expected %s, got %s", i+1, tt.expectedError, err)
			return
		}
		if tt.expectedReadQuorum != actualReadQuorum {
			t.Errorf("Test %d, Expected Read Quorum %d, got %d", i+1, tt.expectedReadQuorum, actualReadQuorum)
			return
		}
		if tt.expectedWriteQuorum != actualWriteQuorum {
			t.Errorf("Test %d, Expected Write Quorum %d, got %d", i+1, tt.expectedWriteQuorum, actualWriteQuorum)
			return
		}
	}
}
||||
|
||||
// Test isValidStorageClassMeta method with valid and invalid inputs
func TestIsValidStorageClassMeta(t *testing.T) {
	tests := []struct {
		sc   string
		want bool
	}{
		{"STANDARD", true},
		{"REDUCED_REDUNDANCY", true},
		{"", false},
		{"INVALID", false},
		{"123", false},
		{"MINIO_STORAGE_CLASS_RRS", false},
		{"MINIO_STORAGE_CLASS_STANDARD", false},
	}
	for i, tt := range tests {
		if got := isValidStorageClassMeta(tt.sc); got != tt.want {
			t.Errorf("Test %d, Expected Storage Class to be %t, got %t", i+1, tt.want, got)
		}
	}
}
Loading…
Reference in new issue