Add large bucket support for erasure coded backend (#5160)
This PR implements an object layer which combines input erasure sets of XL layers into a unified namespace. This object layer extends the existing erasure coded implementation; the design assumes that providing more than 16 disks is a static configuration, i.e. if you started the setup with 32 disks as 4 sets of 8 disks each, then you must always provide the same 4 sets. Some design details and restrictions: - Objects are distributed using consistent ordering to a unique erasure coded layer. - Each set has its own dsync, so locks are synchronized properly at the set (erasure layer). - Each set still has a maximum requirement of 16 disks; you can start with multiple such sets statically. - Sets are a static collection of disks and cannot be changed; no elastic expansion is allowed. - Likewise, no elastic removal of disks from a set is allowed. - ListObjects() across sets can be noticeably slower, since listing happens on all servers and is merged at this sets layer. Fixes #5465 Fixes #5464 Fixes #5461 Fixes #5460 Fixes #5459 Fixes #5458 Fixes #5488 Fixes #5489 Fixes #5497 Fixes #5496
parent
dd80256151
commit
fb96779a8a
@ -0,0 +1,251 @@ |
||||
/* |
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package cmd |
||||
|
||||
import ( |
||||
"fmt" |
||||
"strings" |
||||
|
||||
"github.com/minio/minio-go/pkg/set" |
||||
"github.com/minio/minio/pkg/ellipses" |
||||
) |
||||
|
||||
// This file implements and supports ellipses pattern for
|
||||
// `minio server` command line arguments.
|
||||
|
||||
// Maximum number of unique args supported on the command line.
|
||||
const (
	// serverCommandLineArgsMax caps how many unique endpoint arguments
	// `minio server` accepts. NOTE(review): not referenced within this
	// chunk — confirm it is enforced elsewhere in the package.
	serverCommandLineArgsMax = 32
)
||||
|
||||
// Endpoint set represents parsed ellipses values, also provides
|
||||
// methods to get the sets of endpoints.
|
||||
type endpointSet struct {
	argPatterns []ellipses.ArgPattern // Parsed ellipses patterns, one per command-line argument.
	endpoints   []string              // Endpoints saved from previous GetEndpoints().
	setIndexes  [][]uint64            // All the sets: per argument, the size of each erasure set.
}
||||
|
||||
// Supported set sizes this is used to find the optimal
|
||||
// single set size.
|
||||
var setSizes = []uint64{4, 6, 8, 10, 12, 14, 16} // Candidate erasure set sizes; getSetIndexes picks one of these.
||||
|
||||
// getDivisibleSize - returns a greatest common divisor of
|
||||
// all the ellipses sizes.
|
||||
// getDivisibleSize - returns the greatest common divisor of all the
// ellipses sizes, i.e. the largest size that evenly divides every
// argument's expansion count.
func getDivisibleSize(totalSizes []uint64) (result uint64) {
	result = totalSizes[0]
	for _, size := range totalSizes[1:] {
		// Euclid's algorithm folded over the remaining sizes.
		a, b := result, size
		for b != 0 {
			a, b = b, a%b
		}
		result = a
	}
	return result
}
||||
|
||||
// getSetIndexes returns list of indexes which provides the set size
|
||||
// on each index, this function also determines the final set size
|
||||
// The final set size has the affinity towards choosing smaller
|
||||
// indexes (total sets)
|
||||
func getSetIndexes(args []string, totalSizes []uint64) (setIndexes [][]uint64, err error) { |
||||
if len(totalSizes) == 0 || len(args) == 0 { |
||||
return nil, errInvalidArgument |
||||
} |
||||
|
||||
setIndexes = make([][]uint64, len(totalSizes)) |
||||
for i, totalSize := range totalSizes { |
||||
// Check if totalSize has minimum range upto setSize
|
||||
if totalSize < setSizes[0] { |
||||
return nil, fmt.Errorf("Invalid inputs (%s). Ellipses range or number of args %d should be atleast divisible by least possible set size %d", |
||||
args[i], totalSize, setSizes[0]) |
||||
} |
||||
} |
||||
|
||||
var setSize uint64 |
||||
|
||||
commonSize := getDivisibleSize(totalSizes) |
||||
if commonSize > setSizes[len(setSizes)-1] { |
||||
prevD := commonSize / setSizes[0] |
||||
for _, i := range setSizes { |
||||
if commonSize%i == 0 { |
||||
d := commonSize / i |
||||
if d <= prevD { |
||||
prevD = d |
||||
setSize = i |
||||
} |
||||
} |
||||
} |
||||
} else { |
||||
setSize = commonSize |
||||
} |
||||
|
||||
// isValidSetSize - checks whether given count is a valid set size for erasure coding.
|
||||
isValidSetSize := func(count uint64) bool { |
||||
return (count >= setSizes[0] && count <= setSizes[len(setSizes)-1] && count%2 == 0) |
||||
} |
||||
|
||||
// Check whether setSize is with the supported range.
|
||||
if !isValidSetSize(setSize) { |
||||
return nil, fmt.Errorf("Invalid inputs (%s). Ellipses range or number of args %d should be atleast divisible by least possible set size %d", |
||||
args, setSize, setSizes[0]) |
||||
} |
||||
|
||||
for i := range totalSizes { |
||||
for j := uint64(0); j < totalSizes[i]/setSize; j++ { |
||||
setIndexes[i] = append(setIndexes[i], setSize) |
||||
} |
||||
} |
||||
|
||||
return setIndexes, nil |
||||
} |
||||
|
||||
// Returns all the expanded endpoints, each argument is expanded separately.
|
||||
func (s endpointSet) getEndpoints() (endpoints []string) { |
||||
if len(s.endpoints) != 0 { |
||||
return s.endpoints |
||||
} |
||||
for _, argPattern := range s.argPatterns { |
||||
for _, lbls := range argPattern.Expand() { |
||||
endpoints = append(endpoints, strings.Join(lbls, "")) |
||||
} |
||||
} |
||||
s.endpoints = endpoints |
||||
return endpoints |
||||
} |
||||
|
||||
// Get returns the sets representation of the endpoints
|
||||
// this function also intelligently decides on what will
|
||||
// be the right set size etc.
|
||||
func (s endpointSet) Get() (sets [][]string) { |
||||
var k = uint64(0) |
||||
endpoints := s.getEndpoints() |
||||
for i := range s.setIndexes { |
||||
for j := range s.setIndexes[i] { |
||||
sets = append(sets, endpoints[k:s.setIndexes[i][j]+k]) |
||||
k = s.setIndexes[i][j] + k |
||||
} |
||||
} |
||||
|
||||
return sets |
||||
} |
||||
|
||||
// Return the total size for each argument patterns.
|
||||
func getTotalSizes(argPatterns []ellipses.ArgPattern) []uint64 { |
||||
var totalSizes []uint64 |
||||
for _, argPattern := range argPatterns { |
||||
var totalSize uint64 = 1 |
||||
for _, p := range argPattern { |
||||
totalSize = totalSize * uint64(len(p.Seq)) |
||||
} |
||||
totalSizes = append(totalSizes, totalSize) |
||||
} |
||||
return totalSizes |
||||
} |
||||
|
||||
// Parses all arguments and returns an endpointSet which is a collection
|
||||
// of endpoints following the ellipses pattern, this is what is used
|
||||
// by the object layer for initializing itself.
|
||||
func parseEndpointSet(args ...string) (ep endpointSet, err error) { |
||||
var argPatterns = make([]ellipses.ArgPattern, len(args)) |
||||
for i, arg := range args { |
||||
patterns, err := ellipses.FindEllipsesPatterns(arg) |
||||
if err != nil { |
||||
return endpointSet{}, err |
||||
} |
||||
argPatterns[i] = patterns |
||||
} |
||||
|
||||
ep.setIndexes, err = getSetIndexes(args, getTotalSizes(argPatterns)) |
||||
if err != nil { |
||||
return endpointSet{}, err |
||||
} |
||||
|
||||
ep.argPatterns = argPatterns |
||||
|
||||
return ep, nil |
||||
} |
||||
|
||||
// Parses all ellipses input arguments, expands them into corresponding
|
||||
// list of endpoints chunked evenly in accordance with a specific
|
||||
// set size.
|
||||
// For example: {1...64} is divided into 4 sets each of size 16.
|
||||
// This applies to even distributed setup syntax as well.
|
||||
func getAllSets(args ...string) ([][]string, error) { |
||||
if len(args) == 0 { |
||||
return nil, errInvalidArgument |
||||
} |
||||
|
||||
var setArgs [][]string |
||||
if !ellipses.HasEllipses(args...) { |
||||
var setIndexes [][]uint64 |
||||
// Check if we have more one args.
|
||||
if len(args) > 1 { |
||||
var err error |
||||
setIndexes, err = getSetIndexes(args, []uint64{uint64(len(args))}) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
} else { |
||||
// We are in FS setup, proceed forward.
|
||||
setIndexes = [][]uint64{[]uint64{uint64(len(args))}} |
||||
} |
||||
s := endpointSet{ |
||||
endpoints: args, |
||||
setIndexes: setIndexes, |
||||
} |
||||
setArgs = s.Get() |
||||
} else { |
||||
s, err := parseEndpointSet(args...) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
setArgs = s.Get() |
||||
} |
||||
|
||||
uniqueArgs := set.NewStringSet() |
||||
for _, sargs := range setArgs { |
||||
for _, arg := range sargs { |
||||
if uniqueArgs.Contains(arg) { |
||||
return nil, fmt.Errorf("Input args (%s) has duplicate ellipses", args) |
||||
} |
||||
uniqueArgs.Add(arg) |
||||
} |
||||
} |
||||
|
||||
return setArgs, nil |
||||
} |
||||
|
||||
// CreateServerEndpoints - validates and creates new endpoints from input args, supports
|
||||
// both ellipses and without ellipses transparently.
|
||||
func createServerEndpoints(serverAddr string, args ...string) (string, EndpointList, SetupType, int, int, error) { |
||||
setArgs, err := getAllSets(args...) |
||||
if err != nil { |
||||
return serverAddr, nil, -1, 0, 0, err |
||||
} |
||||
|
||||
var endpoints EndpointList |
||||
var setupType SetupType |
||||
serverAddr, endpoints, setupType, err = CreateEndpoints(serverAddr, setArgs...) |
||||
if err != nil { |
||||
return serverAddr, nil, -1, 0, 0, err |
||||
} |
||||
|
||||
return serverAddr, endpoints, setupType, len(setArgs), len(setArgs[0]), nil |
||||
} |
@ -0,0 +1,388 @@ |
||||
/* |
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package cmd |
||||
|
||||
import ( |
||||
"fmt" |
||||
"reflect" |
||||
"testing" |
||||
|
||||
"github.com/minio/minio/pkg/ellipses" |
||||
) |
||||
|
||||
// Tests create endpoints with ellipses and without.
|
||||
func TestCreateServerEndpoints(t *testing.T) {
	testCases := []struct {
		serverAddr string   // Listen address passed to createServerEndpoints.
		args       []string // Endpoint arguments, with or without ellipses.
		success    bool     // Whether endpoint creation is expected to succeed.
	}{
		// Invalid input.
		{"", []string{}, false},
		// Range cannot be negative.
		{":9000", []string{"/export1{-1...1}"}, false},
		// Range cannot start bigger than end.
		{":9000", []string{"/export1{64...1}"}, false},
		// Range can only be numeric.
		{":9000", []string{"/export1{a...z}"}, false},
		// Duplicate disks not allowed.
		{":9000", []string{"/export1{1...32}", "/export1{1...32}"}, false},
		// Same host cannot export same disk on two ports - special case localhost.
		{":9001", []string{"http://localhost:900{1...2}/export{1...64}"}, false},

		// Valid inputs.
		{":9000", []string{"/export1"}, true},
		{":9000", []string{"/export1", "/export2", "/export3", "/export4"}, true},
		{":9000", []string{"/export1{1...64}"}, true},
		{":9000", []string{"/export1{01...64}"}, true},
		{":9000", []string{"/export1{1...32}", "/export1{33...64}"}, true},
		{":9001", []string{"http://localhost:9001/export{1...64}"}, true},
		{":9001", []string{"http://localhost:9001/export{01...64}"}, true},
	}

	for i, testCase := range testCases {
		// Only success/failure is asserted; the returned endpoints,
		// setup type and set counts are not inspected here.
		_, _, _, _, _, err := createServerEndpoints(testCase.serverAddr, testCase.args...)
		if err != nil && testCase.success {
			t.Errorf("Test %d: Expected success but failed instead %s", i+1, err)
		}
		if err == nil && !testCase.success {
			t.Errorf("Test %d: Expected failure but passed instead", i+1)
		}
	}
}
||||
|
||||
// Test tests calculating set indexes.
|
||||
func TestGetSetIndexes(t *testing.T) {
	testCases := []struct {
		args       []string   // Original argument strings (used in error messages).
		totalSizes []uint64   // Pre-computed expansion size per argument.
		indexes    [][]uint64 // Expected set layout, nil on expected failure.
		success    bool
	}{
		// Invalid inputs.
		{
			// 27 is not divisible by any supported set size (4..16).
			[]string{"data{1...27}"},
			[]uint64{27},
			nil,
			false,
		},
		// Valid inputs.
		{
			[]string{"data{1...64}"},
			[]uint64{64},
			[][]uint64{[]uint64{16, 16, 16, 16}},
			true,
		},
		{
			[]string{"data{1...24}"},
			[]uint64{24},
			[][]uint64{[]uint64{12, 12}},
			true,
		},
		{
			[]string{"data/controller{1...11}/export{1...8}"},
			[]uint64{88},
			[][]uint64{[]uint64{8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8}},
			true,
		},
		{
			[]string{"data{1...4}"},
			[]uint64{4},
			[][]uint64{[]uint64{4}},
			true,
		},
	}

	for i, testCase := range testCases {
		t.Run(fmt.Sprintf("Test%d", i+1), func(t *testing.T) {
			gotIndexes, err := getSetIndexes(testCase.args, testCase.totalSizes)
			if err != nil && testCase.success {
				t.Errorf("Expected success but failed instead %s", err)
			}
			if err == nil && !testCase.success {
				t.Errorf("Expected failure but passed instead")
			}
			// DeepEqual also distinguishes nil vs. empty slices, pinning
			// the exact shape of the returned layout.
			if !reflect.DeepEqual(testCase.indexes, gotIndexes) {
				t.Errorf("Expected %v, got %v", testCase.indexes, gotIndexes)
			}
		})
	}
}
||||
|
||||
// getSequences generates the decimal strings for start..number inclusive,
// zero-padded to paddinglen digits when paddinglen is non-zero. Returns
// nil when start > number.
func getSequences(start int, number int, paddinglen int) (seq []string) {
	// Build the format string once instead of per iteration.
	format := "%d"
	if paddinglen != 0 {
		format = fmt.Sprintf("%%0%dd", paddinglen)
	}
	for i := start; i <= number; i++ {
		seq = append(seq, fmt.Sprintf(format, i))
	}
	return seq
}
||||
|
||||
// Test tests parses endpoint ellipses input pattern.
|
||||
func TestParseEndpointSet(t *testing.T) {
	testCases := []struct {
		arg     string      // Single ellipses argument under test.
		es      endpointSet // Expected parse result; zero value on expected failure.
		success bool
	}{
		// Tests invalid inputs.
		{
			"...",
			endpointSet{},
			false,
		},
		// Indivisible range.
		{
			"{1...27}",
			endpointSet{},
			false,
		},
		// No range specified.
		{
			"{...}",
			endpointSet{},
			false,
		},
		// Invalid range.
		{
			"http://minio{2...3}/export/set{1...0}",
			endpointSet{},
			false,
		},
		// Range cannot be smaller than 4 minimum.
		{
			"/export{1..2}",
			endpointSet{},
			false,
		},
		// Unsupported characters.
		{
			"/export/test{1...2O}",
			endpointSet{},
			false,
		},
		// Tests valid inputs.
		{
			"/export/set{1...64}",
			endpointSet{
				// Pattern fields are positional — from the usage below they
				// appear to be prefix, suffix, then the expanded sequence;
				// NOTE(review): confirm against the ellipses package.
				[]ellipses.ArgPattern{
					[]ellipses.Pattern{
						{
							"/export/set",
							"",
							getSequences(1, 64, 0),
						},
					},
				},
				nil,
				[][]uint64{[]uint64{16, 16, 16, 16}},
			},
			true,
		},
		// Valid input for distributed setup.
		{
			"http://minio{2...3}/export/set{1...64}",
			endpointSet{
				[]ellipses.ArgPattern{
					[]ellipses.Pattern{
						{
							"",
							"",
							getSequences(1, 64, 0),
						},
						{
							"http://minio",
							"/export/set",
							getSequences(2, 3, 0),
						},
					},
				},
				nil,
				[][]uint64{[]uint64{16, 16, 16, 16, 16, 16, 16, 16}},
			},
			true,
		},
		// Supporting some advanced cases.
		{
			"http://minio{1...64}.mydomain.net/data",
			endpointSet{
				[]ellipses.ArgPattern{
					[]ellipses.Pattern{
						{
							"http://minio",
							".mydomain.net/data",
							getSequences(1, 64, 0),
						},
					},
				},
				nil,
				[][]uint64{[]uint64{16, 16, 16, 16}},
			},
			true,
		},
		{
			"http://rack{1...4}.mydomain.minio{1...16}/data",
			endpointSet{
				[]ellipses.ArgPattern{
					[]ellipses.Pattern{
						{
							"",
							"/data",
							getSequences(1, 16, 0),
						},
						{
							"http://rack",
							".mydomain.minio",
							getSequences(1, 4, 0),
						},
					},
				},
				nil,
				[][]uint64{[]uint64{16, 16, 16, 16}},
			},
			true,
		},
		// Supporting kubernetes cases.
		{
			"http://minio{0...15}.mydomain.net/data{0...1}",
			endpointSet{
				[]ellipses.ArgPattern{
					[]ellipses.Pattern{
						{
							"",
							"",
							getSequences(0, 1, 0),
						},
						{
							"http://minio",
							".mydomain.net/data",
							getSequences(0, 15, 0),
						},
					},
				},
				nil,
				[][]uint64{[]uint64{16, 16}},
			},
			true,
		},
		// No host regex, just disks.
		{
			"http://server1/data{1...32}",
			endpointSet{
				[]ellipses.ArgPattern{
					[]ellipses.Pattern{
						{
							"http://server1/data",
							"",
							getSequences(1, 32, 0),
						},
					},
				},
				nil,
				[][]uint64{[]uint64{16, 16}},
			},
			true,
		},
		// No host regex, just disks with two position numerics.
		{
			"http://server1/data{01...32}",
			endpointSet{
				[]ellipses.ArgPattern{
					[]ellipses.Pattern{
						{
							"http://server1/data",
							"",
							getSequences(1, 32, 2),
						},
					},
				},
				nil,
				[][]uint64{[]uint64{16, 16}},
			},
			true,
		},
		// More than 2 ellipses are supported as well.
		{
			"http://minio{2...3}/export/set{1...64}/test{1...2}",
			endpointSet{
				[]ellipses.ArgPattern{
					[]ellipses.Pattern{
						{
							"",
							"",
							getSequences(1, 2, 0),
						},
						{
							"",
							"/test",
							getSequences(1, 64, 0),
						},
						{
							"http://minio",
							"/export/set",
							getSequences(2, 3, 0),
						},
					},
				},
				nil,
				[][]uint64{[]uint64{16, 16, 16, 16, 16, 16, 16, 16,
					16, 16, 16, 16, 16, 16, 16, 16}},
			},
			true,
		},
		// More than 1 ellipses per argument for standalone setup.
		{
			"/export{1...10}/disk{1...10}",
			endpointSet{
				[]ellipses.ArgPattern{
					[]ellipses.Pattern{
						{
							"",
							"",
							getSequences(1, 10, 0),
						},
						{
							"/export",
							"/disk",
							getSequences(1, 10, 0),
						},
					},
				},
				nil,
				[][]uint64{[]uint64{10, 10, 10, 10, 10, 10, 10, 10, 10, 10}},
			},
			true,
		},
	}

	for i, testCase := range testCases {
		t.Run(fmt.Sprintf("Test%d", i+1), func(t *testing.T) {
			gotEs, err := parseEndpointSet(testCase.arg)
			if err != nil && testCase.success {
				t.Errorf("Expected success but failed instead %s", err)
			}
			if err == nil && !testCase.success {
				t.Errorf("Expected failure but passed instead")
			}
			// Compares the whole endpointSet, including the parsed
			// patterns and the derived set layout.
			if !reflect.DeepEqual(testCase.es, gotEs) {
				t.Errorf("Expected %v, got %v", testCase.es, gotEs)
			}
		})
	}
}
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -1,147 +0,0 @@ |
||||
/* |
||||
* Minio Cloud Storage, (C) 2016 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package cmd |
||||
|
||||
import ( |
||||
"fmt" |
||||
"sync" |
||||
|
||||
humanize "github.com/dustin/go-humanize" |
||||
) |
||||
|
||||
// Helper to generate integer sequences into a friendlier user consumable format.
|
||||
// Helper to generate integer sequences into a friendlier user consumable
// format, e.g. "01/04" or "12/16". Single-digit values are zero padded
// only in the branches below, preserving the original formatting rules.
func formatInts(i int, t int) string {
	switch {
	case i < 10 && t < 10:
		return fmt.Sprintf("0%d/0%d", i, t)
	case i < 10:
		return fmt.Sprintf("0%d/%d", i, t)
	default:
		return fmt.Sprintf("%d/%d", i, t)
	}
}
||||
|
||||
// Print a given message once.
|
||||
type printOnceFunc func(msg string) // Safe to call repeatedly; implementations print at most once.
||||
|
||||
// Print once is a constructor returning a function printing once.
|
||||
// internally print uses sync.Once to perform exactly one action.
|
||||
func printOnceFn() printOnceFunc {
	var once sync.Once
	return func(msg string) {
		// sync.Once guarantees the message is logged at most once,
		// no matter how many times the returned function is invoked.
		once.Do(func() {
			log.Println(msg)
		})
	}
}
||||
|
||||
// Prints custom message when healing is required for XL and Distributed XL backend.
|
||||
func printHealMsg(endpoints EndpointList, storageDisks []StorageAPI, fn printOnceFunc) {
	// Formatting is delegated to getHealMsg; fn dedupes repeated prints.
	msg := getHealMsg(endpoints, storageDisks)
	fn(msg)
}
||||
|
||||
// Disks offline and online strings..
|
||||
const (
	diskOffline = "offline" // Reported when a disk's total size is zero (unreachable).
	diskOnline  = "online"  // Reported when a disk's total size is non-zero.
)
||||
|
||||
// Constructs a formatted heal message, when cluster is found to be in state where it requires healing.
|
||||
// healing is optional, server continues to initialize object layer after printing this message.
|
||||
// it is upto the end user to perform a heal if needed.
|
||||
func getHealMsg(endpoints EndpointList, storageDisks []StorageAPI) string {
	healFmtCmd := `"mc admin heal myminio"`
	msg := fmt.Sprintf("New disk(s) were found, format them by running - %s\n",
		healFmtCmd)
	disksInfo, _, _ := getDisksInfo(storageDisks)
	for i, info := range disksInfo {
		if storageDisks[i] == nil {
			// No storage handle for this slot; nothing to report.
			continue
		}
		// NOTE(review): this per-disk listing duplicates the loop body in
		// getStorageInitMsg — candidate for a shared helper.
		msg += fmt.Sprintf(
			"\n[%s] %s - %s %s",
			formatInts(i+1, len(storageDisks)),
			endpoints[i],
			humanize.IBytes(uint64(info.Total)),
			func() string {
				// Zero total is treated as the disk being unreachable.
				if info.Total > 0 {
					return diskOnline
				}
				return diskOffline
			}(),
		)
	}
	return msg
}
||||
|
||||
// Prints regular message when we have sufficient disks to start the cluster.
|
||||
func printRegularMsg(endpoints EndpointList, storageDisks []StorageAPI, fn printOnceFunc) {
	// Same per-disk listing as the heal message, with a neutral title.
	msg := getStorageInitMsg("Initializing data volume.", endpoints, storageDisks)
	fn(msg)
}
||||
|
||||
// Constructs a formatted regular message when we have sufficient disks to start the cluster.
|
||||
func getStorageInitMsg(titleMsg string, endpoints EndpointList, storageDisks []StorageAPI) string {
	msg := colorBlue(titleMsg)
	disksInfo, _, _ := getDisksInfo(storageDisks)
	for i, info := range disksInfo {
		if storageDisks[i] == nil {
			// No storage handle for this slot; nothing to report.
			continue
		}
		// NOTE(review): loop body duplicates getHealMsg — candidate for a
		// shared helper.
		msg += fmt.Sprintf(
			"\n[%s] %s - %s %s",
			formatInts(i+1, len(storageDisks)),
			endpoints[i],
			humanize.IBytes(uint64(info.Total)),
			func() string {
				// Zero total is treated as the disk being unreachable.
				if info.Total > 0 {
					return diskOnline
				}
				return diskOffline
			}(),
		)
	}
	return msg
}
||||
|
||||
// Prints initialization message when cluster is being initialized for the first time.
|
||||
func printFormatMsg(endpoints EndpointList, storageDisks []StorageAPI, fn printOnceFunc) {
	// First-time initialization variant of printRegularMsg.
	msg := getStorageInitMsg("Initializing data volume for the first time.", endpoints, storageDisks)
	fn(msg)
}
||||
|
||||
// Combines each disk errors in a newline formatted string.
|
||||
// this is a helper function in printing messages across
|
||||
// all disks.
|
||||
func combineDiskErrs(storageDisks []StorageAPI, sErrs []error) string {
	var msg string
	for i, disk := range storageDisks {
		if disk == nil {
			// Slot has no storage handle; skip.
			continue
		}
		if sErrs[i] == nil {
			// Healthy disk, nothing to report.
			continue
		}
		msg += fmt.Sprintf(
			"\n[%s] %s : %s",
			formatInts(i+1, len(storageDisks)),
			storageDisks[i],
			sErrs[i],
		)
	}
	return msg
}
@ -1,107 +0,0 @@ |
||||
/* |
||||
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package cmd |
||||
|
||||
import ( |
||||
"fmt" |
||||
"os" |
||||
"testing" |
||||
) |
||||
|
||||
// Tests heal message to be correct and properly formatted.
|
||||
func TestHealMsg(t *testing.T) {
	rootPath, err := newTestConfig(globalMinioDefaultRegion)
	if err != nil {
		t.Fatal("Unable to initialize test config", err)
	}
	defer os.RemoveAll(rootPath)
	storageDisks, fsDirs := prepareXLStorageDisks(t)
	errs := make([]error, len(storageDisks))
	defer removeRoots(fsDirs)
	nilDisks := deepCopyStorageDisks(storageDisks)
	nilDisks[5] = nil
	// NOTE(review): authErrs is built but never referenced by the test
	// cases below — dead setup, candidate for removal.
	authErrs := make([]error, len(storageDisks))
	authErrs[5] = errAuthentication

	args := []string{}
	for i := range storageDisks {
		args = append(args, fmt.Sprintf("http://10.1.10.%d:9000/d1", i+1))
	}
	endpoints := mustGetNewEndpointList(args...)

	testCases := []struct {
		endPoints    EndpointList
		storageDisks []StorageAPI
		serrs        []error // NOTE(review): never read by the assertions below.
	}{
		// Test - 1 for valid disks and errors.
		{endpoints, storageDisks, errs},
		// Test - 2 for one of the disks is nil.
		{endpoints, nilDisks, errs},
	}

	for i, testCase := range testCases {
		// Only non-emptiness of the formatted messages is asserted.
		msg := getHealMsg(testCase.endPoints, testCase.storageDisks)
		if msg == "" {
			t.Fatalf("Test: %d Unable to get heal message.", i+1)
		}
		msg = getStorageInitMsg("init", testCase.endPoints, testCase.storageDisks)
		if msg == "" {
			t.Fatalf("Test: %d Unable to get regular message.", i+1)
		}
	}
}
||||
|
||||
// Tests disk info, validates if we do return proper disk info structure
|
||||
// even in case of certain disks not available.
|
||||
func TestDisksInfo(t *testing.T) {
	storageDisks, fsDirs := prepareXLStorageDisks(t)
	defer removeRoots(fsDirs)

	testCases := []struct {
		storageDisks []StorageAPI
		onlineDisks  int // Expected count of reachable disks.
		offlineDisks int // Expected count of unreachable disks.
	}{
		{
			storageDisks: storageDisks,
			onlineDisks:  16,
			offlineDisks: 0,
		},
		{
			storageDisks: prepareNOfflineDisks(deepCopyStorageDisks(storageDisks), 4, t),
			onlineDisks:  12,
			offlineDisks: 4,
		},
		{
			storageDisks: prepareNOfflineDisks(deepCopyStorageDisks(storageDisks), 16, t),
			onlineDisks:  0,
			offlineDisks: 16,
		},
	}

	for i, testCase := range testCases {
		// Only the online/offline tallies are checked; the per-disk info
		// structs themselves are ignored.
		_, onlineDisks, offlineDisks := getDisksInfo(testCase.storageDisks)
		if testCase.onlineDisks != onlineDisks {
			t.Errorf("Test %d: Expected online disks %d, got %d", i+1, testCase.onlineDisks, onlineDisks)
		}
		if testCase.offlineDisks != offlineDisks {
			t.Errorf("Test %d: Expected offline disks %d, got %d", i+1, testCase.offlineDisks, offlineDisks)
		}
	}

}
@ -1,205 +0,0 @@ |
||||
/* |
||||
* Minio Cloud Storage, (C) 2016 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package cmd |
||||
|
||||
import ( |
||||
"os" |
||||
"testing" |
||||
) |
||||
|
||||
// String returns a human-readable name for each initialization action;
// used to make test failure output legible.
func (action InitActions) String() string {
	switch action {
	case InitObjectLayer:
		return "InitObjectLayer"
	case FormatDisks:
		return "FormatDisks"
	case WaitForFormatting:
		return "WaitForFormatting"
	case SuggestToHeal:
		return "SuggestToHeal"
	case WaitForAll:
		return "WaitForAll"
	case WaitForQuorum:
		return "WaitForQuorum"
	case WaitForConfig:
		return "WaitForConfig"
	case Abort:
		return "Abort"
	default:
		return "Unknown"
	}
}
||||
|
||||
func TestReduceInitXLErrs(t *testing.T) {
	_, fsDirs, err := prepareXL(4)
	if err != nil {
		t.Fatalf("Unable to initialize 'XL' object layer.")
	}

	// Remove all dirs.
	for _, dir := range fsDirs {
		defer os.RemoveAll(dir)
	}

	storageDisks, err := initStorageDisks(mustGetNewEndpointList(fsDirs...))
	if err != nil {
		t.Fatal("Unexpected error: ", err)
	}

	testCases := []struct {
		sErrs       []error // Per-disk init errors fed to reduceInitXLErrs.
		expectedErr string  // Expected combined message; "" means nil error.
	}{
		{[]error{nil, nil, nil, nil}, ""},
		{[]error{errUnformattedDisk, nil, nil, nil}, "\n[01/04] " + storageDisks[0].String() + " : unformatted disk found"},
		{[]error{errUnformattedDisk, errUnformattedDisk, nil, nil}, "\n[01/04] " + storageDisks[0].String() + " : unformatted disk found" + "\n[02/04] " + storageDisks[1].String() + " : unformatted disk found"},
		{[]error{errUnformattedDisk, errUnformattedDisk, errRPCAPIVersionUnsupported, nil}, storageDisks[2].String() + ": Unsupported rpc API version"},
	}
	for i, test := range testCases {
		actual := reduceInitXLErrs(storageDisks, test.sErrs)
		if test.expectedErr == "" && actual != nil {
			t.Errorf("Test %d expected no error but received `%s`", i+1, actual.Error())
		}
		// NOTE(review): if expectedErr is non-empty but actual is nil this
		// panics on actual.Error() instead of failing cleanly — consider a
		// nil guard.
		if test.expectedErr != "" && actual.Error() != test.expectedErr {
			t.Errorf("Test %d expected `%s` but received `%s`", i+1, test.expectedErr, actual.Error())
		}
	}
}
||||
|
||||
func TestPrepForInitXL(t *testing.T) { |
||||
// All disks are unformatted, a fresh setup.
|
||||
allUnformatted := []error{ |
||||
errUnformattedDisk, errUnformattedDisk, errUnformattedDisk, errUnformattedDisk, |
||||
errUnformattedDisk, errUnformattedDisk, errUnformattedDisk, errUnformattedDisk, |
||||
} |
||||
// All disks are formatted, possible restart of a node in a formatted setup.
|
||||
allFormatted := []error{ |
||||
nil, nil, nil, nil, |
||||
nil, nil, nil, nil, |
||||
} |
||||
// Quorum number of disks are formatted and rest are offline.
|
||||
quorumFormatted := []error{ |
||||
nil, nil, nil, nil, |
||||
nil, errDiskNotFound, errDiskNotFound, errDiskNotFound, |
||||
} |
||||
// Minority disks are corrupted, can be healed.
|
||||
minorityCorrupted := []error{ |
||||
errCorruptedFormat, errCorruptedFormat, errCorruptedFormat, nil, |
||||
nil, nil, nil, nil, |
||||
} |
||||
// Majority disks are corrupted, pretty bad setup.
|
||||
majorityCorrupted := []error{ |
||||
errCorruptedFormat, errCorruptedFormat, errCorruptedFormat, errCorruptedFormat, |
||||
errCorruptedFormat, nil, nil, nil, |
||||
} |
||||
// Quorum disks are unformatted, remaining yet to come online.
|
||||
quorumUnformatted := []error{ |
||||
errUnformattedDisk, errUnformattedDisk, errUnformattedDisk, errUnformattedDisk, |
||||
errUnformattedDisk, errDiskNotFound, errDiskNotFound, errDiskNotFound, |
||||
} |
||||
quorumUnformattedSomeCorrupted := []error{ |
||||
errUnformattedDisk, errUnformattedDisk, errUnformattedDisk, errUnformattedDisk, |
||||
errUnformattedDisk, errCorruptedFormat, errCorruptedFormat, errDiskNotFound, |
||||
} |
||||
|
||||
// Quorum number of disks not online yet.
|
||||
noQuourm := []error{ |
||||
errDiskNotFound, errDiskNotFound, errDiskNotFound, errDiskNotFound, |
||||
errDiskNotFound, nil, nil, nil, |
||||
} |
||||
// Invalid access key id.
|
||||
accessKeyIDErr := []error{ |
||||
errInvalidAccessKeyID, errInvalidAccessKeyID, errInvalidAccessKeyID, errInvalidAccessKeyID, |
||||
errInvalidAccessKeyID, nil, nil, nil, |
||||
} |
||||
// Authentication error.
|
||||
authenticationErr := []error{ |
||||
nil, nil, nil, errAuthentication, |
||||
errAuthentication, errAuthentication, errAuthentication, errAuthentication, |
||||
} |
||||
// Unsupported rpc API version.
|
||||
rpcUnsupportedVersion := []error{ |
||||
errRPCAPIVersionUnsupported, errRPCAPIVersionUnsupported, errRPCAPIVersionUnsupported, errRPCAPIVersionUnsupported, |
||||
errRPCAPIVersionUnsupported, nil, nil, nil, |
||||
} |
||||
// Server time mismatch.
|
||||
serverTimeMismatch := []error{ |
||||
errServerTimeMismatch, errServerTimeMismatch, errServerTimeMismatch, errServerTimeMismatch, |
||||
errServerTimeMismatch, nil, nil, nil, |
||||
} |
||||
// Collection of config errs.
|
||||
configErrs := []error{ |
||||
errServerTimeMismatch, errServerTimeMismatch, errRPCAPIVersionUnsupported, errAuthentication, |
||||
errInvalidAccessKeyID, nil, nil, nil, |
||||
} |
||||
// Suggest to heal under formatted disks in quorum.
|
||||
formattedDisksInQuorum := []error{ |
||||
nil, nil, nil, nil, |
||||
errUnformattedDisk, errUnformattedDisk, errDiskNotFound, errDiskNotFound, |
||||
} |
||||
// Wait for all under undecisive state.
|
||||
undecisiveErrs1 := []error{ |
||||
errDiskNotFound, nil, nil, nil, |
||||
errUnformattedDisk, errUnformattedDisk, errDiskNotFound, errDiskNotFound, |
||||
} |
||||
undecisiveErrs2 := []error{ |
||||
errDiskNotFound, errDiskNotFound, errDiskNotFound, errDiskNotFound, |
||||
errUnformattedDisk, errUnformattedDisk, errUnformattedDisk, errUnformattedDisk, |
||||
} |
||||
|
||||
testCases := []struct { |
||||
// Params for prepForInit().
|
||||
firstDisk bool |
||||
errs []error |
||||
diskCount int |
||||
action InitActions |
||||
}{ |
||||
// Local disks.
|
||||
{true, allFormatted, 8, InitObjectLayer}, |
||||
{true, quorumFormatted, 8, InitObjectLayer}, |
||||
{true, allUnformatted, 8, FormatDisks}, |
||||
{true, quorumUnformatted, 8, WaitForAll}, |
||||
{true, quorumUnformattedSomeCorrupted, 8, Abort}, |
||||
{true, noQuourm, 8, WaitForQuorum}, |
||||
{true, minorityCorrupted, 8, SuggestToHeal}, |
||||
{true, majorityCorrupted, 8, Abort}, |
||||
// Remote disks.
|
||||
{false, allFormatted, 8, InitObjectLayer}, |
||||
{false, quorumFormatted, 8, InitObjectLayer}, |
||||
{false, allUnformatted, 8, WaitForFormatting}, |
||||
{false, quorumUnformatted, 8, WaitForAll}, |
||||
{false, quorumUnformattedSomeCorrupted, 8, Abort}, |
||||
{false, noQuourm, 8, WaitForQuorum}, |
||||
{false, minorityCorrupted, 8, SuggestToHeal}, |
||||
{false, formattedDisksInQuorum, 8, SuggestToHeal}, |
||||
{false, majorityCorrupted, 8, Abort}, |
||||
{false, undecisiveErrs1, 8, WaitForAll}, |
||||
{false, undecisiveErrs2, 8, WaitForAll}, |
||||
// Config mistakes.
|
||||
{true, accessKeyIDErr, 8, WaitForConfig}, |
||||
{true, authenticationErr, 8, WaitForConfig}, |
||||
{true, rpcUnsupportedVersion, 8, WaitForConfig}, |
||||
{true, serverTimeMismatch, 8, WaitForConfig}, |
||||
{true, configErrs, 8, WaitForConfig}, |
||||
} |
||||
for i, test := range testCases { |
||||
actual := prepForInitXL(test.firstDisk, test.errs, test.diskCount) |
||||
if actual != test.action { |
||||
t.Errorf("Test %d expected %s but received %s\n", i+1, test.action, actual) |
||||
} |
||||
} |
||||
} |
@ -1,326 +0,0 @@ |
||||
/* |
||||
* Minio Cloud Storage, (C) 2016 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package cmd |
||||
|
||||
import ( |
||||
"time" |
||||
|
||||
"github.com/minio/minio/pkg/disk" |
||||
) |
||||
|
||||
const ( |
||||
// NOTE: Values indicated here are based on manual testing and
|
||||
// for best case scenarios under wide array of setups. If you
|
||||
// encounter changes in future feel free to change these values.
|
||||
|
||||
// Attempt to retry only this many number of times before
|
||||
// giving up on the remote disk entirely during initialization.
|
||||
globalStorageInitRetryThreshold = 2 |
||||
|
||||
// Attempt to retry only this many number of times before
|
||||
// giving up on the remote disk entirely after initialization.
|
||||
globalStorageRetryThreshold = 1 |
||||
|
||||
// Interval to check health status of a node whether it has
|
||||
// come back up online during initialization.
|
||||
globalStorageInitHealthCheckInterval = 15 * time.Minute |
||||
|
||||
// Interval to check health status of a node whether it has
|
||||
// come back up online.
|
||||
globalStorageHealthCheckInterval = 5 * time.Minute |
||||
) |
||||
|
||||
// Converts rpc.ServerError to underlying error. This function is
|
||||
// written so that the storageAPI errors are consistent across network
|
||||
// disks as well.
|
||||
func retryToStorageErr(err error) error { |
||||
if err == errDiskNotFoundFromNetError || err == errDiskNotFoundFromRPCShutdown { |
||||
return errDiskNotFound |
||||
} |
||||
return err |
||||
} |
||||
|
||||
// Retry storage is an instance of StorageAPI which
|
||||
// additionally verifies upon network shutdown if the
|
||||
// underlying storage is available and is really
|
||||
// formatted. After the initialization phase it will
|
||||
// also cache when the underlying storage is offline
|
||||
// to prevent needless calls and recheck the health of
|
||||
// underlying storage in regular intervals.
|
||||
type retryStorage struct { |
||||
remoteStorage StorageAPI |
||||
maxRetryAttempts int |
||||
retryInterval time.Duration |
||||
retryUnit time.Duration |
||||
retryCap time.Duration |
||||
offline bool // Mark whether node is offline
|
||||
offlineTimestamp time.Time // Last timestamp of checking status of node
|
||||
} |
||||
|
||||
// String representation of remoteStorage.
|
||||
func (f *retryStorage) String() string { |
||||
return f.remoteStorage.String() |
||||
} |
||||
|
||||
// Reconnects to underlying remote storage.
|
||||
func (f *retryStorage) Init() (err error) { |
||||
return retryToStorageErr(f.remoteStorage.Init()) |
||||
} |
||||
|
||||
// Closes the underlying remote storage connection.
|
||||
func (f *retryStorage) Close() (err error) { |
||||
return retryToStorageErr(f.remoteStorage.Close()) |
||||
} |
||||
|
||||
// Return whether the underlying remote storage is offline
|
||||
// and, if so, try to reconnect at regular intervals to
|
||||
// restore the connection
|
||||
func (f *retryStorage) IsOffline() bool { |
||||
// Check if offline and whether enough time has lapsed since most recent check
|
||||
if f.offline && UTCNow().Sub(f.offlineTimestamp) >= f.retryInterval { |
||||
f.offlineTimestamp = UTCNow() // reset timestamp
|
||||
|
||||
if e := f.reInit(nil); e == nil { |
||||
// Connection has been re-established
|
||||
f.offline = false // Mark node as back online
|
||||
} |
||||
} |
||||
return f.offline |
||||
} |
||||
|
||||
// DiskInfo - a retryable implementation of disk info.
|
||||
func (f *retryStorage) DiskInfo() (info disk.Info, err error) { |
||||
if f.IsOffline() { |
||||
return info, errDiskNotFound |
||||
} |
||||
info, err = f.remoteStorage.DiskInfo() |
||||
if f.reInitUponDiskNotFound(err) { |
||||
info, err = f.remoteStorage.DiskInfo() |
||||
return info, retryToStorageErr(err) |
||||
} |
||||
return info, retryToStorageErr(err) |
||||
} |
||||
|
||||
// MakeVol - a retryable implementation of creating a volume.
|
||||
func (f *retryStorage) MakeVol(volume string) (err error) { |
||||
if f.IsOffline() { |
||||
return errDiskNotFound |
||||
} |
||||
err = f.remoteStorage.MakeVol(volume) |
||||
if f.reInitUponDiskNotFound(err) { |
||||
return retryToStorageErr(f.remoteStorage.MakeVol(volume)) |
||||
} |
||||
return retryToStorageErr(err) |
||||
} |
||||
|
||||
// ListVols - a retryable implementation of listing all the volumes.
|
||||
func (f *retryStorage) ListVols() (vols []VolInfo, err error) { |
||||
if f.IsOffline() { |
||||
return vols, errDiskNotFound |
||||
} |
||||
vols, err = f.remoteStorage.ListVols() |
||||
if f.reInitUponDiskNotFound(err) { |
||||
vols, err = f.remoteStorage.ListVols() |
||||
return vols, retryToStorageErr(err) |
||||
} |
||||
return vols, retryToStorageErr(err) |
||||
} |
||||
|
||||
// StatVol - a retryable implementation of stating a volume.
|
||||
func (f *retryStorage) StatVol(volume string) (vol VolInfo, err error) { |
||||
if f.IsOffline() { |
||||
return vol, errDiskNotFound |
||||
} |
||||
vol, err = f.remoteStorage.StatVol(volume) |
||||
if f.reInitUponDiskNotFound(err) { |
||||
vol, err = f.remoteStorage.StatVol(volume) |
||||
return vol, retryToStorageErr(err) |
||||
} |
||||
return vol, retryToStorageErr(err) |
||||
} |
||||
|
||||
// DeleteVol - a retryable implementation of deleting a volume.
|
||||
func (f *retryStorage) DeleteVol(volume string) (err error) { |
||||
if f.IsOffline() { |
||||
return errDiskNotFound |
||||
} |
||||
err = f.remoteStorage.DeleteVol(volume) |
||||
if f.reInitUponDiskNotFound(err) { |
||||
return retryToStorageErr(f.remoteStorage.DeleteVol(volume)) |
||||
} |
||||
return retryToStorageErr(err) |
||||
} |
||||
|
||||
// PrepareFile - a retryable implementation of preparing a file.
|
||||
func (f *retryStorage) PrepareFile(volume, path string, length int64) (err error) { |
||||
if f.IsOffline() { |
||||
return errDiskNotFound |
||||
} |
||||
err = f.remoteStorage.PrepareFile(volume, path, length) |
||||
if f.reInitUponDiskNotFound(err) { |
||||
return retryToStorageErr(f.remoteStorage.PrepareFile(volume, path, length)) |
||||
} |
||||
return retryToStorageErr(err) |
||||
} |
||||
|
||||
// AppendFile - a retryable implementation of append to a file.
|
||||
func (f *retryStorage) AppendFile(volume, path string, buffer []byte) (err error) { |
||||
if f.IsOffline() { |
||||
return errDiskNotFound |
||||
} |
||||
err = f.remoteStorage.AppendFile(volume, path, buffer) |
||||
if f.reInitUponDiskNotFound(err) { |
||||
return retryToStorageErr(f.remoteStorage.AppendFile(volume, path, buffer)) |
||||
} |
||||
return retryToStorageErr(err) |
||||
} |
||||
|
||||
// StatFile - a retryable implementation of stating a file.
|
||||
func (f *retryStorage) StatFile(volume, path string) (fileInfo FileInfo, err error) { |
||||
if f.IsOffline() { |
||||
return fileInfo, errDiskNotFound |
||||
} |
||||
fileInfo, err = f.remoteStorage.StatFile(volume, path) |
||||
if f.reInitUponDiskNotFound(err) { |
||||
fileInfo, err = f.remoteStorage.StatFile(volume, path) |
||||
return fileInfo, retryToStorageErr(err) |
||||
} |
||||
return fileInfo, retryToStorageErr(err) |
||||
} |
||||
|
||||
// ReadAll - a retryable implementation of reading all the content from a file.
|
||||
func (f *retryStorage) ReadAll(volume, path string) (buf []byte, err error) { |
||||
if f.IsOffline() { |
||||
return buf, errDiskNotFound |
||||
} |
||||
buf, err = f.remoteStorage.ReadAll(volume, path) |
||||
if f.reInitUponDiskNotFound(err) { |
||||
buf, err = f.remoteStorage.ReadAll(volume, path) |
||||
return buf, retryToStorageErr(err) |
||||
} |
||||
return buf, retryToStorageErr(err) |
||||
} |
||||
|
||||
// ReadFile - a retryable implementation of reading at offset from a file.
|
||||
func (f *retryStorage) ReadFile(volume, path string, offset int64, buffer []byte, verifier *BitrotVerifier) (m int64, err error) { |
||||
if f.IsOffline() { |
||||
return m, errDiskNotFound |
||||
} |
||||
m, err = f.remoteStorage.ReadFile(volume, path, offset, buffer, verifier) |
||||
if f.reInitUponDiskNotFound(err) { |
||||
m, err = f.remoteStorage.ReadFile(volume, path, offset, buffer, verifier) |
||||
return m, retryToStorageErr(err) |
||||
} |
||||
return m, retryToStorageErr(err) |
||||
} |
||||
|
||||
// ListDir - a retryable implementation of listing directory entries.
|
||||
func (f *retryStorage) ListDir(volume, path string) (entries []string, err error) { |
||||
if f.IsOffline() { |
||||
return entries, errDiskNotFound |
||||
} |
||||
entries, err = f.remoteStorage.ListDir(volume, path) |
||||
if f.reInitUponDiskNotFound(err) { |
||||
entries, err = f.remoteStorage.ListDir(volume, path) |
||||
return entries, retryToStorageErr(err) |
||||
} |
||||
return entries, retryToStorageErr(err) |
||||
} |
||||
|
||||
// DeleteFile - a retryable implementation of deleting a file.
|
||||
func (f *retryStorage) DeleteFile(volume, path string) (err error) { |
||||
if f.IsOffline() { |
||||
return errDiskNotFound |
||||
} |
||||
err = f.remoteStorage.DeleteFile(volume, path) |
||||
if f.reInitUponDiskNotFound(err) { |
||||
return retryToStorageErr(f.remoteStorage.DeleteFile(volume, path)) |
||||
} |
||||
return retryToStorageErr(err) |
||||
} |
||||
|
||||
// RenameFile - a retryable implementation of renaming a file.
|
||||
func (f *retryStorage) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) (err error) { |
||||
if f.IsOffline() { |
||||
return errDiskNotFound |
||||
} |
||||
err = f.remoteStorage.RenameFile(srcVolume, srcPath, dstVolume, dstPath) |
||||
if f.reInitUponDiskNotFound(err) { |
||||
return retryToStorageErr(f.remoteStorage.RenameFile(srcVolume, srcPath, dstVolume, dstPath)) |
||||
} |
||||
return retryToStorageErr(err) |
||||
} |
||||
|
||||
// Try to reinitialize the connection when we have some form of DiskNotFound error
|
||||
func (f *retryStorage) reInitUponDiskNotFound(err error) bool { |
||||
if err == errDiskNotFound || err == errDiskNotFoundFromNetError || err == errDiskNotFoundFromRPCShutdown { |
||||
return f.reInit(err) == nil |
||||
} |
||||
return false |
||||
} |
||||
|
||||
// Connect and attempt to load the format from a disconnected node.
|
||||
// Additionally upon failure, we retry maxRetryAttempts times before
|
||||
// giving up. Essentially as a whole it would mean we are infact
|
||||
// performing 1 + maxRetryAttempts times reInit.
|
||||
func (f *retryStorage) reInit(e error) (err error) { |
||||
// Check whether node has gone offline.
|
||||
if UTCNow().Sub(f.offlineTimestamp) >= f.retryInterval { |
||||
if e == errDiskNotFoundFromNetError { // Make node offline due to network error
|
||||
f.offline = true // Marking node offline
|
||||
f.offlineTimestamp = UTCNow() |
||||
return errDiskNotFound |
||||
} |
||||
// Continue for other errors like RPC shutdown (and retry connection below)
|
||||
} |
||||
|
||||
// Close the underlying connection.
|
||||
f.remoteStorage.Close() // Error here is purposefully ignored.
|
||||
|
||||
// Done channel is used to close any lingering retry routine, as soon
|
||||
// as this function returns.
|
||||
doneCh := make(chan struct{}) |
||||
defer close(doneCh) |
||||
|
||||
for i := range newRetryTimer(f.retryUnit, f.retryCap, doneCh) { |
||||
// Initialize and make a new login attempt.
|
||||
err = f.remoteStorage.Init() |
||||
if err != nil { |
||||
// No need to return error until the retry count
|
||||
// threshold has reached.
|
||||
if i < f.maxRetryAttempts { |
||||
continue |
||||
} |
||||
return err |
||||
} |
||||
|
||||
// Attempt to load format to see if the disk is really
|
||||
// a formatted disk and part of the cluster.
|
||||
if _, err = loadFormat(f.remoteStorage); err != nil { |
||||
// No need to return error until the retry count
|
||||
// threshold has reached.
|
||||
if i < f.maxRetryAttempts { |
||||
continue |
||||
} |
||||
return err |
||||
} |
||||
|
||||
// Login and loading format was a success, break and proceed forward.
|
||||
break |
||||
} |
||||
return err |
||||
} |
@ -1,455 +0,0 @@ |
||||
/* |
||||
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package cmd |
||||
|
||||
import ( |
||||
"bytes" |
||||
"errors" |
||||
"os" |
||||
"reflect" |
||||
"testing" |
||||
"time" |
||||
|
||||
sha256 "github.com/minio/sha256-simd" |
||||
) |
||||
|
||||
// Tests retry storage.
|
||||
func TestRetryStorage(t *testing.T) { |
||||
root, err := newTestConfig(globalMinioDefaultRegion) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(root) |
||||
|
||||
originalStorageDisks, disks := prepareXLStorageDisks(t) |
||||
defer removeRoots(disks) |
||||
|
||||
var storageDisks = make([]StorageAPI, len(originalStorageDisks)) |
||||
for i := range originalStorageDisks { |
||||
retryDisk, ok := originalStorageDisks[i].(*retryStorage) |
||||
if !ok { |
||||
t.Fatal("storage disk is not *retryStorage type") |
||||
} |
||||
storageDisks[i] = &retryStorage{ |
||||
remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ |
||||
1: errDiskNotFound, |
||||
}, nil), |
||||
maxRetryAttempts: 1, |
||||
retryUnit: time.Millisecond, |
||||
retryCap: time.Millisecond * 10, |
||||
} |
||||
} |
||||
|
||||
// Validate all the conditions for retrying calls.
|
||||
|
||||
storageDisks = make([]StorageAPI, len(originalStorageDisks)) |
||||
for i := range originalStorageDisks { |
||||
retryDisk, ok := originalStorageDisks[i].(*retryStorage) |
||||
if !ok { |
||||
t.Fatal("storage disk is not *retryStorage type") |
||||
} |
||||
storageDisks[i] = &retryStorage{ |
||||
remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ |
||||
1: errDiskNotFound, |
||||
}, nil), |
||||
maxRetryAttempts: 1, |
||||
retryUnit: time.Millisecond, |
||||
retryCap: time.Millisecond * 10, |
||||
} |
||||
} |
||||
|
||||
for _, disk := range storageDisks { |
||||
err = disk.Init() |
||||
if err != errDiskNotFound { |
||||
t.Fatal("Expected errDiskNotFound, got", err) |
||||
} |
||||
} |
||||
|
||||
for _, disk := range storageDisks { |
||||
_, err = disk.DiskInfo() |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
|
||||
storageDisks = make([]StorageAPI, len(originalStorageDisks)) |
||||
for i := range originalStorageDisks { |
||||
retryDisk, ok := originalStorageDisks[i].(*retryStorage) |
||||
if !ok { |
||||
t.Fatal("storage disk is not *retryStorage type") |
||||
} |
||||
storageDisks[i] = &retryStorage{ |
||||
remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ |
||||
1: errDiskNotFound, |
||||
}, nil), |
||||
maxRetryAttempts: 1, |
||||
retryUnit: time.Millisecond, |
||||
retryCap: time.Millisecond * 10, |
||||
} |
||||
} |
||||
|
||||
for _, disk := range storageDisks { |
||||
if err = disk.MakeVol("existent"); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if _, err = disk.StatVol("existent"); err == errVolumeNotFound { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
|
||||
storageDisks = make([]StorageAPI, len(originalStorageDisks)) |
||||
for i := range originalStorageDisks { |
||||
retryDisk, ok := originalStorageDisks[i].(*retryStorage) |
||||
if !ok { |
||||
t.Fatal("storage disk is not *retryStorage type") |
||||
} |
||||
storageDisks[i] = &retryStorage{ |
||||
remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ |
||||
1: errDiskNotFound, |
||||
}, nil), |
||||
maxRetryAttempts: 1, |
||||
retryUnit: time.Millisecond, |
||||
retryCap: time.Millisecond * 10, |
||||
} |
||||
} |
||||
|
||||
for _, disk := range storageDisks { |
||||
if _, err = disk.StatVol("existent"); err == errVolumeNotFound { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
|
||||
storageDisks = make([]StorageAPI, len(originalStorageDisks)) |
||||
for i := range originalStorageDisks { |
||||
retryDisk, ok := originalStorageDisks[i].(*retryStorage) |
||||
if !ok { |
||||
t.Fatal("storage disk is not *retryStorage type") |
||||
} |
||||
storageDisks[i] = &retryStorage{ |
||||
remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ |
||||
1: errDiskNotFound, |
||||
}, nil), |
||||
maxRetryAttempts: 1, |
||||
retryUnit: time.Millisecond, |
||||
retryCap: time.Millisecond * 10, |
||||
} |
||||
} |
||||
|
||||
for _, disk := range storageDisks { |
||||
if _, err = disk.ListVols(); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
|
||||
storageDisks = make([]StorageAPI, len(originalStorageDisks)) |
||||
for i := range originalStorageDisks { |
||||
retryDisk, ok := originalStorageDisks[i].(*retryStorage) |
||||
if !ok { |
||||
t.Fatal("storage disk is not *retryStorage type") |
||||
} |
||||
storageDisks[i] = &retryStorage{ |
||||
remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ |
||||
1: errDiskNotFound, |
||||
}, nil), |
||||
maxRetryAttempts: 1, |
||||
retryUnit: time.Millisecond, |
||||
retryCap: time.Millisecond * 10, |
||||
} |
||||
} |
||||
|
||||
for _, disk := range storageDisks { |
||||
if err = disk.DeleteVol("existent"); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if str := disk.String(); str == "" { |
||||
t.Fatal("String method for disk cannot be empty.") |
||||
} |
||||
} |
||||
|
||||
storageDisks = make([]StorageAPI, len(originalStorageDisks)) |
||||
for i := range originalStorageDisks { |
||||
retryDisk, ok := originalStorageDisks[i].(*retryStorage) |
||||
if !ok { |
||||
t.Fatal("storage disk is not *retryStorage type") |
||||
} |
||||
storageDisks[i] = &retryStorage{ |
||||
remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ |
||||
1: errDiskNotFound, |
||||
}, nil), |
||||
maxRetryAttempts: 1, |
||||
retryUnit: time.Millisecond, |
||||
retryCap: time.Millisecond * 10, |
||||
} |
||||
} |
||||
|
||||
for _, disk := range storageDisks { |
||||
if err = disk.MakeVol("existent"); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
|
||||
storageDisks = make([]StorageAPI, len(originalStorageDisks)) |
||||
for i := range originalStorageDisks { |
||||
retryDisk, ok := originalStorageDisks[i].(*retryStorage) |
||||
if !ok { |
||||
t.Fatal("storage disk is not *retryStorage type") |
||||
} |
||||
storageDisks[i] = &retryStorage{ |
||||
remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ |
||||
1: errDiskNotFound, |
||||
}, nil), |
||||
maxRetryAttempts: 1, |
||||
retryUnit: time.Millisecond, |
||||
retryCap: time.Millisecond * 10, |
||||
} |
||||
} |
||||
|
||||
for _, disk := range storageDisks { |
||||
if err = disk.PrepareFile("existent", "path", 10); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
|
||||
storageDisks = make([]StorageAPI, len(originalStorageDisks)) |
||||
for i := range originalStorageDisks { |
||||
retryDisk, ok := originalStorageDisks[i].(*retryStorage) |
||||
if !ok { |
||||
t.Fatal("storage disk is not *retryStorage type") |
||||
} |
||||
storageDisks[i] = &retryStorage{ |
||||
remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ |
||||
1: errDiskNotFound, |
||||
}, nil), |
||||
maxRetryAttempts: 1, |
||||
retryUnit: time.Millisecond, |
||||
retryCap: time.Millisecond * 10, |
||||
} |
||||
} |
||||
|
||||
for _, disk := range storageDisks { |
||||
if err = disk.AppendFile("existent", "path", []byte("Hello, World")); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
|
||||
storageDisks = make([]StorageAPI, len(originalStorageDisks)) |
||||
for i := range originalStorageDisks { |
||||
retryDisk, ok := originalStorageDisks[i].(*retryStorage) |
||||
if !ok { |
||||
t.Fatal("storage disk is not *retryStorage type") |
||||
} |
||||
storageDisks[i] = &retryStorage{ |
||||
remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ |
||||
1: errDiskNotFound, |
||||
}, nil), |
||||
maxRetryAttempts: 1, |
||||
retryUnit: time.Millisecond, |
||||
retryCap: time.Millisecond * 10, |
||||
} |
||||
} |
||||
|
||||
for _, disk := range storageDisks { |
||||
var buf1 []byte |
||||
if buf1, err = disk.ReadAll("existent", "path"); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if !bytes.Equal(buf1, []byte("Hello, World")) { |
||||
t.Fatalf("Expected `Hello, World`, got %s", string(buf1)) |
||||
} |
||||
} |
||||
|
||||
storageDisks = make([]StorageAPI, len(originalStorageDisks)) |
||||
for i := range originalStorageDisks { |
||||
retryDisk, ok := originalStorageDisks[i].(*retryStorage) |
||||
if !ok { |
||||
t.Fatal("storage disk is not *retryStorage type") |
||||
} |
||||
storageDisks[i] = &retryStorage{ |
||||
remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ |
||||
1: errDiskNotFound, |
||||
}, nil), |
||||
maxRetryAttempts: 1, |
||||
retryUnit: time.Millisecond, |
||||
retryCap: time.Millisecond * 10, |
||||
} |
||||
} |
||||
|
||||
for _, disk := range storageDisks { |
||||
var buf2 = make([]byte, 5) |
||||
var n int64 |
||||
if n, err = disk.ReadFile("existent", "path", 7, buf2, nil); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if err != nil { |
||||
t.Error("Error in ReadFile", err) |
||||
} |
||||
if n != 5 { |
||||
t.Fatalf("Expected 5, got %d", n) |
||||
} |
||||
if !bytes.Equal(buf2, []byte("World")) { |
||||
t.Fatalf("Expected `World`, got %s", string(buf2)) |
||||
} |
||||
} |
||||
|
||||
sha256Hash := func(b []byte) []byte { |
||||
k := sha256.Sum256(b) |
||||
return k[:] |
||||
} |
||||
for _, disk := range storageDisks { |
||||
var buf2 = make([]byte, 5) |
||||
verifier := NewBitrotVerifier(SHA256, sha256Hash([]byte("Hello, World"))) |
||||
var n int64 |
||||
if n, err = disk.ReadFile("existent", "path", 7, buf2, verifier); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if err != nil { |
||||
t.Error("Error in ReadFile with bitrot verification", err) |
||||
} |
||||
if n != 5 { |
||||
t.Fatalf("Expected 5, got %d", n) |
||||
} |
||||
if !bytes.Equal(buf2, []byte("World")) { |
||||
t.Fatalf("Expected `World`, got %s", string(buf2)) |
||||
} |
||||
} |
||||
|
||||
storageDisks = make([]StorageAPI, len(originalStorageDisks)) |
||||
for i := range originalStorageDisks { |
||||
retryDisk, ok := originalStorageDisks[i].(*retryStorage) |
||||
if !ok { |
||||
t.Fatal("storage disk is not *retryStorage type") |
||||
} |
||||
storageDisks[i] = &retryStorage{ |
||||
remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ |
||||
1: errDiskNotFound, |
||||
}, nil), |
||||
maxRetryAttempts: 1, |
||||
retryUnit: time.Millisecond, |
||||
retryCap: time.Millisecond * 10, |
||||
} |
||||
} |
||||
|
||||
for _, disk := range storageDisks { |
||||
if err = disk.RenameFile("existent", "path", "existent", "new-path"); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if _, err = disk.StatFile("existent", "new-path"); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
|
||||
storageDisks = make([]StorageAPI, len(originalStorageDisks)) |
||||
for i := range originalStorageDisks { |
||||
retryDisk, ok := originalStorageDisks[i].(*retryStorage) |
||||
if !ok { |
||||
t.Fatal("storage disk is not *retryStorage type") |
||||
} |
||||
storageDisks[i] = &retryStorage{ |
||||
remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ |
||||
1: errDiskNotFound, |
||||
}, nil), |
||||
maxRetryAttempts: 1, |
||||
retryUnit: time.Millisecond, |
||||
retryCap: time.Millisecond * 10, |
||||
} |
||||
} |
||||
|
||||
for _, disk := range storageDisks { |
||||
if _, err = disk.StatFile("existent", "new-path"); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
|
||||
storageDisks = make([]StorageAPI, len(originalStorageDisks)) |
||||
for i := range originalStorageDisks { |
||||
retryDisk, ok := originalStorageDisks[i].(*retryStorage) |
||||
if !ok { |
||||
t.Fatal("storage disk is not *retryStorage type") |
||||
} |
||||
storageDisks[i] = &retryStorage{ |
||||
remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ |
||||
1: errDiskNotFound, |
||||
}, nil), |
||||
maxRetryAttempts: 1, |
||||
retryUnit: time.Millisecond, |
||||
retryCap: time.Millisecond * 10, |
||||
} |
||||
} |
||||
|
||||
for _, disk := range storageDisks { |
||||
var entries []string |
||||
if entries, err = disk.ListDir("existent", ""); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if !reflect.DeepEqual(entries, []string{"new-path"}) { |
||||
t.Fatalf("Expected []string{\"new-path\"}, got %s", entries) |
||||
} |
||||
} |
||||
|
||||
storageDisks = make([]StorageAPI, len(originalStorageDisks)) |
||||
for i := range originalStorageDisks { |
||||
retryDisk, ok := originalStorageDisks[i].(*retryStorage) |
||||
if !ok { |
||||
t.Fatal("storage disk is not *retryStorage type") |
||||
} |
||||
storageDisks[i] = &retryStorage{ |
||||
remoteStorage: newNaughtyDisk(retryDisk, map[int]error{ |
||||
1: errDiskNotFound, |
||||
}, nil), |
||||
maxRetryAttempts: 1, |
||||
retryUnit: time.Millisecond, |
||||
retryCap: time.Millisecond * 10, |
||||
} |
||||
} |
||||
|
||||
for _, disk := range storageDisks { |
||||
if err = disk.DeleteFile("existent", "new-path"); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if err = disk.DeleteVol("existent"); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
} |
||||
|
||||
// Tests reply storage error transformation.
|
||||
func TestReplyStorageErr(t *testing.T) { |
||||
unknownErr := errors.New("Unknown error") |
||||
testCases := []struct { |
||||
expectedErr error |
||||
err error |
||||
}{ |
||||
{ |
||||
expectedErr: errDiskNotFound, |
||||
err: errDiskNotFoundFromNetError, |
||||
}, |
||||
{ |
||||
expectedErr: errDiskNotFound, |
||||
err: errDiskNotFoundFromRPCShutdown, |
||||
}, |
||||
{ |
||||
expectedErr: unknownErr, |
||||
err: unknownErr, |
||||
}, |
||||
} |
||||
for i, testCase := range testCases { |
||||
resultErr := retryToStorageErr(testCase.err) |
||||
if testCase.expectedErr != resultErr { |
||||
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.expectedErr, resultErr) |
||||
} |
||||
} |
||||
} |
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,191 @@ |
||||
/* |
||||
* Minio Cloud Storage, (C) 2017 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package cmd |
||||
|
||||
import ( |
||||
"os" |
||||
"path/filepath" |
||||
"testing" |
||||
) |
||||
|
||||
// TestCrcHashMod - test crc hash.
|
||||
func TestCrcHashMod(t *testing.T) { |
||||
testCases := []struct { |
||||
objectName string |
||||
crcHash int |
||||
}{ |
||||
// cases which should pass the test.
|
||||
// passing in valid object name.
|
||||
{"object", 12}, |
||||
{"The Shining Script <v1>.pdf", 14}, |
||||
{"Cost Benefit Analysis (2009-2010).pptx", 13}, |
||||
{"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", 1}, |
||||
{"SHØRT", 9}, |
||||
{"There are far too many object names, and far too few bucket names!", 13}, |
||||
{"a/b/c/", 1}, |
||||
{"/a/b/c", 4}, |
||||
{string([]byte{0xff, 0xfe, 0xfd}), 13}, |
||||
} |
||||
|
||||
// Tests hashing order to be consistent.
|
||||
for i, testCase := range testCases { |
||||
if crcHashElement := hashKey("CRCMOD", testCase.objectName, 16); crcHashElement != testCase.crcHash { |
||||
t.Errorf("Test case %d: Expected \"%v\" but failed \"%v\"", i+1, testCase.crcHash, crcHashElement) |
||||
} |
||||
} |
||||
|
||||
if crcHashElement := hashKey("CRCMOD", "This will fail", -1); crcHashElement != -1 { |
||||
t.Errorf("Test: Expected \"-1\" but got \"%v\"", crcHashElement) |
||||
} |
||||
|
||||
if crcHashElement := hashKey("CRCMOD", "This will fail", 0); crcHashElement != -1 { |
||||
t.Errorf("Test: Expected \"-1\" but got \"%v\"", crcHashElement) |
||||
} |
||||
|
||||
if crcHashElement := hashKey("UNKNOWN", "This will fail", 0); crcHashElement != -1 { |
||||
t.Errorf("Test: Expected \"-1\" but got \"%v\"", crcHashElement) |
||||
} |
||||
} |
||||
|
||||
// TestNewXL - tests initialization of all input disks
|
||||
// and constructs a valid `XL` object
|
||||
func TestNewXLSets(t *testing.T) { |
||||
var nDisks = 16 // Maximum disks.
|
||||
var erasureDisks []string |
||||
for i := 0; i < nDisks; i++ { |
||||
// Do not attempt to create this path, the test validates
|
||||
// so that newXLSets initializes non existing paths
|
||||
// and successfully returns initialized object layer.
|
||||
disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) |
||||
erasureDisks = append(erasureDisks, disk) |
||||
defer os.RemoveAll(disk) |
||||
} |
||||
|
||||
endpoints := mustGetNewEndpointList(erasureDisks...) |
||||
_, err := waitForFormatXL(true, endpoints, 0, 16) |
||||
if err != errInvalidArgument { |
||||
t.Fatalf("Expecting error, got %s", err) |
||||
} |
||||
|
||||
_, err = waitForFormatXL(true, nil, 1, 16) |
||||
if err != errInvalidArgument { |
||||
t.Fatalf("Expecting error, got %s", err) |
||||
} |
||||
|
||||
// Initializes all erasure disks
|
||||
format, err := waitForFormatXL(true, endpoints, 1, 16) |
||||
if err != nil { |
||||
t.Fatalf("Unable to format disks for erasure, %s", err) |
||||
} |
||||
|
||||
if _, err := newXLSets(endpoints, format, 1, 16); err != nil { |
||||
t.Fatalf("Unable to initialize erasure") |
||||
} |
||||
} |
||||
|
||||
// TestStorageInfoSets - tests storage info for erasure coded sets of disks.
|
||||
func TestStorageInfoSets(t *testing.T) { |
||||
var nDisks = 16 // Maximum disks.
|
||||
var erasureDisks []string |
||||
for i := 0; i < nDisks; i++ { |
||||
// Do not attempt to create this path, the test validates
|
||||
// so that newXLSets initializes non existing paths
|
||||
// and successfully returns initialized object layer.
|
||||
disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) |
||||
erasureDisks = append(erasureDisks, disk) |
||||
defer os.RemoveAll(disk) |
||||
} |
||||
|
||||
endpoints := mustGetNewEndpointList(erasureDisks...) |
||||
// Initializes all erasure disks
|
||||
format, err := waitForFormatXL(true, endpoints, 1, 16) |
||||
if err != nil { |
||||
t.Fatalf("Unable to format disks for erasure, %s", err) |
||||
} |
||||
|
||||
objLayer, err := newXLSets(endpoints, format, 1, 16) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// Get storage info first attempt.
|
||||
disks16Info := objLayer.StorageInfo() |
||||
|
||||
// This test assumes homogeneity between all disks,
|
||||
// i.e if we loose one disk the effective storage
|
||||
// usage values is assumed to decrease. If we have
|
||||
// heterogenous environment this is not true all the time.
|
||||
if disks16Info.Free <= 0 { |
||||
t.Fatalf("Diskinfo total free values should be greater 0") |
||||
} |
||||
if disks16Info.Total <= 0 { |
||||
t.Fatalf("Diskinfo total values should be greater 0") |
||||
} |
||||
} |
||||
|
||||
// TestHashedLayer - tests the hashed layer which will be returned
|
||||
// consistently for a given object name.
|
||||
func TestHashedLayer(t *testing.T) { |
||||
rootPath, err := newTestConfig(globalMinioDefaultRegion) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(rootPath) |
||||
|
||||
var objs []*xlObjects |
||||
|
||||
for i := 0; i < 16; i++ { |
||||
obj, fsDirs, err := prepareXL16() |
||||
if err != nil { |
||||
t.Fatal("Unable to initialize 'XL' object layer.", err) |
||||
} |
||||
|
||||
// Remove all dirs.
|
||||
for _, dir := range fsDirs { |
||||
defer os.RemoveAll(dir) |
||||
} |
||||
|
||||
objs = append(objs, obj.(*xlObjects)) |
||||
} |
||||
|
||||
sets := &xlSets{sets: objs, distributionAlgo: "CRCMOD"} |
||||
|
||||
testCases := []struct { |
||||
objectName string |
||||
expectedObj *xlObjects |
||||
}{ |
||||
// cases which should pass the test.
|
||||
// passing in valid object name.
|
||||
{"object", objs[12]}, |
||||
{"The Shining Script <v1>.pdf", objs[14]}, |
||||
{"Cost Benefit Analysis (2009-2010).pptx", objs[13]}, |
||||
{"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", objs[1]}, |
||||
{"SHØRT", objs[9]}, |
||||
{"There are far too many object names, and far too few bucket names!", objs[13]}, |
||||
{"a/b/c/", objs[1]}, |
||||
{"/a/b/c", objs[4]}, |
||||
{string([]byte{0xff, 0xfe, 0xfd}), objs[13]}, |
||||
} |
||||
|
||||
// Tests hashing order to be consistent.
|
||||
for i, testCase := range testCases { |
||||
gotObj := sets.getHashedSet(testCase.objectName) |
||||
if gotObj != testCase.expectedObj { |
||||
t.Errorf("Test case %d: Expected \"%#v\" but failed \"%#v\"", i+1, testCase.expectedObj, gotObj) |
||||
} |
||||
} |
||||
} |
@ -1,144 +0,0 @@ |
||||
/* |
||||
* Minio Cloud Storage (C) 2016, 2017 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package cmd |
||||
|
||||
import ( |
||||
"bytes" |
||||
"os" |
||||
"strconv" |
||||
"testing" |
||||
) |
||||
|
||||
// TestListObjectsHeal - Tests ListObjectsHeal API for XL
|
||||
func TestListObjectsHeal(t *testing.T) { |
||||
|
||||
initNSLock(false) |
||||
|
||||
rootPath, err := newTestConfig(globalMinioDefaultRegion) |
||||
if err != nil { |
||||
t.Fatalf("Init Test config failed") |
||||
} |
||||
// remove the root directory after the test ends.
|
||||
defer os.RemoveAll(rootPath) |
||||
|
||||
// Create an instance of xl backend
|
||||
xl, fsDirs, err := prepareXL16() |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
// Cleanup backend directories
|
||||
defer removeRoots(fsDirs) |
||||
|
||||
bucketName := "bucket" |
||||
objName := "obj" |
||||
|
||||
// Create test bucket
|
||||
err = xl.MakeBucketWithLocation(bucketName, "") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
// Put 5 objects under sane dir
|
||||
for i := 0; i < 5; i++ { |
||||
_, err = xl.PutObject(bucketName, "sane/"+objName+strconv.Itoa(i), |
||||
mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil) |
||||
if err != nil { |
||||
t.Fatalf("XL Object upload failed: <ERROR> %s", err) |
||||
} |
||||
} |
||||
// Put 5 objects under unsane/subdir dir
|
||||
for i := 0; i < 5; i++ { |
||||
_, err = xl.PutObject(bucketName, "unsane/subdir/"+objName+strconv.Itoa(i), |
||||
mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil) |
||||
if err != nil { |
||||
t.Fatalf("XL Object upload failed: <ERROR> %s", err) |
||||
} |
||||
} |
||||
|
||||
// Structure for testing
|
||||
type testData struct { |
||||
bucket string |
||||
object string |
||||
marker string |
||||
delimiter string |
||||
maxKeys int |
||||
expectedErr error |
||||
foundObjs int |
||||
} |
||||
|
||||
// Generic function for testing ListObjectsHeal, needs testData as a parameter
|
||||
testFunc := func(testCase testData, testRank int) { |
||||
objectsNeedHeal, foundErr := xl.ListObjectsHeal(testCase.bucket, testCase.object, testCase.marker, testCase.delimiter, testCase.maxKeys) |
||||
if testCase.expectedErr == nil && foundErr != nil { |
||||
t.Fatalf("Test %d: Expected nil error, found: %v", testRank, foundErr) |
||||
} |
||||
if testCase.expectedErr != nil && foundErr.Error() != testCase.expectedErr.Error() { |
||||
t.Fatalf("Test %d: Found unexpected error: %v, expected: %v", testRank, foundErr, testCase.expectedErr) |
||||
|
||||
} |
||||
if len(objectsNeedHeal.Objects) != testCase.foundObjs { |
||||
t.Fatalf("Test %d: Found unexpected number of objects: %d, expected: %v", testRank, len(objectsNeedHeal.Objects), testCase.foundObjs) |
||||
} |
||||
} |
||||
|
||||
// Start tests
|
||||
|
||||
testCases := []testData{ |
||||
// Wrong bucket name
|
||||
{"foobucket", "", "", "", 1000, BucketNotFound{Bucket: "foobucket"}, 0}, |
||||
// Inexistent object
|
||||
{bucketName, "inexistentObj", "", "", 1000, nil, 0}, |
||||
// Test ListObjectsHeal when all objects are sane
|
||||
{bucketName, "", "", "", 1000, nil, 10}, |
||||
} |
||||
for i, testCase := range testCases { |
||||
testFunc(testCase, i+1) |
||||
} |
||||
|
||||
// Test ListObjectsHeal when all objects under unsane need healing
|
||||
xlObj := xl.(*xlObjects) |
||||
for i := 0; i < 5; i++ { |
||||
if err = xlObj.storageDisks[0].DeleteFile(bucketName, "unsane/subdir/"+objName+strconv.Itoa(i)+"/xl.json"); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
|
||||
// Start tests again with some objects that need healing
|
||||
|
||||
testCases = []testData{ |
||||
// Test ListObjectsHeal when all objects under unsane/ need to be healed
|
||||
{bucketName, "", "", "", 1000, nil, 10}, |
||||
// List objects heal under unsane/, should return all elements
|
||||
{bucketName, "unsane/", "", "", 1000, nil, 5}, |
||||
// List healing objects under sane/
|
||||
{bucketName, "sane/", "", "", 1000, nil, 5}, |
||||
// Max Keys == 2
|
||||
{bucketName, "unsane/", "", "", 2, nil, 2}, |
||||
// Max key > 1000
|
||||
{bucketName, "unsane/", "", "", 5000, nil, 5}, |
||||
// Prefix == Delimiter == "/"
|
||||
{bucketName, "/", "", "/", 1000, nil, 0}, |
||||
// Max Keys == 0
|
||||
{bucketName, "", "", "", 0, nil, 0}, |
||||
// Testing with marker parameter
|
||||
{bucketName, "", "unsane/subdir/" + objName + "0", "", 1000, nil, 4}, |
||||
} |
||||
for i, testCase := range testCases { |
||||
testFunc(testCase, i+1) |
||||
} |
||||
|
||||
} |
@ -0,0 +1,184 @@ |
||||
## Command-line |
||||
``` |
||||
NAME: |
||||
minio server - Start object storage server. |
||||
|
||||
USAGE: |
||||
minio server [FLAGS] DIR1 [DIR2..] |
||||
minio server [FLAGS] DIR{1...64} |
||||
|
||||
DIR: |
||||
DIR points to a directory on a filesystem. When you want to combine multiple drives |
||||
into a single large system, pass one directory per filesystem separated by space. |
||||
You may also use a `...` convention to abbreviate the directory arguments. Remote |
||||
directories in a distributed setup are encoded as HTTP(s) URIs. |
||||
``` |
||||
|
||||
## Limitations |
||||
- Minimum of 4 disks are needed for distributed erasure coded configuration. |
||||
- Maximum of 32 distinct nodes are supported in distributed configuration. |
||||
|
||||
## Common usage |
||||
Single disk filesystem export |
||||
``` |
||||
minio server dir1 |
||||
``` |
||||
|
||||
Standalone erasure coded configuration with 4 disks. |
||||
``` |
||||
minio server dir1 dir2 dir3 dir4 |
||||
``` |
||||
|
||||
Standalone erasure coded configuration with 4 sets with 16 disks each. |
||||
``` |
||||
minio server dir{1...64} |
||||
``` |
||||
|
||||
Distributed erasure coded configuration with 64 sets with 16 disks each. |
||||
``` |
||||
minio server http://host{1...16}/export{1...64} - good |
||||
``` |
||||
|
||||
## Other usages |
||||
|
||||
### Advanced use cases with multiple ellipses |
||||
|
||||
Standalone erasure coded configuration with 4 sets with 16 disks each, which spawns disks across controllers. |
||||
``` |
||||
minio server /mnt/controller{1...4}/data{1...16} |
||||
``` |
||||
|
||||
Standalone erasure coded configuration with 16 sets 16 disks per set, across mnts, across controllers. |
||||
``` |
||||
minio server /mnt{1..4}/controller{1...4}/data{1...16} |
||||
``` |
||||
|
||||
Distributed erasure coded configuration with 2 sets 16 disks per set across hosts. |
||||
``` |
||||
minio server http://host{1...32}/disk1 |
||||
``` |
||||
|
||||
Distributed erasure coded configuration with rack level redundancy 32 sets in total, 16 disks per set. |
||||
``` |
||||
minio server http://rack{1...4}-host{1...8}.example.net/export{1...16} |
||||
``` |
||||
|
||||
Distributed erasure coded configuration with no rack level redundancy but redundancy with in the rack we split the arguments, 32 sets in total, 16 disks per set. |
||||
``` |
||||
minio server http://rack1-host{1...8}.example.net/export{1...16} http://rack2-host{1...8}.example.net/export{1...16} http://rack3-host{1...8}.example.net/export{1...16} http://rack4-host{1...8}.example.net/export{1...16} |
||||
``` |
||||
|
||||
### Expected expansion for double ellipses |
||||
``` |
||||
minio server http://host{1...4}/export{1...8} |
||||
``` |
||||
|
||||
Expected expansion |
||||
``` |
||||
> http://host1/export1 |
||||
> http://host2/export1 |
||||
> http://host3/export1 |
||||
> http://host4/export1 |
||||
> http://host1/export2 |
||||
> http://host2/export2 |
||||
> http://host3/export2 |
||||
> http://host4/export2 |
||||
> http://host1/export3 |
||||
> http://host2/export3 |
||||
> http://host3/export3 |
||||
> http://host4/export3 |
||||
> http://host1/export4 |
||||
> http://host2/export4 |
||||
> http://host3/export4 |
||||
> http://host4/export4 |
||||
> http://host1/export5 |
||||
> http://host2/export5 |
||||
> http://host3/export5 |
||||
> http://host4/export5 |
||||
> http://host1/export6 |
||||
> http://host2/export6 |
||||
> http://host3/export6 |
||||
> http://host4/export6 |
||||
> http://host1/export7 |
||||
> http://host2/export7 |
||||
> http://host3/export7 |
||||
> http://host4/export7 |
||||
> http://host1/export8 |
||||
> http://host2/export8 |
||||
> http://host3/export8 |
||||
> http://host4/export8 |
||||
``` |
||||
|
||||
## Backend `format.json` changes |
||||
New `format.json` has new fields |
||||
|
||||
- `disk` is changed to `this` |
||||
- `jbod` is changed to `sets` , along with this change sets is also a two dimensional list representing total sets and disks per set. |
||||
|
||||
A sample `format.json` looks like below |
||||
```json |
||||
{ |
||||
"version": "1", |
||||
"format": "xl", |
||||
"xl": { |
||||
"version": "2", |
||||
"this": "4ec63786-3dbd-4a9e-96f5-535f6e850fb1", |
||||
"sets": [ |
||||
[ |
||||
"4ec63786-3dbd-4a9e-96f5-535f6e850fb1", |
||||
"1f3cf889-bc90-44ca-be2a-732b53be6c9d", |
||||
"4b23eede-1846-482c-b96f-bfb647f058d3", |
||||
"e1f17302-a850-419d-8cdb-a9f884a63c92" |
||||
], [ |
||||
"2ca4c5c1-dccb-4198-a840-309fea3b5449", |
||||
"6d1e666e-a22c-4db4-a038-2545c2ccb6d5", |
||||
"d4fa35ab-710f-4423-a7c2-e1ca33124df0", |
||||
"88c65e8b-00cb-4037-a801-2549119c9a33" |
||||
] |
||||
], |
||||
"distributionAlgo": "CRCMOD" |
||||
} |
||||
} |
||||
``` |
||||
|
||||
New `format-xl.go` behavior is format structure is used as a opaque type, `Format` field signifies the format of the backend. Once the format has been identified it is now the job of the identified backend to further interpret the next structures and validate them. |
||||
|
||||
```go |
||||
type formatType string |
||||
|
||||
const ( |
||||
formatFS formatType = "fs" |
||||
formatXL = "xl" |
||||
) |
||||
|
||||
type format struct { |
||||
Version string |
||||
Format BackendFormat |
||||
} |
||||
``` |
||||
|
||||
### Current format |
||||
```go |
||||
type formatXLV1 struct{ |
||||
format |
||||
XL struct{ |
||||
Version string |
||||
Disk string |
||||
JBOD []string |
||||
} |
||||
} |
||||
``` |
||||
|
||||
### New format |
||||
```go |
||||
type formatXLV2 struct { |
||||
Version string `json:"version"` |
||||
Format string `json:"format"` |
||||
XL struct { |
||||
Version string `json:"version"` |
||||
This string `json:"this"` |
||||
Sets [][]string `json:"sets"` |
||||
DistributionAlgo string `json:"distributionAlgo"` |
||||
} `json:"xl"` |
||||
} |
||||
``` |
@ -0,0 +1,48 @@ |
||||
# Large Bucket Support Quickstart Guide [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Go Report Card](https://goreportcard.com/badge/minio/minio)](https://goreportcard.com/report/minio/minio) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/) [![codecov](https://codecov.io/gh/minio/minio/branch/master/graph/badge.svg)](https://codecov.io/gh/minio/minio) |
||||
|
||||
Minio large bucket support lets you use more than 16 disks by creating a number of smaller sets of erasure coded units, these units are further combined into a single namespace. Minio large bucket support is developed to solve for several real world use cases, without any special configuration changes. Some of these are |
||||
|
||||
- You already have racks with many disks. |
||||
- You are looking for large capacity up-front for your object storage needs. |
||||
|
||||
# Get started |
||||
If you're aware of distributed Minio setup, the installation and running remains the same. Newer syntax to use a `...` convention to abbreviate the directory arguments. Remote directories in a distributed setup are encoded as HTTP(s) URIs which can be similarly abbreviated as well. |
||||
|
||||
## 1. Prerequisites |
||||
Install Minio - [Minio Quickstart Guide](https://docs.minio.io/docs/minio). |
||||
|
||||
## 2. Run Minio on many disks |
||||
To run Minio large bucket instances, you need to start multiple Minio servers pointing to the same disks. We'll see examples on how to do this in the following sections. |
||||
|
||||
*Note* |
||||
|
||||
- All the nodes running distributed Minio need to have same access key and secret key. To achieve this, we export access key and secret key as environment variables on all the nodes before executing Minio server command. |
||||
- The drive paths below are for demonstration purposes only, you need to replace these with the actual drive paths/folders. |
||||
|
||||
### Minio large bucket on Ubuntu 16.04 LTS standalone |
||||
You'll need the path to the disks e.g. `/export1, /export2 .... /export24`. Then run the following commands on all the nodes you'd like to launch Minio. |
||||
|
||||
```sh |
||||
export MINIO_ACCESS_KEY=<ACCESS_KEY> |
||||
export MINIO_SECRET_KEY=<SECRET_KEY> |
||||
minio server /export{1...24} |
||||
``` |
||||
|
||||
### Minio large bucket on Ubuntu 16.04 LTS servers |
||||
You'll need the path to the disks e.g. `/export1, /export2 .... /export16`. Then run the following commands on all the nodes you'd like to launch Minio. |
||||
|
||||
```sh |
||||
export MINIO_ACCESS_KEY=<ACCESS_KEY> |
||||
export MINIO_SECRET_KEY=<SECRET_KEY> |
||||
minio server http://host{1...4}/export{1...16} |
||||
``` |
||||
|
||||
## 3. Test your setup |
||||
To test this setup, access the Minio server via browser or [`mc`](https://docs.minio.io/docs/minio-client-quickstart-guide). You’ll see the uploaded files are accessible from the all the Minio endpoints. |
||||
|
||||
## Explore Further |
||||
- [Use `mc` with Minio Server](https://docs.minio.io/docs/minio-client-quickstart-guide) |
||||
- [Use `aws-cli` with Minio Server](https://docs.minio.io/docs/aws-cli-with-minio) |
||||
- [Use `s3cmd` with Minio Server](https://docs.minio.io/docs/s3cmd-with-minio) |
||||
- [Use `minio-go` SDK with Minio Server](https://docs.minio.io/docs/golang-client-quickstart-guide) |
||||
- [The Minio documentation website](https://docs.minio.io) |
@ -0,0 +1,16 @@ |
||||
Introduction [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) |
||||
------------ |
||||
|
||||
This feature allows Minio to combine a set of disks larger than 16 in a distributed setup. There are no special configuration changes required to enable this feature. Access to files stored across this setup are locked and synchronized by default. |
||||
|
||||
Motivation |
||||
---------- |
||||
|
||||
As next-generation data centers continue to shrink, IT professions must re-evaluate ahead to get the benefits of greater server density and storage density. Computer hardware is changing rapidly in system form factors, virtualization, containerization have allowed far more enterprise computing with just a fraction of the physical space. Increased densities allow for smaller capital purchases and lower energy bills. |
||||
|
||||
Restrictions |
||||
------------ |
||||
|
||||
* Each set is still a maximum of 16 disks, you can start with multiple such sets statically. |
||||
* Static sets of disks and cannot be changed, there is no elastic expansion allowed. |
||||
* ListObjects() across sets can be relatively slower since List happens on all servers, and is merged at this layer. |
@ -0,0 +1,77 @@ |
||||
// Original work https://github.com/oxtoacart/bpool borrowed
|
||||
// only bpool.go licensed under Apache 2.0.
|
||||
|
||||
// This file modifies original bpool.go to add one more option
|
||||
// to provide []byte capacity for better GC management.
|
||||
|
||||
/* |
||||
* Minio Cloud Storage (C) 2018 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package bpool |
||||
|
||||
// BytePoolCap implements a leaky pool of []byte in the form of a bounded channel.
|
||||
type BytePoolCap struct { |
||||
c chan []byte |
||||
w int |
||||
wcap int |
||||
} |
||||
|
||||
// NewBytePoolCap creates a new BytePool bounded to the given maxSize, with new
|
||||
// byte arrays sized based on width.
|
||||
func NewBytePoolCap(maxSize int, width int, capwidth int) (bp *BytePoolCap) { |
||||
return &BytePoolCap{ |
||||
c: make(chan []byte, maxSize), |
||||
w: width, |
||||
wcap: capwidth, |
||||
} |
||||
} |
||||
|
||||
// Get gets a []byte from the BytePool, or creates a new one if none are
|
||||
// available in the pool.
|
||||
func (bp *BytePoolCap) Get() (b []byte) { |
||||
select { |
||||
case b = <-bp.c: |
||||
// reuse existing buffer
|
||||
default: |
||||
// create new buffer
|
||||
if bp.wcap > 0 { |
||||
b = make([]byte, bp.w, bp.wcap) |
||||
} else { |
||||
b = make([]byte, bp.w) |
||||
} |
||||
} |
||||
return |
||||
} |
||||
|
||||
// Put returns the given Buffer to the BytePool.
|
||||
func (bp *BytePoolCap) Put(b []byte) { |
||||
select { |
||||
case bp.c <- b: |
||||
// buffer went back into pool
|
||||
default: |
||||
// buffer didn't go back into pool, just discard
|
||||
} |
||||
} |
||||
|
||||
// Width returns the width of the byte arrays in this pool.
|
||||
func (bp *BytePoolCap) Width() (n int) { |
||||
return bp.w |
||||
} |
||||
|
||||
// WidthCap returns the cap width of the byte arrays in this pool.
|
||||
func (bp *BytePoolCap) WidthCap() (n int) { |
||||
return bp.wcap |
||||
} |
@ -0,0 +1,96 @@ |
||||
// Original work https://github.com/oxtoacart/bpool borrowed
|
||||
// only bpool.go licensed under Apache 2.0.
|
||||
|
||||
// This file modifies original bpool.go to add one more option
|
||||
// to provide []byte capacity for better GC management.
|
||||
|
||||
/* |
||||
* Minio Cloud Storage (C) 2018 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package bpool |
||||
|
||||
import "testing" |
||||
|
||||
// Tests - bytePool functionality.
|
||||
func TestBytePool(t *testing.T) { |
||||
var size = 4 |
||||
var width = 10 |
||||
var capWidth = 16 |
||||
|
||||
bufPool := NewBytePoolCap(size, width, capWidth) |
||||
|
||||
// Check the width
|
||||
if bufPool.Width() != width { |
||||
t.Fatalf("bytepool width invalid: got %v want %v", bufPool.Width(), width) |
||||
} |
||||
|
||||
// Check with width cap
|
||||
if bufPool.WidthCap() != capWidth { |
||||
t.Fatalf("bytepool capWidth invalid: got %v want %v", bufPool.WidthCap(), capWidth) |
||||
} |
||||
|
||||
// Check that retrieved buffer are of the expected width
|
||||
b := bufPool.Get() |
||||
if len(b) != width { |
||||
t.Fatalf("bytepool length invalid: got %v want %v", len(b), width) |
||||
} |
||||
if cap(b) != capWidth { |
||||
t.Fatalf("bytepool length invalid: got %v want %v", cap(b), capWidth) |
||||
} |
||||
|
||||
bufPool.Put(b) |
||||
|
||||
// Fill the pool beyond the capped pool size.
|
||||
for i := 0; i < size*2; i++ { |
||||
bufPool.Put(make([]byte, bufPool.w)) |
||||
} |
||||
|
||||
b = bufPool.Get() |
||||
if len(b) != width { |
||||
t.Fatalf("bytepool length invalid: got %v want %v", len(b), width) |
||||
} |
||||
if cap(b) != capWidth { |
||||
t.Fatalf("bytepool length invalid: got %v want %v", cap(b), capWidth) |
||||
} |
||||
|
||||
bufPool.Put(b) |
||||
|
||||
// Close the channel so we can iterate over it.
|
||||
close(bufPool.c) |
||||
|
||||
// Check the size of the pool.
|
||||
if len(bufPool.c) != size { |
||||
t.Fatalf("bytepool size invalid: got %v want %v", len(bufPool.c), size) |
||||
} |
||||
|
||||
bufPoolNoCap := NewBytePoolCap(size, width, 0) |
||||
// Check the width
|
||||
if bufPoolNoCap.Width() != width { |
||||
t.Fatalf("bytepool width invalid: got %v want %v", bufPool.Width(), width) |
||||
} |
||||
|
||||
// Check with width cap
|
||||
if bufPoolNoCap.WidthCap() != 0 { |
||||
t.Fatalf("bytepool capWidth invalid: got %v want %v", bufPool.WidthCap(), 0) |
||||
} |
||||
b = bufPoolNoCap.Get() |
||||
if len(b) != width { |
||||
t.Fatalf("bytepool length invalid: got %v want %v", len(b), width) |
||||
} |
||||
if cap(b) != width { |
||||
t.Fatalf("bytepool length invalid: got %v want %v", cap(b), width) |
||||
} |
||||
} |
@ -0,0 +1,207 @@ |
||||
/* |
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package ellipses |
||||
|
||||
import ( |
||||
"errors" |
||||
"fmt" |
||||
"regexp" |
||||
"strconv" |
||||
"strings" |
||||
) |
||||
|
||||
var ( |
||||
// Regex to extract ellipses syntax inputs.
|
||||
regexpEllipses = regexp.MustCompile(`(.*)({[0-9]*\.\.\.[0-9]*})(.*)`) |
||||
|
||||
// Ellipses constants
|
||||
openBraces = "{" |
||||
closeBraces = "}" |
||||
ellipses = "..." |
||||
) |
||||
|
||||
// Parses an ellipses range pattern of following style
|
||||
// `{1...64}`
|
||||
// `{33...64}`
|
||||
func parseEllipsesRange(pattern string) (seq []string, err error) { |
||||
if strings.Index(pattern, openBraces) == -1 { |
||||
return nil, errors.New("Invalid argument") |
||||
} |
||||
if strings.Index(pattern, closeBraces) == -1 { |
||||
return nil, errors.New("Invalid argument") |
||||
} |
||||
|
||||
pattern = strings.TrimPrefix(pattern, openBraces) |
||||
pattern = strings.TrimSuffix(pattern, closeBraces) |
||||
|
||||
ellipsesRange := strings.Split(pattern, ellipses) |
||||
if len(ellipsesRange) != 2 { |
||||
return nil, errors.New("Invalid argument") |
||||
} |
||||
|
||||
var start, end uint64 |
||||
if start, err = strconv.ParseUint(ellipsesRange[0], 10, 64); err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
if end, err = strconv.ParseUint(ellipsesRange[1], 10, 64); err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
if start > end { |
||||
return nil, fmt.Errorf("Incorrect range start %d cannot be bigger than end %d", start, end) |
||||
} |
||||
|
||||
for i := start; i <= end; i++ { |
||||
if strings.HasPrefix(ellipsesRange[0], "0") && len(ellipsesRange[0]) > 1 || strings.HasPrefix(ellipsesRange[1], "0") { |
||||
seq = append(seq, fmt.Sprintf(fmt.Sprintf("%%0%dd", len(ellipsesRange[1])), i)) |
||||
} else { |
||||
seq = append(seq, fmt.Sprintf("%d", i)) |
||||
} |
||||
} |
||||
|
||||
return seq, nil |
||||
} |
||||
|
||||
// Pattern - ellipses pattern, describes the range and also the
|
||||
// associated prefix and suffixes.
|
||||
type Pattern struct { |
||||
Prefix string |
||||
Suffix string |
||||
Seq []string |
||||
} |
||||
|
||||
// argExpander - recursively expands labels into its respective forms.
|
||||
func argExpander(labels [][]string) (out [][]string) { |
||||
if len(labels) == 1 { |
||||
for _, v := range labels[0] { |
||||
out = append(out, []string{v}) |
||||
} |
||||
return out |
||||
} |
||||
for _, lbl := range labels[0] { |
||||
rs := argExpander(labels[1:]) |
||||
for _, rlbls := range rs { |
||||
r := append(rlbls, []string{lbl}...) |
||||
out = append(out, r) |
||||
} |
||||
} |
||||
return out |
||||
} |
||||
|
||||
// ArgPattern contains a list of patterns provided in the input.
|
||||
type ArgPattern []Pattern |
||||
|
||||
// Expand - expands all the ellipses patterns in
|
||||
// the given argument.
|
||||
func (a ArgPattern) Expand() [][]string { |
||||
labels := make([][]string, len(a)) |
||||
for i := range labels { |
||||
labels[i] = a[i].Expand() |
||||
} |
||||
return argExpander(labels) |
||||
} |
||||
|
||||
// Expand - expands a ellipses pattern.
|
||||
func (p Pattern) Expand() []string { |
||||
var labels []string |
||||
for i := range p.Seq { |
||||
switch { |
||||
case p.Prefix != "" && p.Suffix == "": |
||||
labels = append(labels, fmt.Sprintf("%s%s", p.Prefix, p.Seq[i])) |
||||
case p.Suffix != "" && p.Prefix == "": |
||||
labels = append(labels, fmt.Sprintf("%s%s", p.Seq[i], p.Suffix)) |
||||
case p.Suffix == "" && p.Prefix == "": |
||||
labels = append(labels, fmt.Sprintf("%s", p.Seq[i])) |
||||
default: |
||||
labels = append(labels, fmt.Sprintf("%s%s%s", p.Prefix, p.Seq[i], p.Suffix)) |
||||
} |
||||
} |
||||
return labels |
||||
} |
||||
|
||||
// HasEllipses - returns true if input arg has ellipses type pattern.
|
||||
func HasEllipses(args ...string) bool { |
||||
var ok = true |
||||
for _, arg := range args { |
||||
ok = ok && (strings.Count(arg, ellipses) > 0 || (strings.Count(arg, openBraces) > 0 && strings.Count(arg, closeBraces) > 0)) |
||||
} |
||||
return ok |
||||
} |
||||
|
||||
// ErrInvalidEllipsesFormatFn error returned when invalid ellipses format is detected.
// Kept as a func so each error message carries the offending arg verbatim.
var ErrInvalidEllipsesFormatFn = func(arg string) error {
	return fmt.Errorf("Invalid ellipsis format in (%s), Ellipsis range must be provided in format {N...M} where N and M are positive integers, M must be greater than N, with an allowed minimum range of 4", arg)
}
||||
|
||||
// FindEllipsesPatterns - finds all ellipses patterns, recursively and parses the ranges numerically.
// Patterns are appended innermost-first; FindEllipsesPatterns("x{1...2}y{3...4}z")
// presumably yields the y-suffixed pattern before the x-prefixed one -- TODO
// confirm against the regexpEllipses definition (not visible here).
func FindEllipsesPatterns(arg string) (ArgPattern, error) {
	var patterns []Pattern
	// NOTE(review): after dropping the full match, parts appears to be
	// [prefix-remainder, range-body, suffix] -- confirm with regexpEllipses.
	parts := regexpEllipses.FindStringSubmatch(arg)
	if len(parts) == 0 {
		// We throw an error if arg doesn't have any recognizable ellipses pattern.
		return nil, ErrInvalidEllipsesFormatFn(arg)
	}

	parts = parts[1:]
	// Keep peeling patterns off the remaining prefix while it still matches.
	patternFound := regexpEllipses.MatchString(parts[0])
	for patternFound {
		seq, err := parseEllipsesRange(parts[1])
		if err != nil {
			return patterns, err
		}
		// Inner patterns carry no prefix; the leftover prefix text is
		// re-examined on the next iteration.
		patterns = append(patterns, Pattern{
			Prefix: "",
			Suffix: parts[2],
			Seq:    seq,
		})
		parts = regexpEllipses.FindStringSubmatch(parts[0])
		if len(parts) > 0 {
			parts = parts[1:]
			patternFound = HasEllipses(parts[0])
			continue
		}
		break
	}

	// The final (outermost) match keeps whatever prefix text remains.
	if len(parts) > 0 {
		seq, err := parseEllipsesRange(parts[1])
		if err != nil {
			return patterns, err
		}

		patterns = append(patterns, Pattern{
			Prefix: parts[0],
			Suffix: parts[2],
			Seq:    seq,
		})
	}

	// Check if any of the prefix or suffixes now have flower braces
	// left over, in such a case we generally think that there is
	// perhaps a typo in users input and error out accordingly.
	for _, pattern := range patterns {
		if strings.Count(pattern.Prefix, openBraces) > 0 || strings.Count(pattern.Prefix, closeBraces) > 0 {
			return nil, ErrInvalidEllipsesFormatFn(arg)
		}
		if strings.Count(pattern.Suffix, openBraces) > 0 || strings.Count(pattern.Suffix, closeBraces) > 0 {
			return nil, ErrInvalidEllipsesFormatFn(arg)
		}
	}

	return patterns, nil
}
@ -0,0 +1,244 @@ |
||||
/* |
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package ellipses |
||||
|
||||
import ( |
||||
"fmt" |
||||
"testing" |
||||
) |
||||
|
||||
// Test tests args with ellipses.
|
||||
func TestHasEllipses(t *testing.T) { |
||||
testCases := []struct { |
||||
args []string |
||||
expectedOk bool |
||||
}{ |
||||
// Tests for all args without ellipses.
|
||||
{ |
||||
[]string{"64"}, |
||||
false, |
||||
}, |
||||
// Found flower braces, still attempt to parse and throw an error.
|
||||
{ |
||||
[]string{"{1..64}"}, |
||||
true, |
||||
}, |
||||
{ |
||||
[]string{"{1..2..}"}, |
||||
true, |
||||
}, |
||||
// Test for valid input.
|
||||
{ |
||||
[]string{"1...64"}, |
||||
true, |
||||
}, |
||||
{ |
||||
[]string{"{1...2O}"}, |
||||
true, |
||||
}, |
||||
{ |
||||
[]string{"..."}, |
||||
true, |
||||
}, |
||||
{ |
||||
[]string{"{-1...1}"}, |
||||
true, |
||||
}, |
||||
{ |
||||
[]string{"{0...-1}"}, |
||||
true, |
||||
}, |
||||
{ |
||||
[]string{"{1....4}"}, |
||||
true, |
||||
}, |
||||
{ |
||||
[]string{"{1...64}"}, |
||||
true, |
||||
}, |
||||
{ |
||||
[]string{"{...}"}, |
||||
true, |
||||
}, |
||||
{ |
||||
[]string{"{1...64}", "{65...128}"}, |
||||
true, |
||||
}, |
||||
{ |
||||
[]string{"http://minio{2...3}/export/set{1...64}"}, |
||||
true, |
||||
}, |
||||
{ |
||||
[]string{ |
||||
"http://minio{2...3}/export/set{1...64}", |
||||
"http://minio{2...3}/export/set{65...128}", |
||||
}, |
||||
true, |
||||
}, |
||||
{ |
||||
[]string{ |
||||
"mydisk-{a...z}{1...20}", |
||||
}, |
||||
true, |
||||
}, |
||||
{ |
||||
[]string{ |
||||
"mydisk-{1...4}{1..2.}", |
||||
}, |
||||
true, |
||||
}, |
||||
} |
||||
|
||||
for i, testCase := range testCases { |
||||
t.Run(fmt.Sprintf("Test%d", i+1), func(t *testing.T) { |
||||
gotOk := HasEllipses(testCase.args...) |
||||
if gotOk != testCase.expectedOk { |
||||
t.Errorf("Expected %t, got %t", testCase.expectedOk, gotOk) |
||||
} |
||||
}) |
||||
} |
||||
} |
||||
|
||||
// Test tests find ellipses patterns.
|
||||
func TestFindEllipsesPatterns(t *testing.T) { |
||||
testCases := []struct { |
||||
pattern string |
||||
success bool |
||||
expectedCount int |
||||
}{ |
||||
// Tests for all invalid inputs
|
||||
{ |
||||
"{1..64}", |
||||
false, |
||||
0, |
||||
}, |
||||
{ |
||||
"1...64", |
||||
false, |
||||
0, |
||||
}, |
||||
{ |
||||
"...", |
||||
false, |
||||
0, |
||||
}, |
||||
{ |
||||
"{1...", |
||||
false, |
||||
0, |
||||
}, |
||||
{ |
||||
"...64}", |
||||
false, |
||||
0, |
||||
}, |
||||
{ |
||||
"{...}", |
||||
false, |
||||
0, |
||||
}, |
||||
{ |
||||
"{-1...1}", |
||||
false, |
||||
0, |
||||
}, |
||||
{ |
||||
"{0...-1}", |
||||
false, |
||||
0, |
||||
}, |
||||
{ |
||||
"{1...2O}", |
||||
false, |
||||
0, |
||||
}, |
||||
{ |
||||
"{64...1}", |
||||
false, |
||||
0, |
||||
}, |
||||
{ |
||||
"{1....4}", |
||||
false, |
||||
0, |
||||
}, |
||||
{ |
||||
"mydisk-{a...z}{1...20}", |
||||
false, |
||||
0, |
||||
}, |
||||
{ |
||||
"mydisk-{1...4}{1..2.}", |
||||
false, |
||||
0, |
||||
}, |
||||
{ |
||||
"{1..2.}-mydisk-{1...4}", |
||||
false, |
||||
0, |
||||
}, |
||||
{ |
||||
"{{1...4}}", |
||||
false, |
||||
0, |
||||
}, |
||||
{ |
||||
"{4...02}", |
||||
false, |
||||
0, |
||||
}, |
||||
// Test for valid input.
|
||||
{ |
||||
"{1...64}", |
||||
true, |
||||
64, |
||||
}, |
||||
{ |
||||
"{1...64} {65...128}", |
||||
true, |
||||
4096, |
||||
}, |
||||
{ |
||||
"{01...036}", |
||||
true, |
||||
36, |
||||
}, |
||||
{ |
||||
"{001...036}", |
||||
true, |
||||
36, |
||||
}, |
||||
} |
||||
|
||||
for i, testCase := range testCases { |
||||
t.Run(fmt.Sprintf("Test%d", i+1), func(t *testing.T) { |
||||
argP, err := FindEllipsesPatterns(testCase.pattern) |
||||
if err != nil && testCase.success { |
||||
t.Errorf("Expected success but failed instead %s", err) |
||||
} |
||||
if err == nil && !testCase.success { |
||||
t.Errorf("Expected failure but passed instead") |
||||
} |
||||
if err == nil { |
||||
gotCount := len(argP.Expand()) |
||||
if gotCount != testCase.expectedCount { |
||||
t.Errorf("Expected %d, got %d", testCase.expectedCount, gotCount) |
||||
} |
||||
} |
||||
}) |
||||
} |
||||
} |
@ -0,0 +1,73 @@ |
||||
/* |
||||
* Minio Cloud Storage, (C) 2018 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
* |
||||
*/ |
||||
|
||||
package madmin |
||||
|
||||
import ( |
||||
"testing" |
||||
) |
||||
|
||||
// Tests heal drives missing and offline counts.
|
||||
func TestHealDriveCounts(t *testing.T) { |
||||
rs := HealResultItem{} |
||||
rs.Before.Drives = make([]HealDriveInfo, 20) |
||||
rs.After.Drives = make([]HealDriveInfo, 20) |
||||
for i := range rs.Before.Drives { |
||||
if i < 4 { |
||||
rs.Before.Drives[i] = HealDriveInfo{State: DriveStateMissing} |
||||
rs.After.Drives[i] = HealDriveInfo{State: DriveStateMissing} |
||||
} else if i > 4 && i < 15 { |
||||
rs.Before.Drives[i] = HealDriveInfo{State: DriveStateOffline} |
||||
rs.After.Drives[i] = HealDriveInfo{State: DriveStateOffline} |
||||
} else if i > 15 { |
||||
rs.Before.Drives[i] = HealDriveInfo{State: DriveStateCorrupt} |
||||
rs.After.Drives[i] = HealDriveInfo{State: DriveStateCorrupt} |
||||
} else { |
||||
rs.Before.Drives[i] = HealDriveInfo{State: DriveStateOk} |
||||
rs.After.Drives[i] = HealDriveInfo{State: DriveStateOk} |
||||
} |
||||
} |
||||
|
||||
i, j := rs.GetOnlineCounts() |
||||
if i > 2 { |
||||
t.Errorf("Expected '2', got %d before online disks", i) |
||||
} |
||||
if j > 2 { |
||||
t.Errorf("Expected '2', got %d after online disks", j) |
||||
} |
||||
i, j = rs.GetOfflineCounts() |
||||
if i > 10 { |
||||
t.Errorf("Expected '10', got %d before offline disks", i) |
||||
} |
||||
if j > 10 { |
||||
t.Errorf("Expected '10', got %d after offline disks", j) |
||||
} |
||||
i, j = rs.GetCorruptedCounts() |
||||
if i > 4 { |
||||
t.Errorf("Expected '4', got %d before corrupted disks", i) |
||||
} |
||||
if j > 4 { |
||||
t.Errorf("Expected '4', got %d after corrupted disks", j) |
||||
} |
||||
i, j = rs.GetMissingCounts() |
||||
if i > 4 { |
||||
t.Errorf("Expected '4', got %d before missing disks", i) |
||||
} |
||||
if j > 4 { |
||||
t.Errorf("Expected '4', got %d after missing disks", i) |
||||
} |
||||
} |
@ -0,0 +1,59 @@ |
||||
/* |
||||
* Minio Cloud Storage, (C) 2017 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package errgroup |
||||
|
||||
import ( |
||||
"sync" |
||||
) |
||||
|
||||
// A Group is a collection of goroutines working on subtasks that are part of
// the same overall task.
//
// A zero Group is valid and does not cancel on error.
type Group struct {
	pending sync.WaitGroup // tracks goroutines launched via Go that have not finished.
	errs    []error        // one error slot per task, indexed by Go's index argument.
}

// WithNErrs returns a new Group whose error slice has length nerrs;
// upon Wait() errors are returned collected from all tasks.
func WithNErrs(nerrs int) *Group {
	g := Group{errs: make([]error, nerrs)}
	return &g
}

// Wait blocks until every function started with Go has returned, then
// returns the slice holding each task's error (nil on success).
func (eg *Group) Wait() []error {
	eg.pending.Wait()
	return eg.errs
}

// Go calls the given function in a new goroutine.
//
// A non-nil error returned by f is stored at errs[index] and later
// surfaced by Wait().
func (eg *Group) Go(f func() error, index int) {
	eg.pending.Add(1)

	go func() {
		defer eg.pending.Done()

		if err := f(); err != nil {
			eg.errs[index] = err
		}
	}()
}
@ -0,0 +1,52 @@ |
||||
/* |
||||
* Minio Cloud Storage, (C) 2017 Minio, Inc. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
package errgroup |
||||
|
||||
import ( |
||||
"fmt" |
||||
"reflect" |
||||
"testing" |
||||
) |
||||
|
||||
func TestGroupWithNErrs(t *testing.T) { |
||||
err1 := fmt.Errorf("errgroup_test: 1") |
||||
err2 := fmt.Errorf("errgroup_test: 2") |
||||
|
||||
cases := []struct { |
||||
errs []error |
||||
}{ |
||||
{errs: []error{nil}}, |
||||
{errs: []error{err1}}, |
||||
{errs: []error{err1, nil}}, |
||||
{errs: []error{err1, nil, err2}}, |
||||
} |
||||
|
||||
for j, tc := range cases { |
||||
t.Run(fmt.Sprintf("Test%d", j+1), func(t *testing.T) { |
||||
g := WithNErrs(len(tc.errs)) |
||||
for i, err := range tc.errs { |
||||
err := err |
||||
g.Go(func() error { return err }, i) |
||||
} |
||||
|
||||
gotErrs := g.Wait() |
||||
if !reflect.DeepEqual(gotErrs, tc.errs) { |
||||
t.Errorf("Expected %#v, got %#v", tc.errs, gotErrs) |
||||
} |
||||
}) |
||||
} |
||||
} |
Loading…
Reference in new issue