server: Implement --ignore-disks for ignoring disks from healing. (#2158)

By default the server heals/creates missing directories and re-populates
`format.json`. In scenarios where a disk is down for maintenance, it is
beneficial to let users ignore such disks rather than mistakenly healing
onto the `root` partition.

Fixes #2128
Branch: master
Author: Harshavardhana, committed by Anand Babu (AB) Periasamy
Parent: 0793237d94
Commit: bdff0848ed
Changed files:
  1. format-config-v1.go (55)
  2. fs-v1_test.go (2)
  3. object-common.go (3)
  4. routers.go (15)
  5. server-main.go (39)
  6. test-utils_test.go (4)
  7. xl-v1.go (34)
  8. xl-v1_test.go (2)

format-config-v1.go

@@ -115,27 +115,31 @@ var errSomeDiskOffline = errors.New("some disks are offline")
 var errDiskOrderMismatch = errors.New("disk order mismatch")
 
 // Returns error slice into understandable errors.
-func reduceFormatErrs(errs []error, diskCount int) error {
+func reduceFormatErrs(errs []error, diskCount int) (err error) {
 	var errUnformattedDiskCount = 0
 	var errDiskNotFoundCount = 0
-	for _, err := range errs {
-		if err == errUnformattedDisk {
+	for _, dErr := range errs {
+		if dErr == errUnformattedDisk {
 			errUnformattedDiskCount++
-		} else if err == errDiskNotFound {
+		} else if dErr == errDiskNotFound {
 			errDiskNotFoundCount++
 		}
 	}
-	// Returns errUnformattedDisk if all disks report unFormattedDisk.
-	if errUnformattedDiskCount == diskCount {
+	// Unformatted disks found, we need to figure out if any disks are offline.
+	if errUnformattedDiskCount > 0 {
+		// Returns errUnformattedDisk if all disks report unFormattedDisk.
+		if errUnformattedDiskCount < diskCount {
+			if errDiskNotFoundCount > 0 {
+				// Only some disks are fresh but some disks are offline as well.
+				return errSomeDiskOffline
+			}
+			// Some disks are fresh and unformatted, no disks are offline.
+			return errSomeDiskUnformatted
+		}
+		// All disks returned unformatted, all disks must be fresh.
 		return errUnformattedDisk
-	} else if errUnformattedDiskCount < diskCount && errDiskNotFoundCount == 0 {
-		// Only some disks return unFormattedDisk and all disks are online.
-		return errSomeDiskUnformatted
-	} else if errUnformattedDiskCount < diskCount && errDiskNotFoundCount > 0 {
-		// Only some disks return unFormattedDisk and some disks are
-		// offline as well.
-		return errSomeDiskOffline
 	}
+	// No unformatted disks found, no need to handle the disk-not-found case; return success here.
 	return nil
 }
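To make the new control flow concrete, here is a minimal, self-contained sketch (my illustration, not part of the commit) that mirrors reduceFormatErrs above and prints which summary error a few per-disk error combinations reduce to.

package main

import (
	"errors"
	"fmt"
)

// Local stand-ins for the errors used above, for illustration only.
var (
	errUnformattedDisk     = errors.New("unformatted disk found")
	errDiskNotFound        = errors.New("disk not found")
	errSomeDiskUnformatted = errors.New("some disks are unformatted")
	errSomeDiskOffline     = errors.New("some disks are offline")
)

// reduceFormatErrs mirrors the logic introduced by this change.
func reduceFormatErrs(errs []error, diskCount int) error {
	unformatted, notFound := 0, 0
	for _, dErr := range errs {
		if dErr == errUnformattedDisk {
			unformatted++
		} else if dErr == errDiskNotFound {
			notFound++
		}
	}
	if unformatted > 0 {
		if unformatted < diskCount {
			if notFound > 0 {
				// Fresh disks mixed with offline/ignored disks.
				return errSomeDiskOffline
			}
			// Fresh disks mixed with formatted disks, all online.
			return errSomeDiskUnformatted
		}
		// Every disk is fresh.
		return errUnformattedDisk
	}
	// No unformatted disks at all, nothing to do here.
	return nil
}

func main() {
	fmt.Println(reduceFormatErrs([]error{errUnformattedDisk, errUnformattedDisk, errUnformattedDisk, errUnformattedDisk}, 4)) // unformatted disk found
	fmt.Println(reduceFormatErrs([]error{errUnformattedDisk, errUnformattedDisk, nil, nil}, 4))                               // some disks are unformatted
	fmt.Println(reduceFormatErrs([]error{errUnformattedDisk, errDiskNotFound, nil, nil}, 4))                                  // some disks are offline
	fmt.Println(reduceFormatErrs([]error{nil, nil, nil, nil}, 4))                                                             // <nil>
}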
@@ -152,6 +156,10 @@ func loadAllFormats(bootstrapDisks []StorageAPI) ([]*formatConfigV1, []error) {
 	// Make a volume entry on all underlying storage disks.
 	for index, disk := range bootstrapDisks {
+		if disk == nil {
+			sErrs[index] = errDiskNotFound
+			continue
+		}
 		wg.Add(1)
 		// Make a volume inside a go-routine.
 		go func(index int, disk StorageAPI) {
@@ -409,6 +417,11 @@ func healFormatXL(storageDisks []StorageAPI) error {
 	var referenceConfig *formatConfigV1
 	// Loads `format.json` from all disks.
 	for index, disk := range storageDisks {
+		// Disk not found or ignored is a valid case.
+		if disk == nil {
+			// Proceed without healing.
+			return nil
+		}
 		formatXL, err := loadFormat(disk)
 		if err != nil {
 			if err == errUnformattedDisk {
@@ -429,11 +442,6 @@ func healFormatXL(storageDisks []StorageAPI) error {
 		return nil
 	}
-	// Init meta volume.
-	if err := initMetaVolume(storageDisks); err != nil {
-		return err
-	}
 	// All disks are fresh, format.json will be written by initFormatXL()
 	if isFormatNotFound(formatConfigs) {
 		return initFormatXL(storageDisks)
@@ -499,6 +507,10 @@ func loadFormatXL(bootstrapDisks []StorageAPI) (disks []StorageAPI, err error) {
 	// Try to load `format.json` bootstrap disks.
 	for index, disk := range bootstrapDisks {
+		if disk == nil {
+			diskNotFoundCount++
+			continue
+		}
 		var formatXL *formatConfigV1
 		formatXL, err = loadFormat(disk)
 		if err != nil {
@@ -515,16 +527,13 @@ func loadFormatXL(bootstrapDisks []StorageAPI) (disks []StorageAPI, err error) {
 		formatConfigs[index] = formatXL
 	}
-	// If all disks indicate that 'format.json' is not available
-	// return 'errUnformattedDisk'.
-	if unformattedDisksFoundCnt == len(bootstrapDisks) {
+	// If all disks indicate that 'format.json' is not available return 'errUnformattedDisk'.
+	if unformattedDisksFoundCnt > len(bootstrapDisks)-(len(bootstrapDisks)/2+1) {
 		return nil, errUnformattedDisk
 	} else if diskNotFoundCount == len(bootstrapDisks) {
 		return nil, errDiskNotFound
 	} else if diskNotFoundCount > len(bootstrapDisks)-(len(bootstrapDisks)/2+1) {
 		return nil, errXLReadQuorum
-	} else if unformattedDisksFoundCnt > len(bootstrapDisks)-(len(bootstrapDisks)/2+1) {
-		return nil, errXLReadQuorum
 	}
 	// Validate the format configs read are correct.
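The expression len(bootstrapDisks)-(len(bootstrapDisks)/2+1) above is the number of failures the read quorum can tolerate. A small sketch of the arithmetic (illustrative only, assuming the usual N/2+1 read quorum used here):

package main

import "fmt"

func main() {
	// For N disks the read quorum is N/2+1; anything beyond N-(N/2+1)
	// missing or unformatted disks breaks that quorum.
	for _, n := range []int{4, 8, 12, 16} {
		quorum := n/2 + 1
		tolerated := n - quorum
		fmt.Printf("disks=%2d readQuorum=%2d tolerated failures=%2d\n", n, quorum, tolerated)
	}
}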

fs-v1_test.go

@@ -40,7 +40,7 @@ func TestNewFS(t *testing.T) {
 	}
 	// Initializes all disks with XL
-	_, err := newXLObjects(disks)
+	_, err := newXLObjects(disks, nil)
 	if err != nil {
 		t.Fatalf("Unable to initialize XL object, %s", err)
 	}

object-common.go

@@ -86,7 +86,7 @@ func initMetaVolume(storageDisks []StorageAPI) error {
 	// Initialize all disks in parallel.
 	for index, disk := range storageDisks {
 		if disk == nil {
-			errs[index] = errDiskNotFound
+			// Ignore creating the meta volume on disks which are not found.
 			continue
 		}
 		wg.Add(1)
@@ -135,7 +135,6 @@ func xlHouseKeeping(storageDisks []StorageAPI) error {
 	// Initialize all disks in parallel.
 	for index, disk := range storageDisks {
 		if disk == nil {
-			errs[index] = errDiskNotFound
 			continue
 		}
 		wg.Add(1)

routers.go

@@ -23,16 +23,15 @@ import (
 	router "github.com/gorilla/mux"
 )
 
-// newObjectLayer - initialize any object layer depending on the
-// number of export paths.
-func newObjectLayer(exportPaths []string) (ObjectLayer, error) {
-	if len(exportPaths) == 1 {
-		exportPath := exportPaths[0]
+// newObjectLayer - initialize any object layer depending on the number of disks.
+func newObjectLayer(disks, ignoredDisks []string) (ObjectLayer, error) {
+	if len(disks) == 1 {
+		exportPath := disks[0]
 		// Initialize FS object layer.
 		return newFSObjects(exportPath)
 	}
 	// Initialize XL object layer.
-	objAPI, err := newXLObjects(exportPaths)
+	objAPI, err := newXLObjects(disks, ignoredDisks)
 	if err == errXLWriteQuorum {
 		return objAPI, errors.New("Disks are different with last minio server run.")
 	}
@@ -41,11 +40,11 @@ func newObjectLayer(exportPaths []string) (ObjectLayer, error) {
 // configureServer handler returns final handler for the http server.
 func configureServerHandler(srvCmdConfig serverCmdConfig) http.Handler {
-	objAPI, err := newObjectLayer(srvCmdConfig.exportPaths)
+	objAPI, err := newObjectLayer(srvCmdConfig.disks, srvCmdConfig.ignoredDisks)
 	fatalIf(err, "Unable to intialize object layer.")
 	// Initialize storage rpc server.
-	storageRPC, err := newRPCServer(srvCmdConfig.exportPaths[0]) // FIXME: should only have one path.
+	storageRPC, err := newRPCServer(srvCmdConfig.disks[0]) // FIXME: should only have one path.
 	fatalIf(err, "Unable to initialize storage RPC server.")
 	// Initialize API.

server-main.go

@@ -39,6 +39,11 @@ var serverCmd = cli.Command{
 		cli.StringFlag{
 			Name:  "address",
 			Value: ":9000",
+			Usage: "Specify custom server \"ADDRESS:PORT\", defaults to \":9000\".",
+		},
+		cli.StringFlag{
+			Name:  "ignore-disks",
+			Usage: "Specify comma separated list of disks that are offline.",
 		},
 	},
 	Action: serverMain,
@@ -52,8 +57,13 @@ OPTIONS:
 {{range .Flags}}{{.}}
 {{end}}
 ENVIRONMENT VARIABLES:
-  MINIO_ACCESS_KEY: Access key string of 5 to 20 characters in length.
-  MINIO_SECRET_KEY: Secret key string of 8 to 40 characters in length.
+  ACCESS:
+     MINIO_ACCESS_KEY: Access key string of 5 to 20 characters in length.
+     MINIO_SECRET_KEY: Secret key string of 8 to 40 characters in length.
+
+  CACHING:
+     MINIO_CACHE_SIZE: Set total cache size in NN[GB|MB|KB]. Defaults to 8GB.
+     MINIO_CACHE_EXPIRY: Set cache expiration duration in NN[h|m|s]. Defaults to 72 hours.
 
 EXAMPLES:
   1. Start minio server.
@@ -65,16 +75,23 @@ EXAMPLES:
   3. Start minio server on Windows.
       $ minio {{.Name}} C:\MyShare
 
-  4. Start minio server 12 disks to enable erasure coded layer with 6 data and 6 parity.
+  4. Start minio server on 12 disks to enable erasure coded layer with 6 data and 6 parity.
       $ minio {{.Name}} /mnt/export1/backend /mnt/export2/backend /mnt/export3/backend /mnt/export4/backend \
       /mnt/export5/backend /mnt/export6/backend /mnt/export7/backend /mnt/export8/backend /mnt/export9/backend \
       /mnt/export10/backend /mnt/export11/backend /mnt/export12/backend
+
+  5. Start minio server on 12 disks while ignoring two disks for initialization.
+      $ minio {{.Name}} --ignore-disks=/mnt/export1/backend,/mnt/export2/backend /mnt/export1/backend \
+      /mnt/export2/backend /mnt/export3/backend /mnt/export4/backend /mnt/export5/backend /mnt/export6/backend \
+      /mnt/export7/backend /mnt/export8/backend /mnt/export9/backend /mnt/export10/backend /mnt/export11/backend \
+      /mnt/export12/backend
 `,
 }
 
 type serverCmdConfig struct {
 	serverAddr string
-	exportPaths []string
+	disks        []string
+	ignoredDisks []string
 }
 
 // configureServer configure a new server instance
@@ -292,13 +309,17 @@ func serverMain(c *cli.Context) {
 	// Check if requested port is available.
 	checkPortAvailability(getPort(net.JoinHostPort(host, port)))
 
-	// Save all command line args as export paths.
-	exportPaths := c.Args()
+	// Disks to be ignored in server init, to skip format healing.
+	ignoredDisks := strings.Split(c.String("ignore-disks"), ",")
+
+	// Disks to be used in server init.
+	disks := c.Args()
 
 	// Configure server.
 	apiServer := configureServer(serverCmdConfig{
 		serverAddr: serverAddress,
-		exportPaths: exportPaths,
+		disks:        disks,
+		ignoredDisks: ignoredDisks,
 	})
 
 	// Credential.
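One caveat worth noting (my observation, not stated in the commit): when --ignore-disks is not passed, c.String returns an empty string, and strings.Split of an empty string still yields a single empty element. That empty entry never matches a real disk path in isDiskFound, so nothing is ignored in the default case. A quick demonstration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Flag unset: Split("") returns one empty element, not an empty slice.
	fmt.Printf("%q\n", strings.Split("", ",")) // [""]

	// Flag set: the comma separated paths split as expected.
	fmt.Printf("%q\n", strings.Split("/mnt/export1/backend,/mnt/export2/backend", ",")) // ["/mnt/export1/backend" "/mnt/export2/backend"]
}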

test-utils_test.go

@@ -129,7 +129,7 @@ func StartTestServer(t TestErrHandler, instanceType string) TestServer {
 	testServer.AccessKey = credentials.AccessKeyID
 	testServer.SecretKey = credentials.SecretAccessKey
 	// Run TestServer.
-	testServer.Server = httptest.NewServer(configureServerHandler(serverCmdConfig{exportPaths: erasureDisks}))
+	testServer.Server = httptest.NewServer(configureServerHandler(serverCmdConfig{disks: erasureDisks}))
 
 	return testServer
 }
@@ -632,7 +632,7 @@ func getXLObjectLayer() (ObjectLayer, []string, error) {
 		erasureDisks = append(erasureDisks, path)
 	}
-	objLayer, err := newXLObjects(erasureDisks)
+	objLayer, err := newXLObjects(erasureDisks, nil)
 	if err != nil {
 		return nil, nil, err
 	}

xl-v1.go

@@ -93,8 +93,19 @@ func checkSufficientDisks(disks []string) error {
 	return nil
 }
 
+// isDiskFound - validates if the disk is found in a list of input disks.
+func isDiskFound(disk string, disks []string) bool {
+	for _, d := range disks {
+		// Disk found, return true.
+		if disk == d {
+			return true
+		}
+	}
+	return false
+}
+
 // newXLObjects - initialize new xl object layer.
-func newXLObjects(disks []string) (ObjectLayer, error) {
+func newXLObjects(disks, ignoredDisks []string) (ObjectLayer, error) {
 	// Validate if input disks are sufficient.
 	if err := checkSufficientDisks(disks); err != nil {
 		return nil, err
@@ -103,9 +114,14 @@ func newXLObjects(disks []string) (ObjectLayer, error) {
 	// Bootstrap disks.
 	storageDisks := make([]StorageAPI, len(disks))
 	for index, disk := range disks {
+		// Check if disk is ignored.
+		if isDiskFound(disk, ignoredDisks) {
+			storageDisks[index] = nil
+			continue
+		}
 		var err error
-		// Intentionally ignore disk not found errors. XL will
-		// manage such errors internally.
+		// Intentionally ignore disk not found errors. XL is designed
+		// to handle these errors internally.
 		storageDisks[index], err = newStorageAPI(disk)
 		if err != nil && err != errDiskNotFound {
 			return nil, err
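Putting the two pieces above together, ignored disks simply end up as nil entries in storageDisks, which the rest of the initialization treats like offline disks. A simplified, self-contained sketch (the stub newStorageAPI and types below are stand-ins for the real constructor, for illustration only):

package main

import "fmt"

// StorageAPI and posixDisk are simplified stand-ins for illustration.
type StorageAPI interface{}

type posixDisk struct{ path string }

func newStorageAPI(path string) (StorageAPI, error) { return &posixDisk{path: path}, nil }

// isDiskFound is the same helper added in xl-v1.go above.
func isDiskFound(disk string, disks []string) bool {
	for _, d := range disks {
		if disk == d {
			return true
		}
	}
	return false
}

func main() {
	disks := []string{"/mnt/export1", "/mnt/export2", "/mnt/export3", "/mnt/export4"}
	ignoredDisks := []string{"/mnt/export2"}

	storageDisks := make([]StorageAPI, len(disks))
	for index, disk := range disks {
		// Ignored disks stay nil and are skipped by healing and meta volume init.
		if isDiskFound(disk, ignoredDisks) {
			continue
		}
		storageDisks[index], _ = newStorageAPI(disk)
	}

	for i, d := range storageDisks {
		fmt.Printf("slot %d: %v\n", i, d) // slot 1 prints <nil>
	}
}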
@@ -122,12 +138,14 @@ func newXLObjects(disks []string) (ObjectLayer, error) {
 		return nil, err
 	}
 
+	// Initialize meta volume, if the volume already exists it is ignored.
+	if err := initMetaVolume(storageDisks); err != nil {
+		return nil, fmt.Errorf("Unable to initialize '.minio' meta volume, %s", err)
+	}
+
 	// Handles different cases properly.
 	switch reduceFormatErrs(sErrs, len(storageDisks)) {
 	case errUnformattedDisk:
-		if err := initMetaVolume(storageDisks); err != nil {
-			return nil, fmt.Errorf("Unable to initialize '.minio' meta volume, %s", err)
-		}
 		// All drives online but fresh, initialize format.
 		if err := initFormatXL(storageDisks); err != nil {
 			return nil, fmt.Errorf("Unable to initialize format, %s", err)
@@ -139,8 +157,8 @@ func newXLObjects(disks []string) (ObjectLayer, error) {
 			return nil, fmt.Errorf("Unable to heal backend %s", err)
 		}
 	case errSomeDiskOffline:
-		// Some disks offline but some report missing format.json.
-		// FIXME.
+		// FIXME: in future.
+		return nil, fmt.Errorf("Unable to initialize format %s and %s", errSomeDiskOffline, errSomeDiskUnformatted)
 	}
 
 	// Runs house keeping code, like cleaning up tmp files etc.

xl-v1_test.go

@@ -132,7 +132,7 @@ func TestNewXL(t *testing.T) {
 		defer removeAll(disk)
 	}
 	// Initializes all erasure disks
-	_, err := newXLObjects(erasureDisks)
+	_, err := newXLObjects(erasureDisks, nil)
 	if err != nil {
 		t.Fatalf("Unable to initialize erasure, %s", err)
 	}
