fix: add stricter validation for erasure server pools (#11299)

During expansion we need to validate that

- a new deployment is expanded with the newer constraints
- an existing deployment is expanded with its older constraints
- multiple server pools are rejected if they have different
  deploymentIDs or distribution algos

A sketch of the combined cross-pool check follows below.
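In short, every pool must agree with the first pool on deployment ID, distribution algo, and (for current-format pools) parity ratio. A minimal sketch of that rule — poolInfo and validatePools are hypothetical names, not MinIO's; the real checks live in newErasureServerPools in cmd/erasure-server-pool.go below:

package validation

import (
	"errors"
	"fmt"
)

// poolInfo is a hypothetical stand-in for what each server pool
// reports from its on-disk format.json.
type poolInfo struct {
	DeploymentID     string
	DistributionAlgo string
	ParityDrives     int
}

// validatePools rejects any pool whose identity or layout disagrees
// with the first pool, mirroring the rules this commit enforces.
func validatePools(pools []poolInfo) error {
	if len(pools) == 0 {
		return errors.New("no server pools")
	}
	ref := pools[0]
	for _, p := range pools[1:] {
		if p.DeploymentID != ref.DeploymentID {
			return fmt.Errorf("all serverPools should have same deployment ID - expected %s, got %s", ref.DeploymentID, p.DeploymentID)
		}
		if p.DistributionAlgo != ref.DistributionAlgo {
			return fmt.Errorf("all serverPools should have same distributionAlgo - expected %s, got %s", ref.DistributionAlgo, p.DistributionAlgo)
		}
		if p.ParityDrives != ref.ParityDrives {
			return fmt.Errorf("all serverPools should have same parity ratio - expected %d, got %d", ref.ParityDrives, p.ParityDrives)
		}
	}
	return nil
}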
Harshavardhana committed 1ad2b7b699 (parent b5049d541f)
Changed files:

  1. Makefile (2)
  2. cmd/endpoint-ellipses.go (11)
  3. cmd/erasure-server-pool.go (48)
  4. cmd/erasure-sets_test.go (6)
  5. cmd/format-erasure.go (5)
  6. cmd/prepare-storage.go (8)

Makefile

@@ -38,7 +38,7 @@ fmt:
 lint:
 	@echo "Running $@ check"
 	@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
-	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml
+	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=10m --config ./.golangci.yml
 ruleguard:
 	@echo "Running $@ check"

cmd/endpoint-ellipses.go

@@ -363,19 +363,12 @@ func createServerEndpoints(serverAddr string, args ...string) (
 	}
 	var foundPrevLocal bool
-	var commonParityDrives int
 	for _, arg := range args {
 		setArgs, err := GetAllSets(arg)
 		if err != nil {
 			return nil, -1, err
 		}
-		parityDrives := ecDrivesNoConfig(len(setArgs[0]))
-		if commonParityDrives != 0 && commonParityDrives != parityDrives {
-			return nil, -1, fmt.Errorf("All serverPools should have same parity ratio - expected %d, got %d", commonParityDrives, parityDrives)
-		}
 		endpointList, gotSetupType, err := CreateEndpoints(serverAddr, foundPrevLocal, setArgs...)
 		if err != nil {
 			return nil, -1, err
@@ -388,10 +381,6 @@ func createServerEndpoints(serverAddr string, args ...string) (
 			return nil, -1, err
 		}
 		foundPrevLocal = endpointList.atleastOneEndpointLocal()
-		if commonParityDrives == 0 {
-			commonParityDrives = ecDrivesNoConfig(len(setArgs[0]))
-		}
 		if setupType == UnknownSetupType {
 			setupType = gotSetupType
 		}
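The pool-level parity check is dropped from argument parsing here and reinstated in newErasureServerPools (next file), where the on-disk format — and hence the distribution algo — is actually known. ecDrivesNoConfig takes a set's drive count and returns the default parity when no storage-class config overrides it; as a rough, hypothetical illustration only (the exact default table depends on MinIO version and configuration):

package validation

// defaultParityFor is a hypothetical analogue of ecDrivesNoConfig:
// with no storage-class configuration, parity is derived from the
// erasure-set size. The thresholds below are illustrative, not the
// exact values MinIO uses.
func defaultParityFor(setDriveCount int) int {
	switch {
	case setDriveCount <= 1:
		return 0
	case setDriveCount <= 3:
		return 1
	case setDriveCount <= 5:
		return 2
	case setDriveCount <= 7:
		return 3
	default:
		return 4
	}
}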

cmd/erasure-server-pool.go

@@ -55,8 +55,11 @@ func (z *erasureServerPools) SingleZone() bool {
 // Initialize new pool of erasure sets.
 func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServerPools) (ObjectLayer, error) {
 	var (
 		deploymentID       string
-		err                error
+		distributionAlgo   string
+		commonParityDrives int
+		drivesPerSet       int
+		err                error
 
 		formats      = make([]*formatErasureV3, len(endpointServerPools))
 		storageDisks = make([][]StorageAPI, len(endpointServerPools))
@@ -72,15 +75,54 @@ func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServ
 				localDrives = append(localDrives, endpoint.Path)
 			}
 		}
+		if drivesPerSet == 0 {
+			drivesPerSet = ep.DrivesPerSet
+		}
+		if commonParityDrives == 0 {
+			commonParityDrives = ecDrivesNoConfig(ep.DrivesPerSet)
+		}
+
+		// Once distribution algo is set, validate it for the next pool,
+		// before proceeding to write to drives.
+		switch distributionAlgo {
+		case formatErasureVersionV3DistributionAlgoV2:
+			if drivesPerSet != 0 && drivesPerSet != ep.DrivesPerSet {
+				return nil, fmt.Errorf("All legacy serverPools should have same drive per set ratio - expected %d, got %d", drivesPerSet, ep.DrivesPerSet)
+			}
+		case formatErasureVersionV3DistributionAlgoV3:
+			parityDrives := ecDrivesNoConfig(ep.DrivesPerSet)
+			if commonParityDrives != 0 && commonParityDrives != parityDrives {
+				return nil, fmt.Errorf("All current serverPools should have same parity ratio - expected %d, got %d", commonParityDrives, parityDrives)
+			}
+		}
+
 		storageDisks[i], formats[i], err = waitForFormatErasure(local, ep.Endpoints, i+1,
-			ep.SetCount, ep.DrivesPerSet, deploymentID)
+			ep.SetCount, ep.DrivesPerSet, deploymentID, distributionAlgo)
 		if err != nil {
 			return nil, err
 		}
 		if deploymentID == "" {
 			// all zones should have same deployment ID
 			deploymentID = formats[i].ID
 		}
+		if distributionAlgo == "" {
+			distributionAlgo = formats[i].Erasure.DistributionAlgo
+		}
+
+		// Validate if users brought different DeploymentID pools.
+		if deploymentID != formats[i].ID {
+			return nil, fmt.Errorf("All serverPools should have same deployment ID expected %s, got %s", deploymentID, formats[i].ID)
+		}
+
+		// Validate if users brought different distribution algo pools.
+		if distributionAlgo != formats[i].Erasure.DistributionAlgo {
+			return nil, fmt.Errorf("All serverPools should have same distributionAlgo expected %s, got %s", distributionAlgo, formats[i].Erasure.DistributionAlgo)
+		}
+
 		z.serverPools[i], err = newErasureSets(ctx, ep.Endpoints, storageDisks[i], formats[i])
 		if err != nil {
 			return nil, err
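The switch above captures the asymmetry stated in the commit message: pools formatted with the older distribution algo must keep an identical drives-per-set layout, while pools on the newer algo only need a matching parity count. A stand-alone restatement — pool and the algo strings are placeholders for the formatErasureVersionV3DistributionAlgo* constants, not their real values:

package validation

import "fmt"

type pool struct {
	DrivesPerSet int
	ParityDrives int
}

// checkExpansion restates the switch in newErasureServerPools:
// legacy pools must match drives per set exactly; current pools
// must match the parity ratio.
func checkExpansion(algo string, ref, next pool) error {
	switch algo {
	case "v2-legacy": // stands in for formatErasureVersionV3DistributionAlgoV2
		if ref.DrivesPerSet != next.DrivesPerSet {
			return fmt.Errorf("all legacy serverPools should have same drive per set ratio - expected %d, got %d", ref.DrivesPerSet, next.DrivesPerSet)
		}
	case "v3-current": // stands in for formatErasureVersionV3DistributionAlgoV3
		if ref.ParityDrives != next.ParityDrives {
			return fmt.Errorf("all current serverPools should have same parity ratio - expected %d, got %d", ref.ParityDrives, next.ParityDrives)
		}
	}
	return nil
}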

cmd/erasure-sets_test.go

@@ -173,18 +173,18 @@ func TestNewErasureSets(t *testing.T) {
 	}
 	endpoints := mustGetNewEndpoints(erasureDisks...)
-	_, _, err := waitForFormatErasure(true, endpoints, 1, 0, 16, "")
+	_, _, err := waitForFormatErasure(true, endpoints, 1, 0, 16, "", "")
 	if err != errInvalidArgument {
 		t.Fatalf("Expecting error, got %s", err)
 	}
 
-	_, _, err = waitForFormatErasure(true, nil, 1, 1, 16, "")
+	_, _, err = waitForFormatErasure(true, nil, 1, 1, 16, "", "")
 	if err != errInvalidArgument {
 		t.Fatalf("Expecting error, got %s", err)
 	}
 
 	// Initializes all erasure disks
-	storageDisks, format, err := waitForFormatErasure(true, endpoints, 1, 1, 16, "")
+	storageDisks, format, err := waitForFormatErasure(true, endpoints, 1, 1, 16, "", "")
 	if err != nil {
 		t.Fatalf("Unable to format disks for erasure, %s", err)
 	}

cmd/format-erasure.go

@@ -837,7 +837,7 @@ func fixFormatErasureV3(storageDisks []StorageAPI, endpoints Endpoints, formats
 }
 
 // initFormatErasure - save Erasure format configuration on all disks.
-func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount, setDriveCount int, deploymentID string, sErrs []error) (*formatErasureV3, error) {
+func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount, setDriveCount int, deploymentID, distributionAlgo string, sErrs []error) (*formatErasureV3, error) {
 	format := newFormatErasureV3(setCount, setDriveCount)
 	formats := make([]*formatErasureV3, len(storageDisks))
 	wantAtMost := ecDrivesNoConfig(setDriveCount)
@@ -848,6 +848,9 @@ func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount,
 			disk := storageDisks[i*setDriveCount+j]
 			newFormat := format.Clone()
 			newFormat.Erasure.This = format.Erasure.Sets[i][j]
+			if distributionAlgo != "" {
+				newFormat.Erasure.DistributionAlgo = distributionAlgo
+			}
 			if deploymentID != "" {
 				newFormat.ID = deploymentID
 			}
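For orientation, the fields stamped here live in each disk's format.json metadata. A trimmed sketch of the struct shape implied by this diff — the real formatErasureV3 in cmd/format-erasure.go carries version markers and further fields beyond these:

package validation

// Trimmed sketch of formatErasureV3 as implied by the fields used in
// this diff; the real struct has additional version/format fields.
type formatErasureV3 struct {
	ID      string // deployment ID, stamped when deploymentID != ""
	Erasure struct {
		This             string     // UUID of the disk holding this copy
		Sets             [][]string // disk UUIDs per erasure set
		DistributionAlgo string     // stamped when distributionAlgo != ""
	}
}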

cmd/prepare-storage.go

@@ -228,7 +228,7 @@ func isServerResolvable(endpoint Endpoint) error {
 // connect to list of endpoints and load all Erasure disk formats, validate the formats are correct
 // and are in quorum, if no formats are found attempt to initialize all of them for the first
 // time. additionally make sure to close all the disks used in this attempt.
-func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints, poolCount, setCount, setDriveCount int, deploymentID string) (storageDisks []StorageAPI, format *formatErasureV3, err error) {
+func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints, poolCount, setCount, setDriveCount int, deploymentID, distributionAlgo string) (storageDisks []StorageAPI, format *formatErasureV3, err error) {
 	// Initialize all storage disks
 	storageDisks, errs := initStorageDisksWithErrors(endpoints)
@@ -276,7 +276,7 @@ func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints,
 		humanize.Ordinal(poolCount), setCount, setDriveCount)
 
 	// Initialize erasure code format on disks
-	format, err = initFormatErasure(GlobalContext, storageDisks, setCount, setDriveCount, deploymentID, sErrs)
+	format, err = initFormatErasure(GlobalContext, storageDisks, setCount, setDriveCount, deploymentID, distributionAlgo, sErrs)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -345,7 +345,7 @@ func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints,
 }
 
 // Format disks before initialization of object layer.
-func waitForFormatErasure(firstDisk bool, endpoints Endpoints, poolCount, setCount, setDriveCount int, deploymentID string) ([]StorageAPI, *formatErasureV3, error) {
+func waitForFormatErasure(firstDisk bool, endpoints Endpoints, poolCount, setCount, setDriveCount int, deploymentID, distributionAlgo string) ([]StorageAPI, *formatErasureV3, error) {
 	if len(endpoints) == 0 || setCount == 0 || setDriveCount == 0 {
 		return nil, nil, errInvalidArgument
 	}
@@ -372,7 +372,7 @@ func waitForFormatErasure(firstDisk bool, endpoints Endpoints, poolCount, setCou
 	for {
 		select {
 		case <-ticker.C:
-			storageDisks, format, err := connectLoadInitFormats(tries, firstDisk, endpoints, poolCount, setCount, setDriveCount, deploymentID)
+			storageDisks, format, err := connectLoadInitFormats(tries, firstDisk, endpoints, poolCount, setCount, setDriveCount, deploymentID, distributionAlgo)
 			if err != nil {
 				tries++
 				switch err {
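The new distributionAlgo parameter simply rides the existing call chain: waitForFormatErasure polls connectLoadInitFormats on a ticker, which calls initFormatErasure on first format. A generic, self-contained sketch of that retry shape — the interval and the error taxonomy are assumptions, not MinIO's actual values:

package main

import (
	"errors"
	"fmt"
	"time"
)

// errNotReady stands in for MinIO's transient conditions (unformatted
// disks, quorum not yet reached); the real loop distinguishes several.
var errNotReady = errors.New("not ready")

// waitFor retries fn on a ticker until it succeeds or fails fatally,
// the same shape as waitForFormatErasure's loop above.
func waitFor(fn func(tries int) error) error {
	ticker := time.NewTicker(100 * time.Millisecond) // assumed interval
	defer ticker.Stop()
	for tries := 0; ; tries++ {
		<-ticker.C
		err := fn(tries)
		if err == nil {
			return nil
		}
		if !errors.Is(err, errNotReady) {
			return err // non-transient: give up, as the real code does
		}
		fmt.Println("retrying, attempt", tries+1)
	}
}

func main() {
	attempts := 0
	_ = waitFor(func(int) error {
		attempts++
		if attempts < 3 {
			return errNotReady
		}
		return nil
	})
}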
