From ad726b49b4799cdb878ca0a6cdf06ea2dd3c19e7 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Thu, 15 Oct 2020 14:28:50 -0700 Subject: [PATCH] rename zones to serverSets to avoid terminology conflict (#10679) we are bringing in availability zones, we should avoid zones as per server expansion concept. --- cmd/admin-handlers_test.go | 4 +- cmd/admin-server-info.go | 6 +- cmd/background-newdisks-heal-ops.go | 12 +- cmd/bootstrap-peer-server.go | 10 +- cmd/consolelogger.go | 8 +- cmd/endpoint-ellipses.go | 14 +- cmd/endpoint.go | 30 +- cmd/erasure-common_test.go | 4 +- cmd/erasure-healing-common_test.go | 8 +- cmd/erasure-healing_test.go | 20 +- cmd/erasure-metadata-utils_test.go | 8 +- cmd/erasure-object_test.go | 36 +- ...rasure-zones.go => erasure-server-sets.go} | 548 +++++++++--------- cmd/erasure-sets.go | 4 +- cmd/erasure-sets_test.go | 4 +- cmd/globals.go | 2 +- cmd/lock-rest-server.go | 6 +- cmd/notification.go | 2 +- cmd/obdinfo.go | 6 +- cmd/peer-rest-client.go | 6 +- cmd/peer-rest-server.go | 8 +- cmd/routers.go | 10 +- cmd/server-main.go | 8 +- cmd/server-main_test.go | 2 +- cmd/storage-rest-server.go | 4 +- cmd/test-utils_test.go | 26 +- cmd/web-handlers_test.go | 8 +- docs/distributed/DESIGN.md | 16 +- docs/distributed/README.md | 4 +- docs/zh_CN/distributed/DESIGN.md | 8 +- 30 files changed, 416 insertions(+), 416 deletions(-) rename cmd/{erasure-zones.go => erasure-server-sets.go} (68%) diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go index e7b56478c..1bfccda70 100644 --- a/cmd/admin-handlers_test.go +++ b/cmd/admin-handlers_test.go @@ -105,8 +105,8 @@ func initTestErasureObjLayer(ctx context.Context) (ObjectLayer, []string, error) } globalPolicySys = NewPolicySys() - objLayer := &erasureZones{zones: make([]*erasureSets, 1)} - objLayer.zones[0], err = newErasureSets(ctx, endpoints, storageDisks, format) + objLayer := &erasureServerSets{serverSets: make([]*erasureSets, 1)} + objLayer.serverSets[0], err = newErasureSets(ctx, endpoints, storageDisks, format) if err != nil { return nil, nil, err } diff --git a/cmd/admin-server-info.go b/cmd/admin-server-info.go index cbf4d4939..cf6332312 100644 --- a/cmd/admin-server-info.go +++ b/cmd/admin-server-info.go @@ -24,13 +24,13 @@ import ( // getLocalServerProperty - returns madmin.ServerProperties for only the // local endpoints from given list of endpoints -func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin.ServerProperties { +func getLocalServerProperty(endpointServerSets EndpointServerSets, r *http.Request) madmin.ServerProperties { addr := r.Host if globalIsDistErasure { - addr = GetLocalPeer(endpointZones) + addr = GetLocalPeer(endpointServerSets) } network := make(map[string]string) - for _, ep := range endpointZones { + for _, ep := range endpointServerSets { for _, endpoint := range ep.Endpoints { nodeName := endpoint.Host if nodeName == "" { diff --git a/cmd/background-newdisks-heal-ops.go b/cmd/background-newdisks-heal-ops.go index ac5662556..794a65d98 100644 --- a/cmd/background-newdisks-heal-ops.go +++ b/cmd/background-newdisks-heal-ops.go @@ -39,7 +39,7 @@ type healingTracker struct { } func initAutoHeal(ctx context.Context, objAPI ObjectLayer) { - z, ok := objAPI.(*erasureZones) + z, ok := objAPI.(*erasureServerSets) if !ok { return } @@ -107,7 +107,7 @@ func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) { // monitorLocalDisksAndHeal - ensures that detected new disks are healed // 1. Only the concerned erasure set will be listed and healed // 2. 
Only the node hosting the disk is responsible to perform the heal -func monitorLocalDisksAndHeal(ctx context.Context, z *erasureZones, bgSeq *healSequence) { +func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerSets, bgSeq *healSequence) { // Perform automatic disk healing when a disk is replaced locally. for { select { @@ -129,8 +129,8 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureZones, bgSeq *healS logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal content...", len(healDisks))) - erasureSetInZoneDisksToHeal = make([]map[int][]StorageAPI, len(z.zones)) - for i := range z.zones { + erasureSetInZoneDisksToHeal = make([]map[int][]StorageAPI, len(z.serverSets)) + for i := range z.serverSets { erasureSetInZoneDisksToHeal[i] = map[int][]StorageAPI{} } } @@ -149,7 +149,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureZones, bgSeq *healS } // Calculate the set index where the current endpoint belongs - setIndex, _, err := findDiskIndex(z.zones[zoneIdx].format, format) + setIndex, _, err := findDiskIndex(z.serverSets[zoneIdx].format, format) if err != nil { printEndpointError(endpoint, err, false) continue @@ -164,7 +164,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureZones, bgSeq *healS for _, disk := range disks { logger.Info("Healing disk '%s' on %s zone", disk, humanize.Ordinal(i+1)) - lbDisks := z.zones[i].sets[setIndex].getOnlineDisks() + lbDisks := z.serverSets[i].sets[setIndex].getOnlineDisks() if err := healErasureSet(ctx, setIndex, buckets, lbDisks); err != nil { logger.LogIf(ctx, err) continue diff --git a/cmd/bootstrap-peer-server.go b/cmd/bootstrap-peer-server.go index 1064a6405..22af35e23 100644 --- a/cmd/bootstrap-peer-server.go +++ b/cmd/bootstrap-peer-server.go @@ -54,7 +54,7 @@ type bootstrapRESTServer struct{} type ServerSystemConfig struct { MinioPlatform string MinioRuntime string - MinioEndpoints EndpointZones + MinioEndpoints EndpointServerSets } // Diff - returns error on first difference found in two configs. 
@@ -161,9 +161,9 @@ func (client *bootstrapRESTClient) Verify(ctx context.Context, srcCfg ServerSyst return srcCfg.Diff(recvCfg) } -func verifyServerSystemConfig(ctx context.Context, endpointZones EndpointZones) error { +func verifyServerSystemConfig(ctx context.Context, endpointServerSets EndpointServerSets) error { srcCfg := getServerSystemCfg() - clnts := newBootstrapRESTClients(endpointZones) + clnts := newBootstrapRESTClients(endpointServerSets) var onlineServers int var offlineEndpoints []string var retries int @@ -198,10 +198,10 @@ func verifyServerSystemConfig(ctx context.Context, endpointZones EndpointZones) return nil } -func newBootstrapRESTClients(endpointZones EndpointZones) []*bootstrapRESTClient { +func newBootstrapRESTClients(endpointServerSets EndpointServerSets) []*bootstrapRESTClient { seenHosts := set.NewStringSet() var clnts []*bootstrapRESTClient - for _, ep := range endpointZones { + for _, ep := range endpointServerSets { for _, endpoint := range ep.Endpoints { if seenHosts.Contains(endpoint.Host) { continue diff --git a/cmd/consolelogger.go b/cmd/consolelogger.go index 14a101922..af9d2a47a 100644 --- a/cmd/consolelogger.go +++ b/cmd/consolelogger.go @@ -40,8 +40,8 @@ type HTTPConsoleLoggerSys struct { logBuf *ring.Ring } -func mustGetNodeName(endpointZones EndpointZones) (nodeName string) { - host, err := xnet.ParseHost(GetLocalPeer(endpointZones)) +func mustGetNodeName(endpointServerSets EndpointServerSets) (nodeName string) { + host, err := xnet.ParseHost(GetLocalPeer(endpointServerSets)) if err != nil { logger.FatalIf(err, "Unable to start console logging subsystem") } @@ -63,8 +63,8 @@ func NewConsoleLogger(ctx context.Context) *HTTPConsoleLoggerSys { } // SetNodeName - sets the node name if any after distributed setup has initialized -func (sys *HTTPConsoleLoggerSys) SetNodeName(endpointZones EndpointZones) { - sys.nodeName = mustGetNodeName(endpointZones) +func (sys *HTTPConsoleLoggerSys) SetNodeName(endpointServerSets EndpointServerSets) { + sys.nodeName = mustGetNodeName(endpointServerSets) } // HasLogListeners returns true if console log listeners are registered diff --git a/cmd/endpoint-ellipses.go b/cmd/endpoint-ellipses.go index 4da67e154..86426f113 100644 --- a/cmd/endpoint-ellipses.go +++ b/cmd/endpoint-ellipses.go @@ -329,7 +329,7 @@ var ( // CreateServerEndpoints - validates and creates new endpoints from input args, supports // both ellipses and without ellipses transparently. 
func createServerEndpoints(serverAddr string, args ...string) ( - endpointZones EndpointZones, setupType SetupType, err error) { + endpointServerSets EndpointServerSets, setupType SetupType, err error) { if len(args) == 0 { return nil, -1, errInvalidArgument @@ -352,13 +352,13 @@ func createServerEndpoints(serverAddr string, args ...string) ( if err != nil { return nil, -1, err } - endpointZones = append(endpointZones, ZoneEndpoints{ + endpointServerSets = append(endpointServerSets, ZoneEndpoints{ SetCount: len(setArgs), DrivesPerSet: len(setArgs[0]), Endpoints: endpointList, }) setupType = newSetupType - return endpointZones, setupType, nil + return endpointServerSets, setupType, nil } var prevSetupType SetupType @@ -374,12 +374,12 @@ func createServerEndpoints(serverAddr string, args ...string) ( return nil, -1, err } if setDriveCount != 0 && setDriveCount != len(setArgs[0]) { - return nil, -1, fmt.Errorf("All zones should have same drive per set ratio - expected %d, got %d", setDriveCount, len(setArgs[0])) + return nil, -1, fmt.Errorf("All serverSets should have same drive per set ratio - expected %d, got %d", setDriveCount, len(setArgs[0])) } if prevSetupType != UnknownSetupType && prevSetupType != setupType { - return nil, -1, fmt.Errorf("All zones should be of the same setup-type to maintain the original SLA expectations - expected %s, got %s", prevSetupType, setupType) + return nil, -1, fmt.Errorf("All serverSets should be of the same setup-type to maintain the original SLA expectations - expected %s, got %s", prevSetupType, setupType) } - if err = endpointZones.Add(ZoneEndpoints{ + if err = endpointServerSets.Add(ZoneEndpoints{ SetCount: len(setArgs), DrivesPerSet: len(setArgs[0]), Endpoints: endpointList, @@ -393,5 +393,5 @@ func createServerEndpoints(serverAddr string, args ...string) ( prevSetupType = setupType } - return endpointZones, setupType, nil + return endpointServerSets, setupType, nil } diff --git a/cmd/endpoint.go b/cmd/endpoint.go index 471979548..ed56ebe69 100644 --- a/cmd/endpoint.go +++ b/cmd/endpoint.go @@ -201,12 +201,12 @@ type ZoneEndpoints struct { Endpoints Endpoints } -// EndpointZones - list of list of endpoints -type EndpointZones []ZoneEndpoints +// EndpointServerSets - list of list of endpoints +type EndpointServerSets []ZoneEndpoints // GetLocalZoneIdx returns the zone which endpoint belongs to locally. // if ep is remote this code will return -1 zoneIndex -func (l EndpointZones) GetLocalZoneIdx(ep Endpoint) int { +func (l EndpointServerSets) GetLocalZoneIdx(ep Endpoint) int { for i, zep := range l { for _, cep := range zep.Endpoints { if cep.IsLocal && ep.IsLocal { @@ -220,14 +220,14 @@ func (l EndpointZones) GetLocalZoneIdx(ep Endpoint) int { } // Add add zone endpoints -func (l *EndpointZones) Add(zeps ZoneEndpoints) error { +func (l *EndpointServerSets) Add(zeps ZoneEndpoints) error { existSet := set.NewStringSet() for _, zep := range *l { for _, ep := range zep.Endpoints { existSet.Add(ep.String()) } } - // Validate if there are duplicate endpoints across zones + // Validate if there are duplicate endpoints across serverSets for _, ep := range zeps.Endpoints { if existSet.Contains(ep.String()) { return fmt.Errorf("duplicate endpoints found") @@ -238,17 +238,17 @@ func (l *EndpointZones) Add(zeps ZoneEndpoints) error { } // FirstLocal returns true if the first endpoint is local. 
-func (l EndpointZones) FirstLocal() bool { +func (l EndpointServerSets) FirstLocal() bool { return l[0].Endpoints[0].IsLocal } // HTTPS - returns true if secure for URLEndpointType. -func (l EndpointZones) HTTPS() bool { +func (l EndpointServerSets) HTTPS() bool { return l[0].Endpoints.HTTPS() } // NEndpoints - returns all nodes count -func (l EndpointZones) NEndpoints() (count int) { +func (l EndpointServerSets) NEndpoints() (count int) { for _, ep := range l { count += len(ep.Endpoints) } @@ -256,7 +256,7 @@ func (l EndpointZones) NEndpoints() (count int) { } // Hostnames - returns list of unique hostnames -func (l EndpointZones) Hostnames() []string { +func (l EndpointServerSets) Hostnames() []string { foundSet := set.NewStringSet() for _, ep := range l { for _, endpoint := range ep.Endpoints { @@ -688,9 +688,9 @@ func CreateEndpoints(serverAddr string, foundLocal bool, args ...[]string) (Endp // the first element from the set of peers which indicate that // they are local. There is always one entry that is local // even with repeated server endpoints. -func GetLocalPeer(endpointZones EndpointZones) (localPeer string) { +func GetLocalPeer(endpointServerSets EndpointServerSets) (localPeer string) { peerSet := set.NewStringSet() - for _, ep := range endpointZones { + for _, ep := range endpointServerSets { for _, endpoint := range ep.Endpoints { if endpoint.Type() != URLEndpointType { continue @@ -713,9 +713,9 @@ func GetLocalPeer(endpointZones EndpointZones) (localPeer string) { } // GetRemotePeers - get hosts information other than this minio service. -func GetRemotePeers(endpointZones EndpointZones) []string { +func GetRemotePeers(endpointServerSets EndpointServerSets) []string { peerSet := set.NewStringSet() - for _, ep := range endpointZones { + for _, ep := range endpointServerSets { for _, endpoint := range ep.Endpoints { if endpoint.Type() != URLEndpointType { continue @@ -745,12 +745,12 @@ func GetProxyEndpointLocalIndex(proxyEps []ProxyEndpoint) int { } // GetProxyEndpoints - get all endpoints that can be used to proxy list request. 
-func GetProxyEndpoints(endpointZones EndpointZones) ([]ProxyEndpoint, error) { +func GetProxyEndpoints(endpointServerSets EndpointServerSets) ([]ProxyEndpoint, error) { var proxyEps []ProxyEndpoint proxyEpSet := set.NewStringSet() - for _, ep := range endpointZones { + for _, ep := range endpointServerSets { for _, endpoint := range ep.Endpoints { if endpoint.Type() != URLEndpointType { continue diff --git a/cmd/erasure-common_test.go b/cmd/erasure-common_test.go index 31752ae84..fa79e930f 100644 --- a/cmd/erasure-common_test.go +++ b/cmd/erasure-common_test.go @@ -55,8 +55,8 @@ func TestErasureParentDirIsObject(t *testing.T) { t.Fatalf("Unexpected object name returned got %s, expected %s", objInfo.Name, objectName) } - z := obj.(*erasureZones) - xl := z.zones[0].sets[0] + z := obj.(*erasureServerSets) + xl := z.serverSets[0].sets[0] testCases := []struct { parentIsObject bool objectName string diff --git a/cmd/erasure-healing-common_test.go b/cmd/erasure-healing-common_test.go index 31a54fece..b8aa31b27 100644 --- a/cmd/erasure-healing-common_test.go +++ b/cmd/erasure-healing-common_test.go @@ -178,8 +178,8 @@ func TestListOnlineDisks(t *testing.T) { object := "object" data := bytes.Repeat([]byte("a"), 1024) - z := obj.(*erasureZones) - erasureDisks := z.zones[0].sets[0].getDisks() + z := obj.(*erasureServerSets) + erasureDisks := z.serverSets[0].sets[0].getDisks() for i, test := range testCases { _, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{}) if err != nil { @@ -274,8 +274,8 @@ func TestDisksWithAllParts(t *testing.T) { // make data with more than one part partCount := 3 data := bytes.Repeat([]byte("a"), 6*1024*1024*partCount) - z := obj.(*erasureZones) - s := z.zones[0].sets[0] + z := obj.(*erasureServerSets) + s := z.serverSets[0].sets[0] erasureDisks := s.getDisks() err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{}) if err != nil { diff --git a/cmd/erasure-healing_test.go b/cmd/erasure-healing_test.go index 6abfa953c..2fa53820b 100644 --- a/cmd/erasure-healing_test.go +++ b/cmd/erasure-healing_test.go @@ -42,8 +42,8 @@ func TestHealing(t *testing.T) { defer obj.Shutdown(context.Background()) defer removeRoots(fsDirs) - z := obj.(*erasureZones) - er := z.zones[0].sets[0] + z := obj.(*erasureServerSets) + er := z.serverSets[0].sets[0] // Create "bucket" err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{}) @@ -197,8 +197,8 @@ func TestHealObjectCorrupted(t *testing.T) { } // Test 1: Remove the object backend files from the first disk. - z := objLayer.(*erasureZones) - er := z.zones[0].sets[0] + z := objLayer.(*erasureServerSets) + er := z.serverSets[0].sets[0] erasureDisks := er.getDisks() firstDisk := erasureDisks[0] err = firstDisk.DeleteFile(context.Background(), bucket, pathJoin(object, xlStorageFormatFile)) @@ -342,8 +342,8 @@ func TestHealObjectErasure(t *testing.T) { } // Remove the object backend files from the first disk. - z := obj.(*erasureZones) - er := z.zones[0].sets[0] + z := obj.(*erasureServerSets) + er := z.serverSets[0].sets[0] firstDisk := er.getDisks()[0] _, err = obj.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{}) @@ -366,7 +366,7 @@ func TestHealObjectErasure(t *testing.T) { } erasureDisks := er.getDisks() - z.zones[0].erasureDisksMu.Lock() + z.serverSets[0].erasureDisksMu.Lock() er.getDisks = func() []StorageAPI { // Nil more than half the disks, to remove write quorum. 
for i := 0; i <= len(erasureDisks)/2; i++ { @@ -374,7 +374,7 @@ func TestHealObjectErasure(t *testing.T) { } return erasureDisks } - z.zones[0].erasureDisksMu.Unlock() + z.serverSets[0].erasureDisksMu.Unlock() // Try healing now, expect to receive errDiskNotFound. _, err = obj.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealDeepScan}) @@ -419,8 +419,8 @@ func TestHealEmptyDirectoryErasure(t *testing.T) { } // Remove the object backend files from the first disk. - z := obj.(*erasureZones) - er := z.zones[0].sets[0] + z := obj.(*erasureServerSets) + er := z.serverSets[0].sets[0] firstDisk := er.getDisks()[0] err = firstDisk.DeleteVol(context.Background(), pathJoin(bucket, encodeDirObject(object)), true) if err != nil { diff --git a/cmd/erasure-metadata-utils_test.go b/cmd/erasure-metadata-utils_test.go index 43341eb1d..11f8ff363 100644 --- a/cmd/erasure-metadata-utils_test.go +++ b/cmd/erasure-metadata-utils_test.go @@ -148,13 +148,13 @@ func TestShuffleDisks(t *testing.T) { t.Fatal(err) } defer removeRoots(disks) - z := objLayer.(*erasureZones) + z := objLayer.(*erasureServerSets) testShuffleDisks(t, z) } // Test shuffleDisks which returns shuffled slice of disks for their actual distribution. -func testShuffleDisks(t *testing.T, z *erasureZones) { - disks := z.zones[0].GetDisks(0)() +func testShuffleDisks(t *testing.T, z *erasureServerSets) { + disks := z.serverSets[0].GetDisks(0)() distribution := []int{16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15} shuffledDisks := shuffleDisks(disks, distribution) // From the "distribution" above you can notice that: @@ -196,6 +196,6 @@ func TestEvalDisks(t *testing.T) { t.Fatal(err) } defer removeRoots(disks) - z := objLayer.(*erasureZones) + z := objLayer.(*erasureServerSets) testShuffleDisks(t, z) } diff --git a/cmd/erasure-object_test.go b/cmd/erasure-object_test.go index 85318a55e..26b971e5a 100644 --- a/cmd/erasure-object_test.go +++ b/cmd/erasure-object_test.go @@ -132,8 +132,8 @@ func TestErasureDeleteObjectsErasureSet(t *testing.T) { for _, dir := range fsDirs { defer os.RemoveAll(dir) } - z := obj.(*erasureZones) - xl := z.zones[0].sets[0] + z := obj.(*erasureServerSets) + xl := z.serverSets[0].sets[0] objs = append(objs, xl) } @@ -205,8 +205,8 @@ func TestErasureDeleteObjectDiskNotFound(t *testing.T) { defer obj.Shutdown(context.Background()) defer removeRoots(fsDirs) - z := obj.(*erasureZones) - xl := z.zones[0].sets[0] + z := obj.(*erasureServerSets) + xl := z.serverSets[0].sets[0] // Create "bucket" err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{}) @@ -225,7 +225,7 @@ func TestErasureDeleteObjectDiskNotFound(t *testing.T) { // for a 16 disk setup, quorum is 9. To simulate disks not found yet // quorum is available, we remove disks leaving quorum disks behind. erasureDisks := xl.getDisks() - z.zones[0].erasureDisksMu.Lock() + z.serverSets[0].erasureDisksMu.Lock() xl.getDisks = func() []StorageAPI { for i := range erasureDisks[:7] { erasureDisks[i] = newNaughtyDisk(erasureDisks[i], nil, errFaultyDisk) @@ -233,7 +233,7 @@ func TestErasureDeleteObjectDiskNotFound(t *testing.T) { return erasureDisks } - z.zones[0].erasureDisksMu.Unlock() + z.serverSets[0].erasureDisksMu.Unlock() _, err = obj.DeleteObject(ctx, bucket, object, ObjectOptions{}) if err != nil { t.Fatal(err) @@ -247,14 +247,14 @@ func TestErasureDeleteObjectDiskNotFound(t *testing.T) { // Remove one more disk to 'lose' quorum, by setting it to nil. 
erasureDisks = xl.getDisks() - z.zones[0].erasureDisksMu.Lock() + z.serverSets[0].erasureDisksMu.Lock() xl.getDisks = func() []StorageAPI { erasureDisks[7] = nil erasureDisks[8] = nil return erasureDisks } - z.zones[0].erasureDisksMu.Unlock() + z.serverSets[0].erasureDisksMu.Unlock() _, err = obj.DeleteObject(ctx, bucket, object, ObjectOptions{}) // since majority of disks are not available, metaquorum is not achieved and hence errErasureWriteQuorum error if err != toObjectErr(errErasureWriteQuorum, bucket, object) { @@ -275,8 +275,8 @@ func TestGetObjectNoQuorum(t *testing.T) { defer obj.Shutdown(context.Background()) defer removeRoots(fsDirs) - z := obj.(*erasureZones) - xl := z.zones[0].sets[0] + z := obj.(*erasureServerSets) + xl := z.serverSets[0].sets[0] // Create "bucket" err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{}) @@ -311,11 +311,11 @@ func TestGetObjectNoQuorum(t *testing.T) { erasureDisks[i] = newNaughtyDisk(erasureDisks[i], diskErrors, errFaultyDisk) } } - z.zones[0].erasureDisksMu.Lock() + z.serverSets[0].erasureDisksMu.Lock() xl.getDisks = func() []StorageAPI { return erasureDisks } - z.zones[0].erasureDisksMu.Unlock() + z.serverSets[0].erasureDisksMu.Unlock() // Fetch object from store. err = xl.GetObject(ctx, bucket, object, 0, int64(len("abcd")), ioutil.Discard, "", opts) if err != toObjectErr(errErasureReadQuorum, bucket, object) { @@ -338,8 +338,8 @@ func TestPutObjectNoQuorum(t *testing.T) { defer obj.Shutdown(context.Background()) defer removeRoots(fsDirs) - z := obj.(*erasureZones) - xl := z.zones[0].sets[0] + z := obj.(*erasureServerSets) + xl := z.serverSets[0].sets[0] // Create "bucket" err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{}) @@ -374,11 +374,11 @@ func TestPutObjectNoQuorum(t *testing.T) { erasureDisks[i] = newNaughtyDisk(erasureDisks[i], diskErrors, errFaultyDisk) } } - z.zones[0].erasureDisksMu.Lock() + z.serverSets[0].erasureDisksMu.Lock() xl.getDisks = func() []StorageAPI { return erasureDisks } - z.zones[0].erasureDisksMu.Unlock() + z.serverSets[0].erasureDisksMu.Unlock() // Upload new content to same object "object" _, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts) if err != toObjectErr(errErasureWriteQuorum, bucket, object) { @@ -404,8 +404,8 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin partCount := 3 data := bytes.Repeat([]byte("a"), 6*1024*1024*partCount) - z := obj.(*erasureZones) - xl := z.zones[0].sets[0] + z := obj.(*erasureServerSets) + xl := z.serverSets[0].sets[0] erasureDisks := xl.getDisks() ctx, cancel := context.WithCancel(GlobalContext) diff --git a/cmd/erasure-zones.go b/cmd/erasure-server-sets.go similarity index 68% rename from cmd/erasure-zones.go rename to cmd/erasure-server-sets.go index 850456255..5b90ac44d 100644 --- a/cmd/erasure-zones.go +++ b/cmd/erasure-server-sets.go @@ -38,34 +38,34 @@ import ( "github.com/minio/minio/pkg/sync/errgroup" ) -type erasureZones struct { +type erasureServerSets struct { GatewayUnsupported - zones []*erasureSets + serverSets []*erasureSets // Shut down async operations shutdown context.CancelFunc } -func (z *erasureZones) SingleZone() bool { - return len(z.zones) == 1 +func (z *erasureServerSets) SingleZone() bool { + return len(z.serverSets) == 1 } // Initialize new zone of erasure sets. 
-func newErasureZones(ctx context.Context, endpointZones EndpointZones) (ObjectLayer, error) { +func newErasureServerSets(ctx context.Context, endpointServerSets EndpointServerSets) (ObjectLayer, error) { var ( deploymentID string err error - formats = make([]*formatErasureV3, len(endpointZones)) - storageDisks = make([][]StorageAPI, len(endpointZones)) - z = &erasureZones{zones: make([]*erasureSets, len(endpointZones))} + formats = make([]*formatErasureV3, len(endpointServerSets)) + storageDisks = make([][]StorageAPI, len(endpointServerSets)) + z = &erasureServerSets{serverSets: make([]*erasureSets, len(endpointServerSets))} ) var localDrives []string - local := endpointZones.FirstLocal() - for i, ep := range endpointZones { + local := endpointServerSets.FirstLocal() + for i, ep := range endpointServerSets { for _, endpoint := range ep.Endpoints { if endpoint.IsLocal { localDrives = append(localDrives, endpoint.Path) @@ -79,7 +79,7 @@ func newErasureZones(ctx context.Context, endpointZones EndpointZones) (ObjectLa if deploymentID == "" { deploymentID = formats[i].ID } - z.zones[i], err = newErasureSets(ctx, ep.Endpoints, storageDisks[i], formats[i]) + z.serverSets[i], err = newErasureSets(ctx, ep.Endpoints, storageDisks[i], formats[i]) if err != nil { return nil, err } @@ -89,19 +89,19 @@ func newErasureZones(ctx context.Context, endpointZones EndpointZones) (ObjectLa return z, nil } -func (z *erasureZones) NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker { - return z.zones[0].NewNSLock(ctx, bucket, objects...) +func (z *erasureServerSets) NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker { + return z.serverSets[0].NewNSLock(ctx, bucket, objects...) } -func (z *erasureZones) GetAllLockers() []dsync.NetLocker { - return z.zones[0].GetAllLockers() +func (z *erasureServerSets) GetAllLockers() []dsync.NetLocker { + return z.serverSets[0].GetAllLockers() } -func (z *erasureZones) SetDriveCount() int { - return z.zones[0].SetDriveCount() +func (z *erasureServerSets) SetDriveCount() int { + return z.serverSets[0].SetDriveCount() } -type zonesAvailableSpace []zoneAvailableSpace +type serverSetsAvailableSpace []zoneAvailableSpace type zoneAvailableSpace struct { Index int @@ -109,7 +109,7 @@ type zoneAvailableSpace struct { } // TotalAvailable - total available space -func (p zonesAvailableSpace) TotalAvailable() uint64 { +func (p serverSetsAvailableSpace) TotalAvailable() uint64 { total := uint64(0) for _, z := range p { total += z.Available @@ -118,42 +118,42 @@ func (p zonesAvailableSpace) TotalAvailable() uint64 { } // getAvailableZoneIdx will return an index that can hold size bytes. -// -1 is returned if no zones have available space for the size given. -func (z *erasureZones) getAvailableZoneIdx(ctx context.Context, size int64) int { - zones := z.getZonesAvailableSpace(ctx, size) - total := zones.TotalAvailable() +// -1 is returned if no serverSets have available space for the size given. +func (z *erasureServerSets) getAvailableZoneIdx(ctx context.Context, size int64) int { + serverSets := z.getServerSetsAvailableSpace(ctx, size) + total := serverSets.TotalAvailable() if total == 0 { return -1 } // choose when we reach this many choose := rand.Uint64() % total atTotal := uint64(0) - for _, zone := range zones { + for _, zone := range serverSets { atTotal += zone.Available if atTotal > choose && zone.Available > 0 { return zone.Index } } // Should not happen, but print values just in case. 
- logger.LogIf(ctx, fmt.Errorf("reached end of zones (total: %v, atTotal: %v, choose: %v)", total, atTotal, choose)) + logger.LogIf(ctx, fmt.Errorf("reached end of serverSets (total: %v, atTotal: %v, choose: %v)", total, atTotal, choose)) return -1 } -// getZonesAvailableSpace will return the available space of each zone after storing the content. +// getServerSetsAvailableSpace will return the available space of each zone after storing the content. // If there is not enough space the zone will return 0 bytes available. // Negative sizes are seen as 0 bytes. -func (z *erasureZones) getZonesAvailableSpace(ctx context.Context, size int64) zonesAvailableSpace { +func (z *erasureServerSets) getServerSetsAvailableSpace(ctx context.Context, size int64) serverSetsAvailableSpace { if size < 0 { size = 0 } - var zones = make(zonesAvailableSpace, len(z.zones)) + var serverSets = make(serverSetsAvailableSpace, len(z.serverSets)) - storageInfos := make([]StorageInfo, len(z.zones)) - g := errgroup.WithNErrs(len(z.zones)) - for index := range z.zones { + storageInfos := make([]StorageInfo, len(z.serverSets)) + g := errgroup.WithNErrs(len(z.serverSets)) + for index := range z.serverSets { index := index g.Go(func() error { - storageInfos[index] = z.zones[index].StorageUsageInfo(ctx) + storageInfos[index] = z.serverSets[index].StorageUsageInfo(ctx) return nil }, index) } @@ -182,21 +182,21 @@ func (z *erasureZones) getZonesAvailableSpace(ctx context.Context, size int64) z available = 0 } } - zones[i] = zoneAvailableSpace{ + serverSets[i] = zoneAvailableSpace{ Index: i, Available: available, } } - return zones + return serverSets } // getZoneIdx returns the found previous object and its corresponding zone idx, // if none are found falls back to most available space zone. 
-func (z *erasureZones) getZoneIdx(ctx context.Context, bucket, object string, opts ObjectOptions, size int64) (idx int, err error) { +func (z *erasureServerSets) getZoneIdx(ctx context.Context, bucket, object string, opts ObjectOptions, size int64) (idx int, err error) { if z.SingleZone() { return 0, nil } - for i, zone := range z.zones { + for i, zone := range z.serverSets { objInfo, err := zone.GetObjectInfo(ctx, bucket, object, opts) switch err.(type) { case ObjectNotFound: @@ -226,18 +226,18 @@ func (z *erasureZones) getZoneIdx(ctx context.Context, bucket, object string, op return idx, nil } -func (z *erasureZones) Shutdown(ctx context.Context) error { +func (z *erasureServerSets) Shutdown(ctx context.Context) error { defer z.shutdown() if z.SingleZone() { - return z.zones[0].Shutdown(ctx) + return z.serverSets[0].Shutdown(ctx) } - g := errgroup.WithNErrs(len(z.zones)) + g := errgroup.WithNErrs(len(z.serverSets)) - for index := range z.zones { + for index := range z.serverSets { index := index g.Go(func() error { - return z.zones[index].Shutdown(ctx) + return z.serverSets[index].Shutdown(ctx) }, index) } @@ -250,20 +250,20 @@ func (z *erasureZones) Shutdown(ctx context.Context) error { return nil } -func (z *erasureZones) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) { +func (z *erasureServerSets) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) { if z.SingleZone() { - return z.zones[0].StorageInfo(ctx, local) + return z.serverSets[0].StorageInfo(ctx, local) } var storageInfo StorageInfo - storageInfos := make([]StorageInfo, len(z.zones)) - storageInfosErrs := make([][]error, len(z.zones)) - g := errgroup.WithNErrs(len(z.zones)) - for index := range z.zones { + storageInfos := make([]StorageInfo, len(z.serverSets)) + storageInfosErrs := make([][]error, len(z.serverSets)) + g := errgroup.WithNErrs(len(z.serverSets)) + for index := range z.serverSets { index := index g.Go(func() error { - storageInfos[index], storageInfosErrs[index] = z.zones[index].StorageInfo(ctx, local) + storageInfos[index], storageInfosErrs[index] = z.serverSets[index].StorageInfo(ctx, local) return nil }, index) } @@ -284,13 +284,13 @@ func (z *erasureZones) StorageInfo(ctx context.Context, local bool) (StorageInfo storageInfo.Backend.RRSCParity = storageInfos[0].Backend.RRSCParity var errs []error - for i := range z.zones { + for i := range z.serverSets { errs = append(errs, storageInfosErrs[i]...) } return storageInfo, errs } -func (z *erasureZones) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error { +func (z *erasureServerSets) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -301,8 +301,8 @@ func (z *erasureZones) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter var knownBuckets = make(map[string]struct{}) // used to deduplicate buckets. var allBuckets []BucketInfo - // Collect for each set in zones. - for _, z := range z.zones { + // Collect for each set in serverSets. 
+ for _, z := range z.serverSets { buckets, err := z.ListBuckets(ctx) if err != nil { return err @@ -406,17 +406,17 @@ func (z *erasureZones) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter return firstErr } -// MakeBucketWithLocation - creates a new bucket across all zones simultaneously +// MakeBucketWithLocation - creates a new bucket across all serverSets simultaneously // even if one of the sets fail to create buckets, we proceed all the successful // operations. -func (z *erasureZones) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error { - g := errgroup.WithNErrs(len(z.zones)) +func (z *erasureServerSets) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error { + g := errgroup.WithNErrs(len(z.serverSets)) // Create buckets in parallel across all sets. - for index := range z.zones { + for index := range z.serverSets { index := index g.Go(func() error { - return z.zones[index].MakeBucketWithLocation(ctx, bucket, opts) + return z.serverSets[index].MakeBucketWithLocation(ctx, bucket, opts) }, index) } @@ -446,14 +446,14 @@ func (z *erasureZones) MakeBucketWithLocation(ctx context.Context, bucket string } -func (z *erasureZones) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) { +func (z *erasureServerSets) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) { if err = checkGetObjArgs(ctx, bucket, object); err != nil { return nil, err } object = encodeDirObject(object) - for _, zone := range z.zones { + for _, zone := range z.serverSets { gr, err = zone.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts) if err != nil { if isErrObjectNotFound(err) || isErrVersionNotFound(err) { @@ -469,14 +469,14 @@ func (z *erasureZones) GetObjectNInfo(ctx context.Context, bucket, object string return gr, ObjectNotFound{Bucket: bucket, Object: object} } -func (z *erasureZones) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error { +func (z *erasureServerSets) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error { if err := checkGetObjArgs(ctx, bucket, object); err != nil { return err } object = encodeDirObject(object) - for _, zone := range z.zones { + for _, zone := range z.serverSets { if err := zone.GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts); err != nil { if isErrObjectNotFound(err) || isErrVersionNotFound(err) { continue @@ -491,13 +491,13 @@ func (z *erasureZones) GetObject(ctx context.Context, bucket, object string, sta return ObjectNotFound{Bucket: bucket, Object: object} } -func (z *erasureZones) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { +func (z *erasureServerSets) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { if err = checkGetObjArgs(ctx, bucket, object); err != nil { return objInfo, err } object = encodeDirObject(object) - for _, zone := range z.zones { + for _, zone := range z.serverSets { objInfo, err = zone.GetObjectInfo(ctx, bucket, object, opts) if err != nil { if isErrObjectNotFound(err) || isErrVersionNotFound(err) { @@ -515,7 +515,7 @@ func 
(z *erasureZones) GetObjectInfo(ctx context.Context, bucket, object string, } // PutObject - writes an object to least used erasure zone. -func (z *erasureZones) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (ObjectInfo, error) { +func (z *erasureServerSets) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (ObjectInfo, error) { // Validate put object input args. if err := checkPutObjectArgs(ctx, bucket, object, z); err != nil { return ObjectInfo{}, err @@ -524,7 +524,7 @@ func (z *erasureZones) PutObject(ctx context.Context, bucket string, object stri object = encodeDirObject(object) if z.SingleZone() { - return z.zones[0].PutObject(ctx, bucket, object, data, opts) + return z.serverSets[0].PutObject(ctx, bucket, object, data, opts) } idx, err := z.getZoneIdx(ctx, bucket, object, opts, data.Size()) @@ -533,10 +533,10 @@ func (z *erasureZones) PutObject(ctx context.Context, bucket string, object stri } // Overwrite the object at the right zone - return z.zones[idx].PutObject(ctx, bucket, object, data, opts) + return z.serverSets[idx].PutObject(ctx, bucket, object, data, opts) } -func (z *erasureZones) DeleteObject(ctx context.Context, bucket string, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { +func (z *erasureServerSets) DeleteObject(ctx context.Context, bucket string, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { if err = checkDelObjArgs(ctx, bucket, object); err != nil { return objInfo, err } @@ -544,9 +544,9 @@ func (z *erasureZones) DeleteObject(ctx context.Context, bucket string, object s object = encodeDirObject(object) if z.SingleZone() { - return z.zones[0].DeleteObject(ctx, bucket, object, opts) + return z.serverSets[0].DeleteObject(ctx, bucket, object, opts) } - for _, zone := range z.zones { + for _, zone := range z.serverSets { objInfo, err = zone.DeleteObject(ctx, bucket, object, opts) if err == nil { return objInfo, nil @@ -559,7 +559,7 @@ func (z *erasureZones) DeleteObject(ctx context.Context, bucket string, object s return objInfo, err } -func (z *erasureZones) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) { +func (z *erasureServerSets) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) { derrs := make([]error, len(objects)) dobjects := make([]DeletedObject, len(objects)) objSets := set.NewStringSet() @@ -580,7 +580,7 @@ func (z *erasureZones) DeleteObjects(ctx context.Context, bucket string, objects } defer multiDeleteLock.Unlock() - for _, zone := range z.zones { + for _, zone := range z.serverSets { deletedObjects, errs := zone.DeleteObjects(ctx, bucket, objects, opts) for i, derr := range errs { if derrs[i] == nil { @@ -596,7 +596,7 @@ func (z *erasureZones) DeleteObjects(ctx context.Context, bucket string, objects return dobjects, derrs } -func (z *erasureZones) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) { +func (z *erasureServerSets) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) { srcObject = encodeDirObject(srcObject) dstObject = encodeDirObject(dstObject) @@ -610,12 +610,12 @@ func (z *erasureZones) CopyObject(ctx 
context.Context, srcBucket, srcObject, dst if cpSrcDstSame && srcInfo.metadataOnly { // Version ID is set for the destination and source == destination version ID. if dstOpts.VersionID != "" && srcOpts.VersionID == dstOpts.VersionID { - return z.zones[zoneIdx].CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts) + return z.serverSets[zoneIdx].CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts) } // Destination is not versioned and source version ID is empty // perform an in-place update. if !dstOpts.Versioned && srcOpts.VersionID == "" { - return z.zones[zoneIdx].CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts) + return z.serverSets[zoneIdx].CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts) } // Destination is versioned, source is not destination version, // as a special case look for if the source object is not legacy @@ -625,7 +625,7 @@ func (z *erasureZones) CopyObject(ctx context.Context, srcBucket, srcObject, dst // CopyObject optimization where we don't create an entire copy // of the content, instead we add a reference. srcInfo.versionOnly = true - return z.zones[zoneIdx].CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts) + return z.serverSets[zoneIdx].CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts) } } @@ -636,10 +636,10 @@ func (z *erasureZones) CopyObject(ctx context.Context, srcBucket, srcObject, dst VersionID: dstOpts.VersionID, } - return z.zones[zoneIdx].PutObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts) + return z.serverSets[zoneIdx].PutObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts) } -func (z *erasureZones) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (ListObjectsV2Info, error) { +func (z *erasureServerSets) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (ListObjectsV2Info, error) { marker := continuationToken if marker == "" { marker = startAfter @@ -660,21 +660,21 @@ func (z *erasureZones) ListObjectsV2(ctx context.Context, bucket, prefix, contin return listObjectsV2Info, err } -func (z *erasureZones) listObjectsNonSlash(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) { +func (z *erasureServerSets) listObjectsNonSlash(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) { - zonesEntryChs := make([][]FileInfoCh, 0, len(z.zones)) - zonesListTolerancePerSet := make([]int, 0, len(z.zones)) + serverSetsEntryChs := make([][]FileInfoCh, 0, len(z.serverSets)) + serverSetsListTolerancePerSet := make([]int, 0, len(z.serverSets)) endWalkCh := make(chan struct{}) defer close(endWalkCh) - for _, zone := range z.zones { - zonesEntryChs = append(zonesEntryChs, + for _, zone := range z.serverSets { + serverSetsEntryChs = append(serverSetsEntryChs, zone.startMergeWalksN(ctx, bucket, prefix, "", true, endWalkCh, zone.listTolerancePerSet, false)) if zone.listTolerancePerSet == -1 { - zonesListTolerancePerSet = append(zonesListTolerancePerSet, zone.setDriveCount/2) + serverSetsListTolerancePerSet = append(serverSetsListTolerancePerSet, zone.setDriveCount/2) } else { - zonesListTolerancePerSet = append(zonesListTolerancePerSet, 
zone.listTolerancePerSet-2) + serverSetsListTolerancePerSet = append(serverSetsListTolerancePerSet, zone.listTolerancePerSet-2) } } @@ -682,11 +682,11 @@ func (z *erasureZones) listObjectsNonSlash(ctx context.Context, bucket, prefix, var eof bool var prevPrefix string - zonesEntriesInfos := make([][]FileInfo, 0, len(zonesEntryChs)) - zonesEntriesValid := make([][]bool, 0, len(zonesEntryChs)) - for _, entryChs := range zonesEntryChs { - zonesEntriesInfos = append(zonesEntriesInfos, make([]FileInfo, len(entryChs))) - zonesEntriesValid = append(zonesEntriesValid, make([]bool, len(entryChs))) + serverSetsEntriesInfos := make([][]FileInfo, 0, len(serverSetsEntryChs)) + serverSetsEntriesValid := make([][]bool, 0, len(serverSetsEntryChs)) + for _, entryChs := range serverSetsEntryChs { + serverSetsEntriesInfos = append(serverSetsEntriesInfos, make([]FileInfo, len(entryChs))) + serverSetsEntriesValid = append(serverSetsEntriesValid, make([]bool, len(entryChs))) } for { @@ -694,13 +694,13 @@ func (z *erasureZones) listObjectsNonSlash(ctx context.Context, bucket, prefix, break } - result, quorumCount, zoneIndex, ok := lexicallySortedEntryZone(zonesEntryChs, zonesEntriesInfos, zonesEntriesValid) + result, quorumCount, zoneIndex, ok := lexicallySortedEntryZone(serverSetsEntryChs, serverSetsEntriesInfos, serverSetsEntriesValid) if !ok { eof = true break } - if quorumCount < zonesListTolerancePerSet[zoneIndex] { + if quorumCount < serverSetsListTolerancePerSet[zoneIndex] { // Skip entries which are not found on upto expected tolerance continue } @@ -777,7 +777,7 @@ func (z *erasureZones) listObjectsNonSlash(ctx context.Context, bucket, prefix, return result, nil } -func (z *erasureZones) listObjectsSplunk(ctx context.Context, bucket, prefix, marker string, maxKeys int) (loi ListObjectsInfo, err error) { +func (z *erasureServerSets) listObjectsSplunk(ctx context.Context, bucket, prefix, marker string, maxKeys int) (loi ListObjectsInfo, err error) { if strings.Contains(prefix, guidSplunk) { logger.LogIf(ctx, NotImplemented{}) return loi, NotImplemented{} @@ -785,26 +785,26 @@ func (z *erasureZones) listObjectsSplunk(ctx context.Context, bucket, prefix, ma recursive := true - zonesEntryChs := make([][]FileInfoCh, 0, len(z.zones)) - zonesEndWalkCh := make([]chan struct{}, 0, len(z.zones)) - zonesListTolerancePerSet := make([]int, 0, len(z.zones)) + serverSetsEntryChs := make([][]FileInfoCh, 0, len(z.serverSets)) + serverSetsEndWalkCh := make([]chan struct{}, 0, len(z.serverSets)) + serverSetsListTolerancePerSet := make([]int, 0, len(z.serverSets)) - for _, zone := range z.zones { + for _, zone := range z.serverSets { entryChs, endWalkCh := zone.poolSplunk.Release(listParams{bucket, recursive, marker, prefix}) if entryChs == nil { endWalkCh = make(chan struct{}) entryChs = zone.startMergeWalksN(ctx, bucket, prefix, marker, recursive, endWalkCh, zone.listTolerancePerSet, true) } - zonesEntryChs = append(zonesEntryChs, entryChs) - zonesEndWalkCh = append(zonesEndWalkCh, endWalkCh) + serverSetsEntryChs = append(serverSetsEntryChs, entryChs) + serverSetsEndWalkCh = append(serverSetsEndWalkCh, endWalkCh) if zone.listTolerancePerSet == -1 { - zonesListTolerancePerSet = append(zonesListTolerancePerSet, zone.setDriveCount/2) + serverSetsListTolerancePerSet = append(serverSetsListTolerancePerSet, zone.setDriveCount/2) } else { - zonesListTolerancePerSet = append(zonesListTolerancePerSet, zone.listTolerancePerSet-2) + serverSetsListTolerancePerSet = append(serverSetsListTolerancePerSet, zone.listTolerancePerSet-2) } } 
- entries := mergeZonesEntriesCh(zonesEntryChs, maxKeys, zonesListTolerancePerSet) + entries := mergeServerSetsEntriesCh(serverSetsEntryChs, maxKeys, serverSetsListTolerancePerSet) if len(entries.Files) == 0 { return loi, nil } @@ -826,15 +826,15 @@ func (z *erasureZones) listObjectsSplunk(ctx context.Context, bucket, prefix, ma } if loi.IsTruncated { - for i, zone := range z.zones { - zone.poolSplunk.Set(listParams{bucket, recursive, loi.NextMarker, prefix}, zonesEntryChs[i], - zonesEndWalkCh[i]) + for i, zone := range z.serverSets { + zone.poolSplunk.Set(listParams{bucket, recursive, loi.NextMarker, prefix}, serverSetsEntryChs[i], + serverSetsEndWalkCh[i]) } } return loi, nil } -func (z *erasureZones) listObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { +func (z *erasureServerSets) listObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { loi := ListObjectsInfo{} if err := checkListObjsArgs(ctx, bucket, prefix, marker, z); err != nil { @@ -881,26 +881,26 @@ func (z *erasureZones) listObjects(ctx context.Context, bucket, prefix, marker, recursive = false } - zonesEntryChs := make([][]FileInfoCh, 0, len(z.zones)) - zonesEndWalkCh := make([]chan struct{}, 0, len(z.zones)) - zonesListTolerancePerSet := make([]int, 0, len(z.zones)) + serverSetsEntryChs := make([][]FileInfoCh, 0, len(z.serverSets)) + serverSetsEndWalkCh := make([]chan struct{}, 0, len(z.serverSets)) + serverSetsListTolerancePerSet := make([]int, 0, len(z.serverSets)) - for _, zone := range z.zones { + for _, zone := range z.serverSets { entryChs, endWalkCh := zone.pool.Release(listParams{bucket, recursive, marker, prefix}) if entryChs == nil { endWalkCh = make(chan struct{}) entryChs = zone.startMergeWalksN(ctx, bucket, prefix, marker, recursive, endWalkCh, zone.listTolerancePerSet, false) } - zonesEntryChs = append(zonesEntryChs, entryChs) - zonesEndWalkCh = append(zonesEndWalkCh, endWalkCh) + serverSetsEntryChs = append(serverSetsEntryChs, entryChs) + serverSetsEndWalkCh = append(serverSetsEndWalkCh, endWalkCh) if zone.listTolerancePerSet == -1 { - zonesListTolerancePerSet = append(zonesListTolerancePerSet, zone.setDriveCount/2) + serverSetsListTolerancePerSet = append(serverSetsListTolerancePerSet, zone.setDriveCount/2) } else { - zonesListTolerancePerSet = append(zonesListTolerancePerSet, zone.listTolerancePerSet-2) + serverSetsListTolerancePerSet = append(serverSetsListTolerancePerSet, zone.listTolerancePerSet-2) } } - entries := mergeZonesEntriesCh(zonesEntryChs, maxKeys, zonesListTolerancePerSet) + entries := mergeServerSetsEntriesCh(serverSetsEntryChs, maxKeys, serverSetsListTolerancePerSet) if len(entries.Files) == 0 { return loi, nil } @@ -919,15 +919,15 @@ func (z *erasureZones) listObjects(ctx context.Context, bucket, prefix, marker, loi.Objects = append(loi.Objects, objInfo) } if loi.IsTruncated { - for i, zone := range z.zones { - zone.pool.Set(listParams{bucket, recursive, loi.NextMarker, prefix}, zonesEntryChs[i], - zonesEndWalkCh[i]) + for i, zone := range z.serverSets { + zone.pool.Set(listParams{bucket, recursive, loi.NextMarker, prefix}, serverSetsEntryChs[i], + serverSetsEndWalkCh[i]) } } return loi, nil } -// Calculate least entry across zones and across multiple FileInfo +// Calculate least entry across serverSets and across multiple FileInfo // channels, returns the least common entry and the total number of times // we found this entry. 
Additionally also returns a boolean // to indicate if the caller needs to call this function @@ -1020,7 +1020,7 @@ func lexicallySortedEntryZone(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileI return lentry, lexicallySortedEntryCount, zoneIndex, isTruncated } -// Calculate least entry across zones and across multiple FileInfoVersions +// Calculate least entry across serverSets and across multiple FileInfoVersions // channels, returns the least common entry and the total number of times // we found this entry. Additionally also returns a boolean // to indicate if the caller needs to call this function @@ -1111,24 +1111,24 @@ func lexicallySortedEntryZoneVersions(zoneEntryChs [][]FileInfoVersionsCh, zoneE return lentry, lexicallySortedEntryCount, zoneIndex, isTruncated } -// mergeZonesEntriesVersionsCh - merges FileInfoVersions channel to entries upto maxKeys. -func mergeZonesEntriesVersionsCh(zonesEntryChs [][]FileInfoVersionsCh, maxKeys int, zonesListTolerancePerSet []int) (entries FilesInfoVersions) { +// mergeServerSetsEntriesVersionsCh - merges FileInfoVersions channel to entries upto maxKeys. +func mergeServerSetsEntriesVersionsCh(serverSetsEntryChs [][]FileInfoVersionsCh, maxKeys int, serverSetsListTolerancePerSet []int) (entries FilesInfoVersions) { var i = 0 - zonesEntriesInfos := make([][]FileInfoVersions, 0, len(zonesEntryChs)) - zonesEntriesValid := make([][]bool, 0, len(zonesEntryChs)) - for _, entryChs := range zonesEntryChs { - zonesEntriesInfos = append(zonesEntriesInfos, make([]FileInfoVersions, len(entryChs))) - zonesEntriesValid = append(zonesEntriesValid, make([]bool, len(entryChs))) + serverSetsEntriesInfos := make([][]FileInfoVersions, 0, len(serverSetsEntryChs)) + serverSetsEntriesValid := make([][]bool, 0, len(serverSetsEntryChs)) + for _, entryChs := range serverSetsEntryChs { + serverSetsEntriesInfos = append(serverSetsEntriesInfos, make([]FileInfoVersions, len(entryChs))) + serverSetsEntriesValid = append(serverSetsEntriesValid, make([]bool, len(entryChs))) } for { - fi, quorumCount, zoneIndex, ok := lexicallySortedEntryZoneVersions(zonesEntryChs, zonesEntriesInfos, zonesEntriesValid) + fi, quorumCount, zoneIndex, ok := lexicallySortedEntryZoneVersions(serverSetsEntryChs, serverSetsEntriesInfos, serverSetsEntriesValid) if !ok { // We have reached EOF across all entryChs, break the loop. break } - if quorumCount < zonesListTolerancePerSet[zoneIndex] { + if quorumCount < serverSetsListTolerancePerSet[zoneIndex] { // Skip entries which are not found upto the expected tolerance continue } @@ -1136,31 +1136,31 @@ func mergeZonesEntriesVersionsCh(zonesEntryChs [][]FileInfoVersionsCh, maxKeys i entries.FilesVersions = append(entries.FilesVersions, fi) i++ if i == maxKeys { - entries.IsTruncated = isTruncatedZonesVersions(zonesEntryChs, zonesEntriesInfos, zonesEntriesValid) + entries.IsTruncated = isTruncatedServerSetsVersions(serverSetsEntryChs, serverSetsEntriesInfos, serverSetsEntriesValid) break } } return entries } -// mergeZonesEntriesCh - merges FileInfo channel to entries upto maxKeys. -func mergeZonesEntriesCh(zonesEntryChs [][]FileInfoCh, maxKeys int, zonesListTolerancePerSet []int) (entries FilesInfo) { +// mergeServerSetsEntriesCh - merges FileInfo channel to entries upto maxKeys. 
+func mergeServerSetsEntriesCh(serverSetsEntryChs [][]FileInfoCh, maxKeys int, serverSetsListTolerancePerSet []int) (entries FilesInfo) { var i = 0 - zonesEntriesInfos := make([][]FileInfo, 0, len(zonesEntryChs)) - zonesEntriesValid := make([][]bool, 0, len(zonesEntryChs)) - for _, entryChs := range zonesEntryChs { - zonesEntriesInfos = append(zonesEntriesInfos, make([]FileInfo, len(entryChs))) - zonesEntriesValid = append(zonesEntriesValid, make([]bool, len(entryChs))) + serverSetsEntriesInfos := make([][]FileInfo, 0, len(serverSetsEntryChs)) + serverSetsEntriesValid := make([][]bool, 0, len(serverSetsEntryChs)) + for _, entryChs := range serverSetsEntryChs { + serverSetsEntriesInfos = append(serverSetsEntriesInfos, make([]FileInfo, len(entryChs))) + serverSetsEntriesValid = append(serverSetsEntriesValid, make([]bool, len(entryChs))) } var prevEntry string for { - fi, quorumCount, zoneIndex, ok := lexicallySortedEntryZone(zonesEntryChs, zonesEntriesInfos, zonesEntriesValid) + fi, quorumCount, zoneIndex, ok := lexicallySortedEntryZone(serverSetsEntryChs, serverSetsEntriesInfos, serverSetsEntriesValid) if !ok { // We have reached EOF across all entryChs, break the loop. break } - if quorumCount < zonesListTolerancePerSet[zoneIndex] { + if quorumCount < serverSetsListTolerancePerSet[zoneIndex] { // Skip entries which are not found upto configured tolerance. continue } @@ -1172,7 +1172,7 @@ func mergeZonesEntriesCh(zonesEntryChs [][]FileInfoCh, maxKeys int, zonesListTol entries.Files = append(entries.Files, fi) i++ if i == maxKeys { - entries.IsTruncated = isTruncatedZones(zonesEntryChs, zonesEntriesInfos, zonesEntriesValid) + entries.IsTruncated = isTruncatedServerSets(serverSetsEntryChs, serverSetsEntriesInfos, serverSetsEntriesValid) break } prevEntry = fi.Name @@ -1180,7 +1180,7 @@ func mergeZonesEntriesCh(zonesEntryChs [][]FileInfoCh, maxKeys int, zonesListTol return entries } -func isTruncatedZones(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileInfo, zoneEntriesValid [][]bool) bool { +func isTruncatedServerSets(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileInfo, zoneEntriesValid [][]bool) bool { for i, entryChs := range zoneEntryChs { for j := range entryChs { zoneEntries[i][j], zoneEntriesValid[i][j] = entryChs[j].Pop() @@ -1210,7 +1210,7 @@ func isTruncatedZones(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileInfo, zon return isTruncated } -func isTruncatedZonesVersions(zoneEntryChs [][]FileInfoVersionsCh, zoneEntries [][]FileInfoVersions, zoneEntriesValid [][]bool) bool { +func isTruncatedServerSetsVersions(zoneEntryChs [][]FileInfoVersionsCh, zoneEntries [][]FileInfoVersions, zoneEntriesValid [][]bool) bool { for i, entryChs := range zoneEntryChs { for j := range entryChs { zoneEntries[i][j], zoneEntriesValid[i][j] = entryChs[j].Pop() @@ -1240,7 +1240,7 @@ func isTruncatedZonesVersions(zoneEntryChs [][]FileInfoVersionsCh, zoneEntries [ return isTruncated } -func (z *erasureZones) listObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (ListObjectVersionsInfo, error) { +func (z *erasureServerSets) listObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (ListObjectVersionsInfo, error) { loi := ListObjectVersionsInfo{} if err := checkListObjsArgs(ctx, bucket, prefix, marker, z); err != nil { @@ -1288,25 +1288,25 @@ func (z *erasureZones) listObjectVersions(ctx context.Context, bucket, prefix, m recursive = false } - zonesEntryChs := make([][]FileInfoVersionsCh, 0, 
len(z.zones)) - zonesEndWalkCh := make([]chan struct{}, 0, len(z.zones)) - zonesListTolerancePerSet := make([]int, 0, len(z.zones)) - for _, zone := range z.zones { + serverSetsEntryChs := make([][]FileInfoVersionsCh, 0, len(z.serverSets)) + serverSetsEndWalkCh := make([]chan struct{}, 0, len(z.serverSets)) + serverSetsListTolerancePerSet := make([]int, 0, len(z.serverSets)) + for _, zone := range z.serverSets { entryChs, endWalkCh := zone.poolVersions.Release(listParams{bucket, recursive, marker, prefix}) if entryChs == nil { endWalkCh = make(chan struct{}) entryChs = zone.startMergeWalksVersionsN(ctx, bucket, prefix, marker, recursive, endWalkCh, zone.listTolerancePerSet) } - zonesEntryChs = append(zonesEntryChs, entryChs) - zonesEndWalkCh = append(zonesEndWalkCh, endWalkCh) + serverSetsEntryChs = append(serverSetsEntryChs, entryChs) + serverSetsEndWalkCh = append(serverSetsEndWalkCh, endWalkCh) if zone.listTolerancePerSet == -1 { - zonesListTolerancePerSet = append(zonesListTolerancePerSet, zone.setDriveCount/2) + serverSetsListTolerancePerSet = append(serverSetsListTolerancePerSet, zone.setDriveCount/2) } else { - zonesListTolerancePerSet = append(zonesListTolerancePerSet, zone.listTolerancePerSet-2) + serverSetsListTolerancePerSet = append(serverSetsListTolerancePerSet, zone.listTolerancePerSet-2) } } - entries := mergeZonesEntriesVersionsCh(zonesEntryChs, maxKeys, zonesListTolerancePerSet) + entries := mergeServerSetsEntriesVersionsCh(serverSetsEntryChs, maxKeys, serverSetsListTolerancePerSet) if len(entries.FilesVersions) == 0 { return loi, nil } @@ -1327,29 +1327,29 @@ func (z *erasureZones) listObjectVersions(ctx context.Context, bucket, prefix, m } } if loi.IsTruncated { - for i, zone := range z.zones { - zone.poolVersions.Set(listParams{bucket, recursive, loi.NextMarker, prefix}, zonesEntryChs[i], - zonesEndWalkCh[i]) + for i, zone := range z.serverSets { + zone.poolVersions.Set(listParams{bucket, recursive, loi.NextMarker, prefix}, serverSetsEntryChs[i], + serverSetsEndWalkCh[i]) } } return loi, nil } -func (z *erasureZones) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (ListObjectVersionsInfo, error) { +func (z *erasureServerSets) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (ListObjectVersionsInfo, error) { return z.listObjectVersions(ctx, bucket, prefix, marker, versionMarker, delimiter, maxKeys) } -func (z *erasureZones) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { +func (z *erasureServerSets) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { return z.listObjects(ctx, bucket, prefix, marker, delimiter, maxKeys) } -func (z *erasureZones) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) { +func (z *erasureServerSets) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) { if err := checkListMultipartArgs(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, z); err != nil { return ListMultipartsInfo{}, err } if z.SingleZone() { - return z.zones[0].ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads) + return z.serverSets[0].ListMultipartUploads(ctx, bucket, prefix, keyMarker, 
uploadIDMarker, delimiter, maxUploads) } var zoneResult = ListMultipartsInfo{} @@ -1357,7 +1357,7 @@ func (z *erasureZones) ListMultipartUploads(ctx context.Context, bucket, prefix, zoneResult.KeyMarker = keyMarker zoneResult.Prefix = prefix zoneResult.Delimiter = delimiter - for _, zone := range z.zones { + for _, zone := range z.serverSets { result, err := zone.ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads) if err != nil { @@ -1369,13 +1369,13 @@ func (z *erasureZones) ListMultipartUploads(ctx context.Context, bucket, prefix, } // Initiate a new multipart upload on a hashedSet based on object name. -func (z *erasureZones) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) { +func (z *erasureServerSets) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) { if err := checkNewMultipartArgs(ctx, bucket, object, z); err != nil { return "", err } if z.SingleZone() { - return z.zones[0].NewMultipartUpload(ctx, bucket, object, opts) + return z.serverSets[0].NewMultipartUpload(ctx, bucket, object, opts) } // We don't know the exact size, so we ask for at least 1GiB file. @@ -1384,11 +1384,11 @@ func (z *erasureZones) NewMultipartUpload(ctx context.Context, bucket, object st return "", err } - return z.zones[idx].NewMultipartUpload(ctx, bucket, object, opts) + return z.serverSets[idx].NewMultipartUpload(ctx, bucket, object, opts) } // Copies a part of an object from source hashedSet to destination hashedSet. -func (z *erasureZones) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (PartInfo, error) { +func (z *erasureServerSets) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (PartInfo, error) { if err := checkNewMultipartArgs(ctx, srcBucket, srcObject, z); err != nil { return PartInfo{}, err } @@ -1398,16 +1398,16 @@ func (z *erasureZones) CopyObjectPart(ctx context.Context, srcBucket, srcObject, } // PutObjectPart - writes part of an object to hashedSet based on the object name. 
-func (z *erasureZones) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (PartInfo, error) { +func (z *erasureServerSets) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (PartInfo, error) { if err := checkPutObjectPartArgs(ctx, bucket, object, z); err != nil { return PartInfo{}, err } if z.SingleZone() { - return z.zones[0].PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts) + return z.serverSets[0].PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts) } - for _, zone := range z.zones { + for _, zone := range z.serverSets { _, err := zone.GetMultipartInfo(ctx, bucket, object, uploadID, opts) if err == nil { return zone.PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts) @@ -1428,15 +1428,15 @@ func (z *erasureZones) PutObjectPart(ctx context.Context, bucket, object, upload } } -func (z *erasureZones) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) { +func (z *erasureServerSets) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) { if err := checkListPartsArgs(ctx, bucket, object, z); err != nil { return MultipartInfo{}, err } if z.SingleZone() { - return z.zones[0].GetMultipartInfo(ctx, bucket, object, uploadID, opts) + return z.serverSets[0].GetMultipartInfo(ctx, bucket, object, uploadID, opts) } - for _, zone := range z.zones { + for _, zone := range z.serverSets { mi, err := zone.GetMultipartInfo(ctx, bucket, object, uploadID, opts) if err == nil { return mi, nil @@ -1458,15 +1458,15 @@ func (z *erasureZones) GetMultipartInfo(ctx context.Context, bucket, object, upl } // ListObjectParts - lists all uploaded parts to an object in hashedSet. -func (z *erasureZones) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (ListPartsInfo, error) { +func (z *erasureServerSets) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (ListPartsInfo, error) { if err := checkListPartsArgs(ctx, bucket, object, z); err != nil { return ListPartsInfo{}, err } if z.SingleZone() { - return z.zones[0].ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts) + return z.serverSets[0].ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts) } - for _, zone := range z.zones { + for _, zone := range z.serverSets { _, err := zone.GetMultipartInfo(ctx, bucket, object, uploadID, opts) if err == nil { return zone.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts) @@ -1485,16 +1485,16 @@ func (z *erasureZones) ListObjectParts(ctx context.Context, bucket, object, uplo } // Aborts an in-progress multipart operation on hashedSet based on the object name. 
-func (z *erasureZones) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error { +func (z *erasureServerSets) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error { if err := checkAbortMultipartArgs(ctx, bucket, object, z); err != nil { return err } if z.SingleZone() { - return z.zones[0].AbortMultipartUpload(ctx, bucket, object, uploadID, opts) + return z.serverSets[0].AbortMultipartUpload(ctx, bucket, object, uploadID, opts) } - for _, zone := range z.zones { + for _, zone := range z.serverSets { _, err := zone.GetMultipartInfo(ctx, bucket, object, uploadID, opts) if err == nil { return zone.AbortMultipartUpload(ctx, bucket, object, uploadID, opts) @@ -1514,21 +1514,21 @@ func (z *erasureZones) AbortMultipartUpload(ctx context.Context, bucket, object, } // CompleteMultipartUpload - completes a pending multipart transaction, on hashedSet based on object name. -func (z *erasureZones) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) { +func (z *erasureServerSets) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) { if err = checkCompleteMultipartArgs(ctx, bucket, object, z); err != nil { return objInfo, err } if z.SingleZone() { - return z.zones[0].CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts) + return z.serverSets[0].CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts) } // Purge any existing object. - for _, zone := range z.zones { + for _, zone := range z.serverSets { zone.DeleteObject(ctx, bucket, object, opts) } - for _, zone := range z.zones { + for _, zone := range z.serverSets { result, err := zone.ListMultipartUploads(ctx, bucket, object, "", "", "", maxUploadsList) if err != nil { return objInfo, err @@ -1544,10 +1544,10 @@ func (z *erasureZones) CompleteMultipartUpload(ctx context.Context, bucket, obje } } -// GetBucketInfo - returns bucket info from one of the erasure coded zones. -func (z *erasureZones) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) { +// GetBucketInfo - returns bucket info from one of the erasure coded serverSets. +func (z *erasureServerSets) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) { if z.SingleZone() { - bucketInfo, err = z.zones[0].GetBucketInfo(ctx, bucket) + bucketInfo, err = z.serverSets[0].GetBucketInfo(ctx, bucket) if err != nil { return bucketInfo, err } @@ -1557,7 +1557,7 @@ func (z *erasureZones) GetBucketInfo(ctx context.Context, bucket string) (bucket } return bucketInfo, nil } - for _, zone := range z.zones { + for _, zone := range z.serverSets { bucketInfo, err = zone.GetBucketInfo(ctx, bucket) if err != nil { if isErrBucketNotFound(err) { @@ -1577,43 +1577,43 @@ func (z *erasureZones) GetBucketInfo(ctx context.Context, bucket string) (bucket } // IsNotificationSupported returns whether bucket notification is applicable for this layer. -func (z *erasureZones) IsNotificationSupported() bool { +func (z *erasureServerSets) IsNotificationSupported() bool { return true } // IsListenSupported returns whether listen bucket notification is applicable for this layer. 
-func (z *erasureZones) IsListenSupported() bool { +func (z *erasureServerSets) IsListenSupported() bool { return true } // IsEncryptionSupported returns whether server side encryption is implemented for this layer. -func (z *erasureZones) IsEncryptionSupported() bool { +func (z *erasureServerSets) IsEncryptionSupported() bool { return true } // IsCompressionSupported returns whether compression is applicable for this layer. -func (z *erasureZones) IsCompressionSupported() bool { +func (z *erasureServerSets) IsCompressionSupported() bool { return true } -func (z *erasureZones) IsTaggingSupported() bool { +func (z *erasureServerSets) IsTaggingSupported() bool { return true } -// DeleteBucket - deletes a bucket on all zones simultaneously, -// even if one of the zones fail to delete buckets, we proceed to +// DeleteBucket - deletes a bucket on all serverSets simultaneously, +// even if one of the serverSets fail to delete buckets, we proceed to // undo a successful operation. -func (z *erasureZones) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error { +func (z *erasureServerSets) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error { if z.SingleZone() { - return z.zones[0].DeleteBucket(ctx, bucket, forceDelete) + return z.serverSets[0].DeleteBucket(ctx, bucket, forceDelete) } - g := errgroup.WithNErrs(len(z.zones)) + g := errgroup.WithNErrs(len(z.serverSets)) - // Delete buckets in parallel across all zones. - for index := range z.zones { + // Delete buckets in parallel across all serverSets. + for index := range z.serverSets { index := index g.Go(func() error { - return z.zones[index].DeleteBucket(ctx, bucket, forceDelete) + return z.serverSets[index].DeleteBucket(ctx, bucket, forceDelete) }, index) } @@ -1624,7 +1624,7 @@ func (z *erasureZones) DeleteBucket(ctx context.Context, bucket string, forceDel for _, err := range errs { if err != nil { if _, ok := err.(InsufficientWriteQuorum); ok { - undoDeleteBucketZones(ctx, bucket, z.zones, errs) + undoDeleteBucketServerSets(ctx, bucket, z.serverSets, errs) } return err @@ -1636,15 +1636,15 @@ func (z *erasureZones) DeleteBucket(ctx context.Context, bucket string, forceDel } // This function is used to undo a successful DeleteBucket operation. -func undoDeleteBucketZones(ctx context.Context, bucket string, zones []*erasureSets, errs []error) { - g := errgroup.WithNErrs(len(zones)) +func undoDeleteBucketServerSets(ctx context.Context, bucket string, serverSets []*erasureSets, errs []error) { + g := errgroup.WithNErrs(len(serverSets)) - // Undo previous delete bucket on all underlying zones. - for index := range zones { + // Undo previous delete bucket on all underlying serverSets. + for index := range serverSets { index := index g.Go(func() error { if errs[index] == nil { - return zones[index].MakeBucketWithLocation(ctx, bucket, BucketOptions{}) + return serverSets[index].MakeBucketWithLocation(ctx, bucket, BucketOptions{}) } return nil }, index) @@ -1653,14 +1653,14 @@ func undoDeleteBucketZones(ctx context.Context, bucket string, zones []*erasureS g.Wait() } -// List all buckets from one of the zones, we are not doing merge +// List all buckets from one of the serverSets, we are not doing merge // sort here just for simplification. As per design it is assumed -// that all buckets are present on all zones. -func (z *erasureZones) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) { +// that all buckets are present on all serverSets. 
+func (z *erasureServerSets) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) { if z.SingleZone() { - buckets, err = z.zones[0].ListBuckets(ctx) + buckets, err = z.serverSets[0].ListBuckets(ctx) } else { - for _, zone := range z.zones { + for _, zone := range z.serverSets { buckets, err = zone.ListBuckets(ctx) if err != nil { logger.LogIf(ctx, err) @@ -1681,10 +1681,10 @@ func (z *erasureZones) ListBuckets(ctx context.Context) (buckets []BucketInfo, e return buckets, nil } -func (z *erasureZones) ReloadFormat(ctx context.Context, dryRun bool) error { +func (z *erasureServerSets) ReloadFormat(ctx context.Context, dryRun bool) error { // No locks needed since reload happens in HealFormat under // write lock across all nodes. - for _, zone := range z.zones { + for _, zone := range z.serverSets { if err := zone.ReloadFormat(ctx, dryRun); err != nil { return err } @@ -1692,7 +1692,7 @@ func (z *erasureZones) ReloadFormat(ctx context.Context, dryRun bool) error { return nil } -func (z *erasureZones) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) { +func (z *erasureServerSets) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) { // Acquire lock on format.json formatLock := z.NewNSLock(ctx, minioMetaBucket, formatConfigFile) if err := formatLock.GetLock(globalOperationTimeout); err != nil { @@ -1706,13 +1706,13 @@ func (z *erasureZones) HealFormat(ctx context.Context, dryRun bool) (madmin.Heal } var countNoHeal int - for _, zone := range z.zones { + for _, zone := range z.serverSets { result, err := zone.HealFormat(ctx, dryRun) if err != nil && !errors.Is(err, errNoHealRequired) { logger.LogIf(ctx, err) continue } - // Count errNoHealRequired across all zones, + // Count errNoHealRequired across all serverSets, // to return appropriate error to the caller if errors.Is(err, errNoHealRequired) { countNoHeal++ @@ -1732,21 +1732,21 @@ func (z *erasureZones) HealFormat(ctx context.Context, dryRun bool) (madmin.Heal } } - // No heal returned by all zones, return errNoHealRequired - if countNoHeal == len(z.zones) { + // No heal returned by all serverSets, return errNoHealRequired + if countNoHeal == len(z.serverSets) { return r, errNoHealRequired } return r, nil } -func (z *erasureZones) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (madmin.HealResultItem, error) { +func (z *erasureServerSets) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (madmin.HealResultItem, error) { var r = madmin.HealResultItem{ Type: madmin.HealItemBucket, Bucket: bucket, } - for _, zone := range z.zones { + for _, zone := range z.serverSets { result, err := zone.HealBucket(ctx, bucket, dryRun, remove) if err != nil { switch err.(type) { @@ -1768,46 +1768,46 @@ func (z *erasureZones) HealBucket(ctx context.Context, bucket string, dryRun, re // to allocate a receive channel for ObjectInfo, upon any unhandled // error walker returns error. Optionally if context.Done() is received // then Walk() stops the walker. -func (z *erasureZones) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo, opts ObjectOptions) error { +func (z *erasureServerSets) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo, opts ObjectOptions) error { if err := checkListObjsArgs(ctx, bucket, prefix, "", z); err != nil { // Upon error close the channel. 
close(results) return err } - zonesListTolerancePerSet := make([]int, 0, len(z.zones)) - for _, zone := range z.zones { + serverSetsListTolerancePerSet := make([]int, 0, len(z.serverSets)) + for _, zone := range z.serverSets { if zone.listTolerancePerSet == -1 { - zonesListTolerancePerSet = append(zonesListTolerancePerSet, zone.setDriveCount/2) + serverSetsListTolerancePerSet = append(serverSetsListTolerancePerSet, zone.setDriveCount/2) } else { - zonesListTolerancePerSet = append(zonesListTolerancePerSet, zone.listTolerancePerSet-2) + serverSetsListTolerancePerSet = append(serverSetsListTolerancePerSet, zone.listTolerancePerSet-2) } } if opts.WalkVersions { - var zonesEntryChs [][]FileInfoVersionsCh - for _, zone := range z.zones { - zonesEntryChs = append(zonesEntryChs, zone.startMergeWalksVersions(ctx, bucket, prefix, "", true, ctx.Done())) + var serverSetsEntryChs [][]FileInfoVersionsCh + for _, zone := range z.serverSets { + serverSetsEntryChs = append(serverSetsEntryChs, zone.startMergeWalksVersions(ctx, bucket, prefix, "", true, ctx.Done())) } - var zonesEntriesInfos [][]FileInfoVersions - var zonesEntriesValid [][]bool - for _, entryChs := range zonesEntryChs { - zonesEntriesInfos = append(zonesEntriesInfos, make([]FileInfoVersions, len(entryChs))) - zonesEntriesValid = append(zonesEntriesValid, make([]bool, len(entryChs))) + var serverSetsEntriesInfos [][]FileInfoVersions + var serverSetsEntriesValid [][]bool + for _, entryChs := range serverSetsEntryChs { + serverSetsEntriesInfos = append(serverSetsEntriesInfos, make([]FileInfoVersions, len(entryChs))) + serverSetsEntriesValid = append(serverSetsEntriesValid, make([]bool, len(entryChs))) } go func() { defer close(results) for { - entry, quorumCount, zoneIdx, ok := lexicallySortedEntryZoneVersions(zonesEntryChs, zonesEntriesInfos, zonesEntriesValid) + entry, quorumCount, zoneIdx, ok := lexicallySortedEntryZoneVersions(serverSetsEntryChs, serverSetsEntriesInfos, serverSetsEntriesValid) if !ok { // We have reached EOF across all entryChs, break the loop. 
return } - if quorumCount >= zonesListTolerancePerSet[zoneIdx] { + if quorumCount >= serverSetsListTolerancePerSet[zoneIdx] { for _, version := range entry.Versions { results <- version.ToObjectInfo(bucket, version.Name) } @@ -1818,29 +1818,29 @@ func (z *erasureZones) Walk(ctx context.Context, bucket, prefix string, results return nil } - zonesEntryChs := make([][]FileInfoCh, 0, len(z.zones)) - for _, zone := range z.zones { - zonesEntryChs = append(zonesEntryChs, zone.startMergeWalks(ctx, bucket, prefix, "", true, ctx.Done())) + serverSetsEntryChs := make([][]FileInfoCh, 0, len(z.serverSets)) + for _, zone := range z.serverSets { + serverSetsEntryChs = append(serverSetsEntryChs, zone.startMergeWalks(ctx, bucket, prefix, "", true, ctx.Done())) } - zonesEntriesInfos := make([][]FileInfo, 0, len(zonesEntryChs)) - zonesEntriesValid := make([][]bool, 0, len(zonesEntryChs)) - for _, entryChs := range zonesEntryChs { - zonesEntriesInfos = append(zonesEntriesInfos, make([]FileInfo, len(entryChs))) - zonesEntriesValid = append(zonesEntriesValid, make([]bool, len(entryChs))) + serverSetsEntriesInfos := make([][]FileInfo, 0, len(serverSetsEntryChs)) + serverSetsEntriesValid := make([][]bool, 0, len(serverSetsEntryChs)) + for _, entryChs := range serverSetsEntryChs { + serverSetsEntriesInfos = append(serverSetsEntriesInfos, make([]FileInfo, len(entryChs))) + serverSetsEntriesValid = append(serverSetsEntriesValid, make([]bool, len(entryChs))) } go func() { defer close(results) for { - entry, quorumCount, zoneIdx, ok := lexicallySortedEntryZone(zonesEntryChs, zonesEntriesInfos, zonesEntriesValid) + entry, quorumCount, zoneIdx, ok := lexicallySortedEntryZone(serverSetsEntryChs, serverSetsEntriesInfos, serverSetsEntriesValid) if !ok { // We have reached EOF across all entryChs, break the loop. return } - if quorumCount >= zonesListTolerancePerSet[zoneIdx] { + if quorumCount >= serverSetsListTolerancePerSet[zoneIdx] { results <- entry.ToObjectInfo(bucket, entry.Name) } } @@ -1852,24 +1852,24 @@ func (z *erasureZones) Walk(ctx context.Context, bucket, prefix string, results // HealObjectFn closure function heals the object. 
type HealObjectFn func(bucket, object, versionID string) error -func (z *erasureZones) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, healObject HealObjectFn) error { +func (z *erasureServerSets) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, healObject HealObjectFn) error { endWalkCh := make(chan struct{}) defer close(endWalkCh) - zonesEntryChs := make([][]FileInfoVersionsCh, 0, len(z.zones)) - zoneDrivesPerSet := make([]int, 0, len(z.zones)) + serverSetsEntryChs := make([][]FileInfoVersionsCh, 0, len(z.serverSets)) + zoneDrivesPerSet := make([]int, 0, len(z.serverSets)) - for _, zone := range z.zones { - zonesEntryChs = append(zonesEntryChs, + for _, zone := range z.serverSets { + serverSetsEntryChs = append(serverSetsEntryChs, zone.startMergeWalksVersions(ctx, bucket, prefix, "", true, endWalkCh)) zoneDrivesPerSet = append(zoneDrivesPerSet, zone.setDriveCount) } - zonesEntriesInfos := make([][]FileInfoVersions, 0, len(zonesEntryChs)) - zonesEntriesValid := make([][]bool, 0, len(zonesEntryChs)) - for _, entryChs := range zonesEntryChs { - zonesEntriesInfos = append(zonesEntriesInfos, make([]FileInfoVersions, len(entryChs))) - zonesEntriesValid = append(zonesEntriesValid, make([]bool, len(entryChs))) + serverSetsEntriesInfos := make([][]FileInfoVersions, 0, len(serverSetsEntryChs)) + serverSetsEntriesValid := make([][]bool, 0, len(serverSetsEntryChs)) + for _, entryChs := range serverSetsEntryChs { + serverSetsEntriesInfos = append(serverSetsEntriesInfos, make([]FileInfoVersions, len(entryChs))) + serverSetsEntriesValid = append(serverSetsEntriesValid, make([]bool, len(entryChs))) } // If listing did not return any entries upon first attempt, we @@ -1877,7 +1877,7 @@ func (z *erasureZones) HealObjects(ctx context.Context, bucket, prefix string, o // actions they may want to take as if `prefix` is missing. 
err := toObjectErr(errFileNotFound, bucket, prefix) for { - entry, quorumCount, zoneIndex, ok := lexicallySortedEntryZoneVersions(zonesEntryChs, zonesEntriesInfos, zonesEntriesValid) + entry, quorumCount, zoneIndex, ok := lexicallySortedEntryZoneVersions(serverSetsEntryChs, serverSetsEntriesInfos, serverSetsEntriesValid) if !ok { break } @@ -1907,7 +1907,7 @@ func (z *erasureZones) HealObjects(ctx context.Context, bucket, prefix string, o return err } -func (z *erasureZones) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (madmin.HealResultItem, error) { +func (z *erasureServerSets) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (madmin.HealResultItem, error) { object = encodeDirObject(object) lk := z.NewNSLock(ctx, bucket, object) @@ -1927,9 +1927,9 @@ func (z *erasureZones) HealObject(ctx context.Context, bucket, object, versionID } if z.SingleZone() { - return z.zones[0].HealObject(ctx, bucket, object, versionID, opts) + return z.serverSets[0].HealObject(ctx, bucket, object, versionID, opts) } - for _, zone := range z.zones { + for _, zone := range z.serverSets { result, err := zone.HealObject(ctx, bucket, object, versionID, opts) if err != nil { if isErrObjectNotFound(err) || isErrVersionNotFound(err) { @@ -1945,9 +1945,9 @@ func (z *erasureZones) HealObject(ctx context.Context, bucket, object, versionID } } -func (z *erasureZones) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) { +func (z *erasureServerSets) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) { var healBuckets []BucketInfo - for _, zone := range z.zones { + for _, zone := range z.serverSets { bucketsInfo, err := zone.ListBucketsHeal(ctx) if err != nil { continue @@ -1966,14 +1966,14 @@ func (z *erasureZones) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error } // GetMetrics - no op -func (z *erasureZones) GetMetrics(ctx context.Context) (*Metrics, error) { +func (z *erasureServerSets) GetMetrics(ctx context.Context) (*Metrics, error) { logger.LogIf(ctx, NotImplemented{}) return &Metrics{}, NotImplemented{} } -func (z *erasureZones) getZoneAndSet(id string) (int, int, error) { - for zoneIdx := range z.zones { - format := z.zones[zoneIdx].format +func (z *erasureServerSets) getZoneAndSet(id string) (int, int, error) { + for zoneIdx := range z.serverSets { + format := z.serverSets[zoneIdx].format for setIdx, set := range format.Erasure.Sets { for _, diskID := range set { if diskID == id { @@ -2004,10 +2004,10 @@ type HealthResult struct { // provides if write access exists across sets, additionally // can be used to query scenarios if health may be lost // if this node is taken down by an external orchestrator. 
-func (z *erasureZones) Health(ctx context.Context, opts HealthOptions) HealthResult { - erasureSetUpCount := make([][]int, len(z.zones)) - for i := range z.zones { - erasureSetUpCount[i] = make([]int, len(z.zones[i].sets)) +func (z *erasureServerSets) Health(ctx context.Context, opts HealthOptions) HealthResult { + erasureSetUpCount := make([][]int, len(z.serverSets)) + for i := range z.serverSets { + erasureSetUpCount[i] = make([]int, len(z.serverSets[i].sets)) } diskIDs := globalNotificationSys.GetLocalDiskIDs(ctx) @@ -2093,13 +2093,13 @@ func (z *erasureZones) Health(ctx context.Context, opts HealthOptions) HealthRes } // PutObjectTags - replace or add tags to an existing object -func (z *erasureZones) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) error { +func (z *erasureServerSets) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) error { object = encodeDirObject(object) if z.SingleZone() { - return z.zones[0].PutObjectTags(ctx, bucket, object, tags, opts) + return z.serverSets[0].PutObjectTags(ctx, bucket, object, tags, opts) } - for _, zone := range z.zones { + for _, zone := range z.serverSets { err := zone.PutObjectTags(ctx, bucket, object, tags, opts) if err != nil { if isErrObjectNotFound(err) || isErrVersionNotFound(err) { @@ -2123,12 +2123,12 @@ func (z *erasureZones) PutObjectTags(ctx context.Context, bucket, object string, } // DeleteObjectTags - delete object tags from an existing object -func (z *erasureZones) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) error { +func (z *erasureServerSets) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) error { object = encodeDirObject(object) if z.SingleZone() { - return z.zones[0].DeleteObjectTags(ctx, bucket, object, opts) + return z.serverSets[0].DeleteObjectTags(ctx, bucket, object, opts) } - for _, zone := range z.zones { + for _, zone := range z.serverSets { err := zone.DeleteObjectTags(ctx, bucket, object, opts) if err != nil { if isErrObjectNotFound(err) || isErrVersionNotFound(err) { @@ -2152,12 +2152,12 @@ func (z *erasureZones) DeleteObjectTags(ctx context.Context, bucket, object stri } // GetObjectTags - get object tags from an existing object -func (z *erasureZones) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) { +func (z *erasureServerSets) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) { object = encodeDirObject(object) if z.SingleZone() { - return z.zones[0].GetObjectTags(ctx, bucket, object, opts) + return z.serverSets[0].GetObjectTags(ctx, bucket, object, opts) } - for _, zone := range z.zones { + for _, zone := range z.serverSets { tags, err := zone.GetObjectTags(ctx, bucket, object, opts) if err != nil { if isErrObjectNotFound(err) || isErrVersionNotFound(err) { diff --git a/cmd/erasure-sets.go b/cmd/erasure-sets.go index ac33bc429..112b28fca 100644 --- a/cmd/erasure-sets.go +++ b/cmd/erasure-sets.go @@ -351,7 +351,7 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto sets: make([]*erasureObjects, setCount), erasureDisks: make([][]StorageAPI, setCount), erasureLockers: make([][]dsync.NetLocker, setCount), - erasureLockOwner: mustGetUUID(), + erasureLockOwner: GetLocalPeer(globalEndpoints), endpoints: endpoints, endpointStrings: endpointStrings, setCount: setCount, @@ -435,7 +435,7 @@ func (s *erasureSets) SetDriveCount() int { } // 
StorageUsageInfo - combines output of StorageInfo across all erasure coded object sets. -// This only returns disk usage info for Zones to perform placement decision, this call +// This only returns disk usage info for ServerSets to perform placement decision, this call // is not implemented in Object interface and is not meant to be used by other object // layer implementations. func (s *erasureSets) StorageUsageInfo(ctx context.Context) StorageInfo { diff --git a/cmd/erasure-sets_test.go b/cmd/erasure-sets_test.go index 337c5ce7f..80ba8d9b1 100644 --- a/cmd/erasure-sets_test.go +++ b/cmd/erasure-sets_test.go @@ -213,8 +213,8 @@ func TestHashedLayer(t *testing.T) { defer os.RemoveAll(dir) } - z := obj.(*erasureZones) - objs = append(objs, z.zones[0].sets[0]) + z := obj.(*erasureServerSets) + objs = append(objs, z.serverSets[0].sets[0]) } sets := &erasureSets{sets: objs, distributionAlgo: "CRCMOD"} diff --git a/cmd/globals.go b/cmd/globals.go index b88add2a4..ceac88e65 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -188,7 +188,7 @@ var ( // registered listeners globalConsoleSys *HTTPConsoleLoggerSys - globalEndpoints EndpointZones + globalEndpoints EndpointServerSets // Global server's network statistics globalConnStats = newConnStats() diff --git a/cmd/lock-rest-server.go b/cmd/lock-rest-server.go index dc47518c3..d65d955bd 100644 --- a/cmd/lock-rest-server.go +++ b/cmd/lock-rest-server.go @@ -247,7 +247,7 @@ func lockMaintenance(ctx context.Context, interval time.Duration) error { return nil } - z, ok := objAPI.(*erasureZones) + z, ok := objAPI.(*erasureServerSets) if !ok { return nil } @@ -368,8 +368,8 @@ func startLockMaintenance(ctx context.Context) { } // registerLockRESTHandlers - register lock rest router. -func registerLockRESTHandlers(router *mux.Router, endpointZones EndpointZones) { - for _, ep := range endpointZones { +func registerLockRESTHandlers(router *mux.Router, endpointServerSets EndpointServerSets) { + for _, ep := range endpointServerSets { for _, endpoint := range ep.Endpoints { if !endpoint.IsLocal { continue diff --git a/cmd/notification.go b/cmd/notification.go index caf7c2d00..631627c26 100644 --- a/cmd/notification.go +++ b/cmd/notification.go @@ -1187,7 +1187,7 @@ func (sys *NotificationSys) GetLocalDiskIDs(ctx context.Context) (localDiskIDs [ } // NewNotificationSys - creates new notification system object. 
-func NewNotificationSys(endpoints EndpointZones) *NotificationSys { +func NewNotificationSys(endpoints EndpointServerSets) *NotificationSys { // targetList/bucketRulesMap/bucketRemoteTargetRulesMap are populated by NotificationSys.Init() return &NotificationSys{ targetList: event.NewTargetList(), diff --git a/cmd/obdinfo.go b/cmd/obdinfo.go index 3085228dc..fef819967 100644 --- a/cmd/obdinfo.go +++ b/cmd/obdinfo.go @@ -61,10 +61,10 @@ func getLocalCPUOBDInfo(ctx context.Context, r *http.Request) madmin.ServerCPUOB } -func getLocalDrivesOBD(ctx context.Context, parallel bool, endpointZones EndpointZones, r *http.Request) madmin.ServerDrivesOBDInfo { +func getLocalDrivesOBD(ctx context.Context, parallel bool, endpointServerSets EndpointServerSets, r *http.Request) madmin.ServerDrivesOBDInfo { var drivesOBDInfo []madmin.DriveOBDInfo var wg sync.WaitGroup - for _, ep := range endpointZones { + for _, ep := range endpointServerSets { for _, endpoint := range ep.Endpoints { // Only proceed for local endpoints if endpoint.IsLocal { @@ -105,7 +105,7 @@ func getLocalDrivesOBD(ctx context.Context, parallel bool, endpointZones Endpoin addr := r.Host if globalIsDistErasure { - addr = GetLocalPeer(endpointZones) + addr = GetLocalPeer(endpointServerSets) } if parallel { return madmin.ServerDrivesOBDInfo{ diff --git a/cmd/peer-rest-client.go b/cmd/peer-rest-client.go index 2056eda47..ec82bce2c 100644 --- a/cmd/peer-rest-client.go +++ b/cmd/peer-rest-client.go @@ -822,8 +822,8 @@ func (client *peerRESTClient) ConsoleLog(logCh chan interface{}, doneCh <-chan s }() } -func getRemoteHosts(endpointZones EndpointZones) []*xnet.Host { - peers := GetRemotePeers(endpointZones) +func getRemoteHosts(endpointServerSets EndpointServerSets) []*xnet.Host { + peers := GetRemotePeers(endpointServerSets) remoteHosts := make([]*xnet.Host, 0, len(peers)) for _, hostStr := range peers { host, err := xnet.ParseHost(hostStr) @@ -838,7 +838,7 @@ func getRemoteHosts(endpointZones EndpointZones) []*xnet.Host { } // newPeerRestClients creates new peer clients. -func newPeerRestClients(endpoints EndpointZones) []*peerRESTClient { +func newPeerRestClients(endpoints EndpointServerSets) []*peerRESTClient { peerHosts := getRemoteHosts(endpoints) restClients := make([]*peerRESTClient, len(peerHosts)) for i, host := range peerHosts { diff --git a/cmd/peer-rest-server.go b/cmd/peer-rest-server.go index 96bb11698..3fac550b4 100644 --- a/cmd/peer-rest-server.go +++ b/cmd/peer-rest-server.go @@ -727,11 +727,11 @@ func (s *peerRESTServer) PutBucketNotificationHandler(w http.ResponseWriter, r * } // Return disk IDs of all the local disks. -func getLocalDiskIDs(z *erasureZones) []string { +func getLocalDiskIDs(z *erasureServerSets) []string { var ids []string - for zoneIdx := range z.zones { - for _, set := range z.zones[zoneIdx].sets { + for zoneIdx := range z.serverSets { + for _, set := range z.serverSets[zoneIdx].sets { disks := set.getDisks() for _, disk := range disks { if disk == nil { @@ -776,7 +776,7 @@ func (s *peerRESTServer) GetLocalDiskIDs(w http.ResponseWriter, r *http.Request) return } - z, ok := objLayer.(*erasureZones) + z, ok := objLayer.(*erasureServerSets) if !ok { s.writeErrorResponse(w, errServerNotInitialized) return diff --git a/cmd/routers.go b/cmd/routers.go index 23439bb10..467041ae5 100644 --- a/cmd/routers.go +++ b/cmd/routers.go @@ -23,9 +23,9 @@ import ( ) // Composed function registering routers for only distributed Erasure setup. 
-func registerDistErasureRouters(router *mux.Router, endpointZones EndpointZones) { +func registerDistErasureRouters(router *mux.Router, endpointServerSets EndpointServerSets) { // Register storage REST router only if its a distributed setup. - registerStorageRESTHandlers(router, endpointZones) + registerStorageRESTHandlers(router, endpointServerSets) // Register peer REST router only if its a distributed setup. registerPeerRESTHandlers(router) @@ -34,7 +34,7 @@ func registerDistErasureRouters(router *mux.Router, endpointZones EndpointZones) registerBootstrapRESTHandlers(router) // Register distributed namespace lock routers. - registerLockRESTHandlers(router, endpointZones) + registerLockRESTHandlers(router, endpointServerSets) } // List of some generic handlers which are applied for all incoming requests. @@ -79,14 +79,14 @@ var globalHandlers = []MiddlewareFunc{ } // configureServer handler returns final handler for the http server. -func configureServerHandler(endpointZones EndpointZones) (http.Handler, error) { +func configureServerHandler(endpointServerSets EndpointServerSets) (http.Handler, error) { // Initialize router. `SkipClean(true)` stops gorilla/mux from // normalizing URL path minio/minio#3256 router := mux.NewRouter().SkipClean(true).UseEncodedPath() // Initialize distributed NS lock. if globalIsDistErasure { - registerDistErasureRouters(router, endpointZones) + registerDistErasureRouters(router, endpointServerSets) } // Add STS router always. diff --git a/cmd/server-main.go b/cmd/server-main.go index 6c9e7953b..a7af0e596 100644 --- a/cmd/server-main.go +++ b/cmd/server-main.go @@ -516,12 +516,12 @@ func serverMain(ctx *cli.Context) { } // Initialize object layer with the supplied disks, objectLayer is nil upon any error. -func newObjectLayer(ctx context.Context, endpointZones EndpointZones) (newObject ObjectLayer, err error) { +func newObjectLayer(ctx context.Context, endpointServerSets EndpointServerSets) (newObject ObjectLayer, err error) { // For FS only, directly use the disk. - if endpointZones.NEndpoints() == 1 { + if endpointServerSets.NEndpoints() == 1 { // Initialize new FS object layer. - return NewFSObjectLayer(endpointZones[0].Endpoints[0].Path) + return NewFSObjectLayer(endpointServerSets[0].Endpoints[0].Path) } - return newErasureZones(ctx, endpointZones) + return newErasureServerSets(ctx, endpointServerSets) } diff --git a/cmd/server-main_test.go b/cmd/server-main_test.go index 07340bc87..421ce04dc 100644 --- a/cmd/server-main_test.go +++ b/cmd/server-main_test.go @@ -58,7 +58,7 @@ func TestNewObjectLayer(t *testing.T) { t.Fatal("Unexpected object layer initialization error", err) } - _, ok = obj.(*erasureZones) + _, ok = obj.(*erasureServerSets) if !ok { t.Fatal("Unexpected object layer detected", reflect.TypeOf(obj)) } diff --git a/cmd/storage-rest-server.go b/cmd/storage-rest-server.go index a4e7ac1c7..582bee117 100644 --- a/cmd/storage-rest-server.go +++ b/cmd/storage-rest-server.go @@ -890,8 +890,8 @@ func logFatalErrs(err error, endpoint Endpoint, exit bool) { } // registerStorageRPCRouter - register storage rpc router. 
-func registerStorageRESTHandlers(router *mux.Router, endpointZones EndpointZones) { - for _, ep := range endpointZones { +func registerStorageRESTHandlers(router *mux.Router, endpointServerSets EndpointServerSets) { + for _, ep := range endpointServerSets { for _, endpoint := range ep.Endpoints { if !endpoint.IsLocal { continue diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index 6f706c3a9..1f7df4426 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -286,7 +286,7 @@ func isSameType(obj1, obj2 interface{}) bool { // defer s.Stop() type TestServer struct { Root string - Disks EndpointZones + Disks EndpointServerSets AccessKey string SecretKey string Server *httptest.Server @@ -403,7 +403,7 @@ func resetGlobalConfig() { } func resetGlobalEndpoints() { - globalEndpoints = EndpointZones{} + globalEndpoints = EndpointServerSets{} } func resetGlobalIsErasure() { @@ -1546,14 +1546,14 @@ func getRandomDisks(N int) ([]string, error) { } // Initialize object layer with the supplied disks, objectLayer is nil upon any error. -func newTestObjectLayer(ctx context.Context, endpointZones EndpointZones) (newObject ObjectLayer, err error) { +func newTestObjectLayer(ctx context.Context, endpointServerSets EndpointServerSets) (newObject ObjectLayer, err error) { // For FS only, directly use the disk. - if endpointZones.NEndpoints() == 1 { + if endpointServerSets.NEndpoints() == 1 { // Initialize new FS object layer. - return NewFSObjectLayer(endpointZones[0].Endpoints[0].Path) + return NewFSObjectLayer(endpointServerSets[0].Endpoints[0].Path) } - z, err := newErasureZones(ctx, endpointZones) + z, err := newErasureServerSets(ctx, endpointServerSets) if err != nil { return nil, err } @@ -1566,16 +1566,16 @@ func newTestObjectLayer(ctx context.Context, endpointZones EndpointZones) (newOb } // initObjectLayer - Instantiates object layer and returns it. -func initObjectLayer(ctx context.Context, endpointZones EndpointZones) (ObjectLayer, []StorageAPI, error) { - objLayer, err := newTestObjectLayer(ctx, endpointZones) +func initObjectLayer(ctx context.Context, endpointServerSets EndpointServerSets) (ObjectLayer, []StorageAPI, error) { + objLayer, err := newTestObjectLayer(ctx, endpointServerSets) if err != nil { return nil, nil, err } var formattedDisks []StorageAPI // Should use the object layer tests for validating cache. - if z, ok := objLayer.(*erasureZones); ok { - formattedDisks = z.zones[0].GetDisks(0)() + if z, ok := objLayer.(*erasureServerSets); ok { + formattedDisks = z.serverSets[0].GetDisks(0)() } // Success. @@ -2212,7 +2212,7 @@ func generateTLSCertKey(host string) ([]byte, []byte, error) { return certOut.Bytes(), keyOut.Bytes(), nil } -func mustGetZoneEndpoints(args ...string) EndpointZones { +func mustGetZoneEndpoints(args ...string) EndpointServerSets { endpoints := mustGetNewEndpoints(args...) 
return []ZoneEndpoints{{ SetCount: 1, @@ -2227,8 +2227,8 @@ func mustGetNewEndpoints(args ...string) (endpoints Endpoints) { return endpoints } -func getEndpointsLocalAddr(endpointZones EndpointZones) string { - for _, endpoints := range endpointZones { +func getEndpointsLocalAddr(endpointServerSets EndpointServerSets) string { + for _, endpoints := range endpointServerSets { for _, endpoint := range endpoints.Endpoints { if endpoint.IsLocal && endpoint.Type() == URLEndpointType { return endpoint.Host diff --git a/cmd/web-handlers_test.go b/cmd/web-handlers_test.go index 54e742b37..082089e02 100644 --- a/cmd/web-handlers_test.go +++ b/cmd/web-handlers_test.go @@ -1224,17 +1224,17 @@ func TestWebObjectLayerFaultyDisks(t *testing.T) { } // Set faulty disks to Erasure backend - z := obj.(*erasureZones) - xl := z.zones[0].sets[0] + z := obj.(*erasureServerSets) + xl := z.serverSets[0].sets[0] erasureDisks := xl.getDisks() - z.zones[0].erasureDisksMu.Lock() + z.serverSets[0].erasureDisksMu.Lock() xl.getDisks = func() []StorageAPI { for i, d := range erasureDisks { erasureDisks[i] = newNaughtyDisk(d, nil, errFaultyDisk) } return erasureDisks } - z.zones[0].erasureDisksMu.Unlock() + z.serverSets[0].erasureDisksMu.Unlock() // Initialize web rpc endpoint. apiRouter := initTestWebRPCEndPoint(obj) diff --git a/docs/distributed/DESIGN.md b/docs/distributed/DESIGN.md index 9ba2c3478..315bcad6f 100644 --- a/docs/distributed/DESIGN.md +++ b/docs/distributed/DESIGN.md @@ -94,37 +94,37 @@ Input for the key is the object name specified in `PutObject()`, returns a uniqu - MinIO does erasure coding at the object level not at the volume level, unlike other object storage vendors. This allows applications to choose different storage class by setting `x-amz-storage-class=STANDARD/REDUCED_REDUNDANCY` for each object uploads so effectively utilizing the capacity of the cluster. Additionally these can also be enforced using IAM policies to make sure the client uploads with correct HTTP headers. -- MinIO also supports expansion of existing clusters in zones. Each zone is a self contained entity with same SLA's (read/write quorum) for each object as original cluster. By using the existing namespace for lookup validation MinIO ensures conflicting objects are not created. When no such object exists then MinIO simply uses the least used zone. +- MinIO also supports expansion of existing clusters in server sets. Each zone is a self contained entity with same SLA's (read/write quorum) for each object as original cluster. By using the existing namespace for lookup validation MinIO ensures conflicting objects are not created. When no such object exists then MinIO simply uses the least used zone. -__There are no limits on how many zones can be combined__ +__There are no limits on how many server sets can be combined__ ``` minio server http://host{1...32}/export{1...32} http://host{5...6}/export{1...8} ``` -In above example there are two zones +In above example there are two server sets - 32 * 32 = 1024 drives zone1 - 2 * 8 = 16 drives zone2 > Notice the requirement of common SLA here original cluster had 1024 drives with 16 drives per erasure set, second zone is expected to have a minimum of 16 drives to match the original cluster SLA or it should be in multiples of 16. -MinIO places new objects in zones based on proportionate free space, per zone. Following pseudo code demonstrates this behavior. +MinIO places new objects in server sets based on proportionate free space, per zone. 
Following pseudo code demonstrates this behavior. ```go func getAvailableZoneIdx(ctx context.Context) int { - zones := z.getZonesAvailableSpace(ctx) - total := zones.TotalAvailable() + serverSets := z.getServerSetsAvailableSpace(ctx) + total := serverSets.TotalAvailable() // choose when we reach this many choose := rand.Uint64() % total atTotal := uint64(0) - for _, zone := range zones { + for _, zone := range serverSets { atTotal += zone.Available if atTotal > choose && zone.Available > 0 { return zone.Index } } // Should not happen, but print values just in case. - panic(fmt.Errorf("reached end of zones (total: %v, atTotal: %v, choose: %v)", total, atTotal, choose)) + panic(fmt.Errorf("reached end of serverSets (total: %v, atTotal: %v, choose: %v)", total, atTotal, choose)) } ``` diff --git a/docs/distributed/README.md b/docs/distributed/README.md index f603830dd..6059dad29 100644 --- a/docs/distributed/README.md +++ b/docs/distributed/README.md @@ -77,10 +77,10 @@ For example: minio server http://host{1...4}/export{1...16} http://host{5...12}/export{1...16} ``` -Now the server has expanded total storage by _(newly_added_servers\*m)_ more disks, taking the total count to _(existing_servers\*m)+(newly_added_servers\*m)_ disks. New object upload requests automatically start using the least used cluster. This expansion strategy works endlessly, so you can perpetually expand your clusters as needed. When you restart, it is immediate and non-disruptive to the applications. Each group of servers in the command-line is called a zone. There are 2 zones in this example. New objects are placed in zones in proportion to the amount of free space in each zone. Within each zone, the location of the erasure-set of drives is determined based on a deterministic hashing algorithm. +Now the server has expanded total storage by _(newly_added_servers\*m)_ more disks, taking the total count to _(existing_servers\*m)+(newly_added_servers\*m)_ disks. New object upload requests automatically start using the least used cluster. This expansion strategy works endlessly, so you can perpetually expand your clusters as needed. When you restart, it is immediate and non-disruptive to the applications. Each group of servers in the command-line is called a zone. There are 2 server sets in this example. New objects are placed in server sets in proportion to the amount of free space in each zone. Within each zone, the location of the erasure-set of drives is determined based on a deterministic hashing algorithm. > __NOTE:__ __Each zone you add must have the same erasure coding set size as the original zone, so the same data redundancy SLA is maintained.__ -> For example, if your first zone was 8 drives, you could add further zones of 16, 32 or 1024 drives each. All you have to make sure is deployment SLA is multiples of original data redundancy SLA i.e 8. +> For example, if your first zone was 8 drives, you could add further server sets of 16, 32 or 1024 drives each. All you have to make sure is deployment SLA is multiples of original data redundancy SLA i.e 8. ## 3. Test your setup To test this setup, access the MinIO server via browser or [`mc`](https://docs.min.io/docs/minio-client-quickstart-guide). 
diff --git a/docs/zh_CN/distributed/DESIGN.md b/docs/zh_CN/distributed/DESIGN.md index 763384a7c..4c4529038 100644 --- a/docs/zh_CN/distributed/DESIGN.md +++ b/docs/zh_CN/distributed/DESIGN.md @@ -113,19 +113,19 @@ minio server http://host{1...32}/export{1...32} http://host{5...6}/export{1...8} MinIO根据每个区域的可用空间比例将新对象放置在区域中。以下伪代码演示了此行为。 ```go func getAvailableZoneIdx(ctx context.Context) int { - zones := z.getZonesAvailableSpace(ctx) - total := zones.TotalAvailable() + serverSets := z.getServerSetsAvailableSpace(ctx) + total := serverSets.TotalAvailable() // choose when we reach this many choose := rand.Uint64() % total atTotal := uint64(0) - for _, zone := range zones { + for _, zone := range serverSets { atTotal += zone.Available if atTotal > choose && zone.Available > 0 { return zone.Index } } // Should not happen, but print values just in case. - panic(fmt.Errorf("reached end of zones (total: %v, atTotal: %v, choose: %v)", total, atTotal, choose)) + panic(fmt.Errorf("reached end of serverSets (total: %v, atTotal: %v, choose: %v)", total, atTotal, choose)) } ```
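The docs above also state the expansion constraint: every added server set must preserve the original data redundancy SLA, i.e. its drive count must be a non-zero multiple of the existing erasure set size (16 in the DESIGN.md example, 8 in the README example). A minimal sketch of that check follows; `validateExpansion` is an illustrative helper invented here, not a MinIO API.

```go
package main

import (
	"errors"
	"fmt"
)

// validateExpansion checks that a newly added server set keeps the
// original SLA: its drive count must be a positive multiple of the
// existing erasure set size. Illustrative only, not a MinIO function.
func validateExpansion(erasureSetSize, newServerSetDrives int) error {
	if erasureSetSize <= 0 || newServerSetDrives <= 0 {
		return errors.New("drive counts must be positive")
	}
	if newServerSetDrives%erasureSetSize != 0 {
		return fmt.Errorf("new server set has %d drives, want a multiple of %d",
			newServerSetDrives, erasureSetSize)
	}
	return nil
}

func main() {
	// From DESIGN.md: the original cluster uses 16 drives per erasure set,
	// so the 16-drive expansion (2 hosts * 8 drives each) is acceptable.
	fmt.Println(validateExpansion(16, 16)) // <nil>
	fmt.Println(validateExpansion(16, 12)) // error: not a multiple of 16
}
```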